Dataset schema (column, type, and observed length range or number of distinct values):

Column                       Type    Values
ID                           string  length 36–36
Language                     string  1 distinct value
Repository Name              string  13 distinct values
File Name                    string  length 2–48
File Path in Repository      string  length 11–111
File Path for Unit Test      string  length 13–116
Code                         string  length 0–278k
Unit Test - (Ground Truth)   string  length 78–663k
Code Url                     string  length 91–198
Test Code Url                string  length 93–203
Commit Hash                  string  13 distinct values
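The columns above describe one record per pair of source file and unit-test file. As a minimal illustration only, the C++ sketch below models such a record as a plain struct: the struct name and field names are my own transliterations of the column names, every column is assumed to be stored as raw text (std::string), and nothing in this sketch comes from the dataset's own tooling.

#include <string>

// Hypothetical record type mirroring the dataset columns listed above.
// All fields are assumed to be plain strings whose lengths fall in the
// ranges shown in the schema (e.g. `id` is a 36-character identifier).
struct UnitTestRecord {
  std::string id;                        // ID
  std::string language;                  // Language (e.g. "cpp")
  std::string repository_name;           // Repository Name
  std::string file_name;                 // File Name
  std::string file_path_in_repository;   // File Path in Repository
  std::string file_path_for_unit_test;   // File Path for Unit Test
  std::string code;                      // Code (source under test)
  std::string unit_test_ground_truth;    // Unit Test - (Ground Truth)
  std::string code_url;                  // Code Url
  std::string test_code_url;             // Test Code Url
  std::string commit_hash;               // Commit Hash
};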
Example record:

ID: c6ee76f1-6ebb-49a4-b2a3-6cc5517b6271
Language: cpp
Repository Name: tensorflow/tensorflow
File Name: gpu_hlo_schedule
File Path in Repository: third_party/xla/xla/service/gpu/gpu_hlo_schedule.cc
File Path for Unit Test: third_party/xla/xla/service/gpu/gpu_hlo_schedule_test.cc

Code:
#include "xla/service/gpu/gpu_hlo_schedule.h" #include <cstddef> #include <cstdint> #include <deque> #include <memory> #include <optional> #include <string> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/match.h" #include "absl/strings/numbers.h" #include "absl/strings/str_split.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_input_output_alias_config.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/ir/hlo_schedule.h" #include "xla/hlo/pass/hlo_pass_pipeline.h" #include "xla/hlo/utils/hlo_query.h" #include "xla/service/buffer_value.h" #include "xla/service/collective_ops_utils.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/gpu_latency_hiding_scheduler.h" #include "xla/service/gpu/model/analytical_latency_estimator.h" #include "xla/service/gpu/transforms/pgle_accuracy_checker.h" #include "xla/service/gpu/transforms/schedule_postprocessing.h" #include "xla/service/gpu/transforms/scheduling_instruction_annotator.h" #include "xla/service/hlo_memory_scheduler.h" #include "xla/service/latency_hiding_scheduler.h" #include "xla/service/p2p_schedule_preparation.h" #include "xla/service/profile_guided_latency_estimator.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/stream_executor/device_description.h" #include "xla/util.h" #include "tsl/platform/env.h" #include "tsl/platform/errors.h" #include "tsl/platform/path.h" #include "tsl/platform/protobuf.h" #include "tsl/platform/statusor.h" #include "tsl/profiler/lib/traceme.h" namespace xla { namespace gpu { namespace { bool ShouldScheduleAsEarlyAsPossible(const HloInstruction& instr) { switch (instr.opcode()) { case HloOpcode::kAllReduceStart: case HloOpcode::kCollectivePermuteStart: return !IsSyncCollective(&instr); case HloOpcode::kAsyncStart: return true; case HloOpcode::kCustomCall: return static_cast<const HloCustomCallInstruction&>(instr) .custom_call_schedule() == CustomCallSchedule::SCHEDULE_EARLIEST; default: return false; } } bool ShouldScheduleSuccessor(const HloInstruction& sussessor, const HloPredicate& is_scheduled) { return ShouldScheduleAsEarlyAsPossible(sussessor) && absl::c_all_of(sussessor.operands(), is_scheduled) && absl::c_all_of(sussessor.control_predecessors(), is_scheduled); } bool ShouldScheduleAsLateAsPossible(const HloInstruction& instr) { switch (instr.opcode()) { case HloOpcode::kAllReduceDone: case HloOpcode::kCollectivePermuteDone: return ShouldScheduleAsEarlyAsPossible(*instr.operand(0)); case HloOpcode::kAsyncDone: return true; case HloOpcode::kCustomCall: return static_cast<const HloCustomCallInstruction&>(instr) .custom_call_schedule() == CustomCallSchedule::SCHEDULE_LATEST; default: return false; } } bool ShouldSchedulePredecessor(const HloInstruction& predecessor, const HloPredicate& is_scheduled) { return ShouldScheduleAsLateAsPossible(predecessor) && absl::c_all_of(predecessor.users(), is_scheduled) && absl::c_all_of(predecessor.control_successors(), is_scheduled); } HloInstructionSequence PostprocessorToScheduleAsEarlyOrLateAsPossible( const HloInstructionSequence& input) { std::vector<HloInstruction*> earliest_scheduled; { absl::flat_hash_set<HloInstruction*> 
scheduled; auto is_scheduled = [&](const HloInstruction* instr) -> bool { return scheduled.contains(instr); }; auto add_to_schedule = [&](HloInstruction* instr) { earliest_scheduled.push_back(instr); scheduled.insert(instr); }; for (HloInstruction* instr : input.instructions()) { if (is_scheduled(instr)) continue; add_to_schedule(instr); for (HloInstruction* user : instr->users()) { if (is_scheduled(user)) continue; if (ShouldScheduleSuccessor(*user, is_scheduled)) { add_to_schedule(user); } } for (HloInstruction* successor : instr->control_successors()) { if (is_scheduled(successor)) continue; if (ShouldScheduleSuccessor(*successor, is_scheduled)) { add_to_schedule(successor); } } } } std::deque<HloInstruction*> latest_scheduled; { absl::flat_hash_set<HloInstruction*> scheduled; auto is_scheduled = [&](const HloInstruction* instr) -> bool { return scheduled.contains(instr); }; auto add_to_schedule = [&](HloInstruction* instr) { latest_scheduled.push_front(instr); scheduled.insert(instr); }; for (auto it = earliest_scheduled.rbegin(); it != earliest_scheduled.rend(); it++) { if (is_scheduled(*it)) continue; add_to_schedule(*it); for (HloInstruction* operand : (*it)->operands()) { if (is_scheduled(operand)) continue; if (ShouldSchedulePredecessor(*operand, is_scheduled)) { add_to_schedule(operand); } } for (HloInstruction* predecessor : (*it)->control_predecessors()) { if (is_scheduled(predecessor)) continue; if (ShouldSchedulePredecessor(*predecessor, is_scheduled)) { add_to_schedule(predecessor); } } } } HloInstructionSequence result; absl::c_for_each(latest_scheduled, [&](HloInstruction* i) { result.push_back(i); }); CHECK(input.instructions().size() == result.size()) << "schedule as early or late post-processing changed schedule size from " << input.instructions().size() << " to " << result.size(); return result; } HloInstructionSequence PostprocessorToScheduleSyncCollectives( const HloInstructionSequence& input) { HloInstructionSequence result; auto is_sync_start = [](const HloInstruction* instr) { return hlo_query::IsAsyncCollectiveStartOp(instr, true) && IsSyncCollective(instr); }; for (HloInstruction* instr : input.instructions()) { if (is_sync_start(instr)) continue; if (hlo_query::IsAsyncCollectiveDoneOp(instr, true)) { HloInstruction* start = instr->mutable_operand(0); if (is_sync_start(start)) result.push_back(start); } result.push_back(instr); } CHECK(input.instructions().size() == result.size()) << "sync collectives post-processing changed schedule size from " << input.instructions().size() << " to " << result.size(); return result; } SchedulerConfig GetSchedulerConfig(int64_t memory_limit) { SchedulerConfig config; config.all_reduce_overlap_limit = 1; config.collective_broadcast_overlap_limit = 1; config.collective_permute_overlap_limit = 1; config.use_real_cost_model = false; config.aggressive_scheduling_policies = true; config.schedule_send_recvs = true; config.memory_limit = memory_limit; return config; } tensorflow::profiler::ProfiledInstructionsProto GetProfileForFingerprint( tensorflow::profiler::ProfiledInstructionsProto& profile, const std::string& fingerprint) { tensorflow::profiler::ProfiledInstructionsProto result; bool merge_remat_clones = false; for (const auto& cost : profile.costs()) { absl::string_view cost_name = cost.name(); std::string new_cost_name = cost.name(); absl::string_view cost_sep = "::"; if (absl::StrContains(cost_name, cost_sep)) { std::vector<std::string> split_names = absl::StrSplit(cost_name, cost_sep); if (split_names.size() != 2 || 
split_names[0] != fingerprint) { continue; } new_cost_name = split_names[1]; } merge_remat_clones |= absl::StrContains(new_cost_name, ".remat"); auto* new_cost = result.add_costs(); new_cost->set_cost_us(cost.cost_us()); new_cost->set_name(new_cost_name); } if (!merge_remat_clones) { return result; } auto strip_remat_suffix = [](absl::string_view name) -> absl::string_view { absl::string_view suffix = ".remat"; size_t index = name.rfind(suffix); if (index == std::string::npos) { return name; } auto after_suffix = name.substr(index + suffix.size()); int64_t numeric_suffix; if (after_suffix.empty() || absl::SimpleAtoi(after_suffix, &numeric_suffix)) { return name.substr(0, index); } return name; }; absl::flat_hash_map<absl::string_view, std::pair<double, int64_t>> costs; for (const auto& cost : result.costs()) { std::pair<double, int64_t>& data = costs[strip_remat_suffix(cost.name())]; data.first += cost.cost_us(); data.second++; } tensorflow::profiler::ProfiledInstructionsProto merged_result; for (const auto& cost : costs) { auto* new_cost = merged_result.add_costs(); double average = cost.second.first / cost.second.second; new_cost->set_cost_us(average); new_cost->set_name(std::string(cost.first)); } return merged_result; } std::optional<tensorflow::profiler::ProfiledInstructionsProto> ReadPGLEProfile( const HloModule* module, const std::string& fingerprint) { tensorflow::profiler::ProfiledInstructionsProto profile; absl::string_view fdo_profile = module->config().fdo_profile(); if (!fdo_profile.empty()) { if (tsl::ParseProtoUnlimited(&profile, fdo_profile.data(), fdo_profile.size())) { LOG(INFO) << "Using PGLE profile for module from fdo_profile (binary)"; return GetProfileForFingerprint(profile, fingerprint); } profile.Clear(); if (tsl::protobuf::TextFormat::ParseFromString(std::string(fdo_profile), &profile)) { LOG(INFO) << "Using PGLE profile for module from fdo_profile (text)"; return GetProfileForFingerprint(profile, fingerprint); } LOG(ERROR) << "Unable to prase FDO profile: not a valid text or binary " "ProfiledInstructionsProto"; } const std::string& pgle_profile_file_or_dir_path = module->config() .debug_options() .xla_gpu_pgle_profile_file_or_directory_path(); if (pgle_profile_file_or_dir_path.empty()) { return std::nullopt; } tsl::Env* env = tsl::Env::Default(); auto read_text_or_binary_profile = [&profile, env, &fingerprint]( const std::string& text_path, const std::string& binary_path) -> std::optional<tensorflow::profiler::ProfiledInstructionsProto> { if (env->FileExists(text_path).ok()) { absl::Status s = tsl::ReadTextProto(env, text_path, &profile); if (s.ok()) { LOG(INFO) << "Using PGLE profile from " << text_path; return GetProfileForFingerprint(profile, fingerprint); } else { LOG(ERROR) << "Unable to read PGLE text proto from " << text_path << ": " << s.message(); } profile.Clear(); } if (env->FileExists(binary_path).ok()) { absl::Status s = tsl::ReadBinaryProto(env, binary_path, &profile); if (s.ok()) { LOG(INFO) << "Using PGLE profile from " << binary_path; return GetProfileForFingerprint(profile, fingerprint); } else { LOG(ERROR) << "Unable to read PGLE binary proto from " << binary_path << ": " << s.message(); } profile.Clear(); } return std::nullopt; }; if (env->IsDirectory(pgle_profile_file_or_dir_path).ok()) { std::string pgle_profile_path_prefix = pgle_profile_file_or_dir_path + "/" + fingerprint; return read_text_or_binary_profile(pgle_profile_path_prefix + ".pbtxt", pgle_profile_path_prefix + ".pb"); } auto extension = 
tsl::io::Extension(pgle_profile_file_or_dir_path); if (extension == "pbtxt") { return read_text_or_binary_profile(pgle_profile_file_or_dir_path, ""); } else if (extension == "pb") { return read_text_or_binary_profile("", pgle_profile_file_or_dir_path); } else { return read_text_or_binary_profile(pgle_profile_file_or_dir_path, pgle_profile_file_or_dir_path); } } } static int64_t GetSchedulerMemoryLimit( const HloModule* module, const se::DeviceDescription& gpu_device_info, int pointer_size); absl::StatusOr<ScheduleMetadata> ScheduleGpuModule( HloModule* module, int64_t pointer_size, const se::DeviceDescription& gpu_device_info) { tsl::profiler::TraceMe traceme("GpuCompiler::CompileToBackendResult"); int64_t memory_limit = GetSchedulerMemoryLimit(module, gpu_device_info, pointer_size); if (module->has_schedule()) { return ScheduleMetadata{memory_limit}; } HloPassPipeline prepare_pipeline("p2p-schedule-preparation"); prepare_pipeline.AddPass<P2PSchedulePreparation>(); TF_RETURN_IF_ERROR(prepare_pipeline.Run(module).status()); TF_ASSIGN_OR_RETURN( HloSchedule schedule, ScheduleGpuModuleWithMemoryScheduler(module, pointer_size)); TF_RETURN_IF_ERROR(module->set_schedule(std::move(schedule))); std::string fingerprint = module->GetFingerprint128( HloPrintOptions::Canonical().set_print_backend_config(true)); FrontendAttributes attributes; (*attributes.mutable_map())[std::string(kFingerprintBeforeLHS)] = fingerprint; module->add_frontend_attributes(attributes); VLOG(1) << "Fingerprint before LHS for module " << module->name() << "(" << module->unique_id() << ") = " << fingerprint; const bool enable_latency_hiding_scheduler = module->config() .debug_options() .xla_gpu_enable_latency_hiding_scheduler(); if (!enable_latency_hiding_scheduler) { return ScheduleMetadata{memory_limit}; } SchedulerConfig config = GetSchedulerConfig(memory_limit); auto gpu_latency_estimator = std::make_unique<GpuLatencyEstimator>(pointer_size); std::unique_ptr<LatencyEstimator> latency_estimator; std::optional<tensorflow::profiler::ProfiledInstructionsProto> profile = ReadPGLEProfile(module, fingerprint); const bool enable_analytical_latency_estimator = module->config() .debug_options() .xla_gpu_enable_analytical_latency_estimator(); HloPassPipeline pipeline("latency-hiding-scheduler"); if (profile.has_value()) { auto aggregator = std::make_unique<GPUProfileStatisticsAggregator>(); auto pg_latency_estimator = std::make_unique<ProfileGuidedLatencyEstimator>( config, std::move(gpu_latency_estimator), profile.value(), std::move(aggregator)); LOG(INFO) << "Found profile, using profile guided latency estimator"; VLOG(1) << "Profile:\n" << profile->DebugString(); if (module->config() .debug_options() .xla_gpu_enable_pgle_accuracy_checker()) { pipeline.AddPass<PGLEAccuracyChecker>(*pg_latency_estimator); } latency_estimator = std::move(pg_latency_estimator); } else if (enable_analytical_latency_estimator) { latency_estimator = std::make_unique<AnalyticalLatencyEstimator>( config, std::move(gpu_latency_estimator), gpu_device_info, [input_pointer_size = pointer_size](const Shape& shape) { return GetSizeOfShape(shape, input_pointer_size); }, module->entry_computation()); LOG(INFO) << "Using analytical latency estimator"; } else { latency_estimator = std::move(gpu_latency_estimator); } auto async_tracker = [&]() -> std::unique_ptr<AsyncTracker> { return module->config() .debug_options() .xla_gpu_lhs_enable_gpu_async_tracker() ? 
std::make_unique<GpuAsyncTracker>(config) : std::make_unique<GpuAsyncTrackerBase>(config); }(); auto shape_size_in_bytes = [pointer_size](const Shape& shape) { return GetSizeOfShape(shape, pointer_size); }; auto scheduler_core = std::make_unique<DefaultSchedulerCore>( shape_size_in_bytes, async_tracker.get(), latency_estimator.get(), config); pipeline.AddPass<SchedulingInstructionAnnotator>(); pipeline.AddPass<LatencyHidingScheduler>( std::move(latency_estimator), std::move(async_tracker), std::move(scheduler_core), shape_size_in_bytes); TF_RETURN_IF_ERROR(pipeline.Run(module).status()); HloPassPipeline postprocessing_pipeline("schedule-postprocessing"); postprocessing_pipeline.AddPass<SchedulePostprocessing>(); TF_RETURN_IF_ERROR(postprocessing_pipeline.Run(module).status()); return ScheduleMetadata{memory_limit}; } absl::StatusOr<HloSchedule> ScheduleGpuModuleWithMemoryScheduler( const HloModule* module, int64_t pointer_size, int64_t* peak_memory_bytes) { return ScheduleModule( module, [pointer_size](const BufferValue& buffer) { return ShapeUtil::ByteSizeOf(buffer.shape(), pointer_size); }, ComputationSchedulerToModuleScheduler(DefaultMemoryScheduler, PostProcessSchedule), {}, peak_memory_bytes); } HloInstructionSequence PostProcessSchedule( const HloInstructionSequence& input) { HloInstructionSequence result = PostprocessorToScheduleSyncCollectives(input); return PostprocessorToScheduleAsEarlyOrLateAsPossible(result); } static int64_t GetSchedulerMemoryLimit( const HloModule* module, const se::DeviceDescription& gpu_device_info, int pointer_size) { const int64_t base_limit = module->config().device_memory_size() != 0 ? module->config().device_memory_size() : gpu_device_info.device_memory_size() * 80 / 100; int64_t total_io_size = 0; for (HloInstruction* param : module->entry_computation()->parameter_instructions()) { ShapeUtil::ForEachSubshape( param->shape(), [&](const Shape& subshape, const ShapeIndex& ) { total_io_size += GetSizeOfShape(subshape, pointer_size); }); } ShapeUtil::ForEachSubshape( module->result_shape(), [&](const Shape& subshape, const ShapeIndex& ) { total_io_size += GetSizeOfShape(subshape, pointer_size); }); module->input_output_alias_config().ForEachAlias( [&](const ShapeIndex& output_index, const HloInputOutputAliasConfig::Alias&) { const Shape& subshape = ShapeUtil::GetSubshape(module->result_shape(), output_index); total_io_size -= GetSizeOfShape(subshape, pointer_size); }); int64_t limit = (base_limit - total_io_size) * module->config().debug_options().xla_gpu_memory_limit_slop_factor() / 100; return limit; } } }
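The source above ends with GetSchedulerMemoryLimit, which derives the memory budget handed to the latency-hiding scheduler: an explicit per-module device_memory_size if one is set, otherwise 80% of device memory, minus the non-aliased entry I/O, scaled by xla_gpu_memory_limit_slop_factor. The stand-alone C++ sketch below merely replays that arithmetic with assumed example numbers (an 80 GiB device, no per-module override, 4 GiB of non-aliased I/O, a slop factor of 95); it is an editorial illustration of the formula, not part of the XLA sources.

#include <cstdint>
#include <cstdio>

// Replays the GetSchedulerMemoryLimit arithmetic with assumed example
// values; only the formula mirrors the code above.
int main() {
  const int64_t device_memory_size = 80LL * (1LL << 30);  // assumed 80 GiB device
  const int64_t config_device_memory_size = 0;            // assumed: no per-module override
  const int64_t total_io_size = 4LL * (1LL << 30);        // assumed 4 GiB of parameters + results
  const int64_t slop_factor = 95;                         // assumed xla_gpu_memory_limit_slop_factor

  // base_limit: explicit config value if set, otherwise 80% of device memory.
  const int64_t base_limit = config_device_memory_size != 0
                                 ? config_device_memory_size
                                 : device_memory_size * 80 / 100;
  // Final limit: subtract I/O that must stay live, then apply the slop factor.
  const int64_t limit = (base_limit - total_io_size) * slop_factor / 100;
  std::printf("scheduler memory limit: %lld bytes (~%.1f GiB)\n",
              static_cast<long long>(limit),
              static_cast<double>(limit) / static_cast<double>(1LL << 30));
  return 0;
}

The record's Unit Test - (Ground Truth) column, gpu_hlo_schedule_test.cc, follows.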
#include "xla/service/gpu/gpu_hlo_schedule.h" #include <algorithm> #include <cstdint> #include <cstdlib> #include <memory> #include <optional> #include <string> #include <string_view> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/algorithm/container.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/collective_device_list.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/ir/hlo_schedule.h" #include "xla/hlo/utils/hlo_query.h" #include "xla/service/backend.h" #include "xla/service/gpu/gpu_compiler.h" #include "xla/service/hlo_module_config.h" #include "xla/service/hlo_ordering.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/stream_executor/device_description.h" #include "xla/tests/filecheck.h" #include "xla/tests/hlo_test_base.h" #include "xla/tests/test_utils.h" #include "xla/tests/verified_hlo_module.h" #include "xla/tsl/lib/core/status_test_util.h" #include "tsl/platform/status.h" #include "tsl/platform/status_matchers.h" #include "tsl/platform/statusor.h" #include "tsl/profiler/protobuf/profiled_instructions.pb.h" namespace xla { namespace gpu { using ::testing::ElementsAre; using ::tsl::testing::StatusIs; class GpuHloScheduleTest : public HloTestBase { protected: using HloVec = std::vector<HloInstruction*>; Shape f32_2x2_ = ShapeUtil::MakeShape(F32, {2, 2}); SequentialHloOrdering BuildHloOrdering(HloModule* module) { Backend& test_backend = backend(); const se::DeviceDescription& gpu_device_info = test_backend.default_stream_executor()->GetDeviceDescription(); TF_CHECK_OK(ScheduleGpuModule(module, 8, gpu_device_info) .status()); return SequentialHloOrdering{module->schedule()}; } HloModuleConfig GetModuleConfig(bool enable_latency_hiding_scheduler, bool enable_gpu_async_tracker = false, absl::string_view fdo_profile = "") { HloModuleConfig config; DebugOptions debug_options = GetDebugOptionsForTest(); debug_options.set_xla_gpu_enable_latency_hiding_scheduler( enable_latency_hiding_scheduler); debug_options.set_xla_gpu_lhs_enable_gpu_async_tracker( enable_gpu_async_tracker); config.set_debug_options(debug_options); *config.mutable_fdo_profile() = fdo_profile; return config; } std::unique_ptr<HloModule> CreateNewVerifiedModule( bool enable_latency_hiding_scheduler = false) { return std::make_unique<HloModule>( "test_module", GetModuleConfig(enable_latency_hiding_scheduler)); } static bool HasValidFingerprint(HloModule* module) { const FrontendAttributes& attrs = module->frontend_attributes(); auto it = attrs.map().find(kFingerprintBeforeLHS); return it != attrs.map().end() && it->second.size() == 128 / 4; } }; TEST_F(GpuHloScheduleTest, SequentialMatMul) { HloComputation::Builder builder("entry_computation"); HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter( 0, f32_2x2_, "x")); HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter( 1, f32_2x2_, "y")); HloInstruction* z = builder.AddInstruction(HloInstruction::CreateParameter( 2, f32_2x2_, "z")); HloInstruction* dot1 = builder.AddInstruction(CreateCanonicalDot(f32_2x2_, x, y)); HloInstruction* dot2 = builder.AddInstruction(CreateCanonicalDot(f32_2x2_, dot1, z)); auto module = CreateNewVerifiedModule(); module->AddEntryComputation(builder.Build(dot2)); SequentialHloOrdering order = BuildHloOrdering(module.get()); 
EXPECT_TRUE(order.ExecutesBefore(y, x)); EXPECT_TRUE(order.ExecutesBefore(y, dot1)); EXPECT_TRUE(order.ExecutesBefore(z, dot1)); EXPECT_TRUE(order.ExecutesBefore(z, dot2)); EXPECT_TRUE(order.ExecutesBefore(dot1, dot2)); EXPECT_TRUE(HasValidFingerprint(module.get())); } TEST_F(GpuHloScheduleTest, SequentialAdd) { HloComputation::Builder builder("entry_computation"); HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter( 0, f32_2x2_, "x")); HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter( 1, f32_2x2_, "y")); HloInstruction* z = builder.AddInstruction(HloInstruction::CreateParameter( 2, f32_2x2_, "z")); HloInstruction* add1 = builder.AddInstruction( HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, x, y)); HloInstruction* add2 = builder.AddInstruction( HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, y, z)); HloInstruction* add3 = builder.AddInstruction( HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, add1, add2)); auto module = CreateNewVerifiedModule(); module->AddEntryComputation(builder.Build(add3)); SequentialHloOrdering order = BuildHloOrdering(module.get()); EXPECT_TRUE(order.ExecutesBefore(y, x)); EXPECT_TRUE(order.ExecutesBefore(y, add1)); EXPECT_TRUE(order.ExecutesBefore(z, add1)); EXPECT_TRUE(order.ExecutesBefore(z, add2)); EXPECT_TRUE(order.ExecutesBefore(add1, add2)); EXPECT_TRUE(order.ExecutesBefore(add2, add3)); EXPECT_TRUE(HasValidFingerprint(module.get())); } TEST_F(GpuHloScheduleTest, AsyncCustomCall) { HloComputation::Builder builder("entry_computation"); HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter( 0, f32_2x2_, "x")); HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter( 1, f32_2x2_, "y")); HloInstruction* z = builder.AddInstruction(HloInstruction::CreateParameter( 2, f32_2x2_, "z")); HloInstruction* add0 = builder.AddInstruction( HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, x, y)); HloInstruction* add1 = builder.AddInstruction( HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, add0, y)); HloInstruction* add2 = builder.AddInstruction( HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, add1, z)); HloInstruction* nonblocking_call = builder.AddInstruction(HloInstruction::CreateCustomCall( f32_2x2_, {add0}, "nonblocking-call-start", "")); static_cast<HloCustomCallInstruction*>(nonblocking_call) ->set_custom_call_schedule(SCHEDULE_EARLIEST); TF_CHECK_OK(add1->AddControlDependencyTo(nonblocking_call)); HloInstruction* blocking_call = builder.AddInstruction(HloInstruction::CreateCustomCall( f32_2x2_, {nonblocking_call}, "blocking-call-done", "")); static_cast<HloCustomCallInstruction*>(blocking_call) ->set_custom_call_schedule(SCHEDULE_LATEST); HloInstruction* add3 = builder.AddInstruction( HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, add1, add2)); HloInstruction* add4 = builder.AddInstruction(HloInstruction::CreateBinary( f32_2x2_, HloOpcode::kAdd, add3, blocking_call)); auto module = CreateNewVerifiedModule(); module->AddEntryComputation(builder.Build(add4)); SequentialHloOrdering order = BuildHloOrdering(module.get()); VLOG(2) << order.ToString(); EXPECT_TRUE(order.ExecutesBefore(add0, nonblocking_call)); EXPECT_TRUE(order.ExecutesBefore(add1, nonblocking_call)); EXPECT_TRUE(order.ExecutesBefore(nonblocking_call, add2)); EXPECT_TRUE(order.ExecutesBefore(nonblocking_call, add3)); EXPECT_TRUE(order.ExecutesBefore(nonblocking_call, add4)); EXPECT_TRUE(order.ExecutesBefore(add3, blocking_call)); 
EXPECT_TRUE(order.ExecutesBefore(blocking_call, add4)); EXPECT_TRUE(HasValidFingerprint(module.get())); } TEST_F(GpuHloScheduleTest, AsyncCollectivePermute) { std::unique_ptr<HloModule> module = CreateNewVerifiedModule(); HloComputation::Builder builder("entry_computation"); HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter( 0, f32_2x2_, "x")); HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter( 1, f32_2x2_, "y")); HloInstruction* z = builder.AddInstruction(HloInstruction::CreateParameter( 2, f32_2x2_, "z")); HloInstruction* add0 = builder.AddInstruction( HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, x, y)); HloInstruction* add1 = builder.AddInstruction( HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, add0, y)); HloInstruction* add2 = builder.AddInstruction( HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, add1, z)); Shape u32_scalar = ShapeUtil::MakeShape(U32, {}); Shape collective_permute_start_shape = ShapeUtil::MakeTupleShape({f32_2x2_, f32_2x2_}); HloInstruction* collective_permute_start = builder.AddInstruction(HloInstruction::CreateCollectivePermuteStart( collective_permute_start_shape, add0, {{0, 1}}, std::nullopt)); TF_CHECK_OK(add1->AddControlDependencyTo(collective_permute_start)); HloInstruction* collective_permute_done = builder.AddInstruction( HloInstruction::CreateUnary(f32_2x2_, HloOpcode::kCollectivePermuteDone, collective_permute_start)); HloInstruction* add3 = builder.AddInstruction( HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, add1, add2)); HloInstruction* add4 = builder.AddInstruction(HloInstruction::CreateBinary( f32_2x2_, HloOpcode::kAdd, add3, collective_permute_done)); module->AddEntryComputation(builder.Build(add4)); SequentialHloOrdering order = BuildHloOrdering(module.get()); VLOG(2) << order.ToString(); EXPECT_TRUE(order.ExecutesBefore(add0, collective_permute_start)); EXPECT_TRUE(order.ExecutesBefore(add1, collective_permute_start)); EXPECT_TRUE(order.ExecutesBefore(collective_permute_start, add2)); EXPECT_TRUE(order.ExecutesBefore(collective_permute_start, add3)); EXPECT_TRUE(order.ExecutesBefore(collective_permute_start, add4)); EXPECT_TRUE(order.ExecutesBefore(add3, collective_permute_done)); EXPECT_TRUE(order.ExecutesBefore(collective_permute_done, add4)); EXPECT_TRUE(HasValidFingerprint(module.get())); } TEST_F(GpuHloScheduleTest, LHSCostModel) { const char* hlo_text = R"( HloModule AsyncAR apply_op { x = f32[] parameter(0) y = f32[] parameter(1) ROOT apply_op = f32[] add(x, y) } ENTRY ar { p0 = f32[32] parameter(0) p1 = f32[32, 32] parameter(1) p2 = f32[32, 32] parameter(2) p3 = f32[32] parameter(3) dot0 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm" dot1 = f32[32,32]{1,0} custom-call(dot0, p2), custom_call_target="__cublas$gemm" dot2 = f32[32,32]{1,0} custom-call(dot1, p2), custom_call_target="__cublas$gemm" dot3 = f32[32,32]{1,0} custom-call(dot2, p2), custom_call_target="__cublas$gemm" dot4 = f32[32,32]{1,0} custom-call(dot3, p2), custom_call_target="__cublas$gemm" dot5 = f32[32,32]{1,0} custom-call(dot4, p2), custom_call_target="__cublas$gemm" dot6 = f32[32,32]{1,0} custom-call(dot5, p2), custom_call_target="__cublas$gemm" ar-start = f32[32] all-reduce-start(p0), to_apply=apply_op ar-done = f32[32] all-reduce-done(ar-start) ar-start1 = f32[32] all-reduce-start(p3), to_apply=apply_op ar-done1 = f32[32] all-reduce-done(ar-start1) add0 = f32[32,32] add(dot0, dot1) add1 = f32[32,32] add(add0, dot2) add2 = f32[32,32] add(add1, dot3) add3 = 
f32[32,32] add(add2, dot4) add4 = f32[32,32] add(add3, dot5) add5 = f32[32,32] add(add4, dot6) ROOT t = (f32[32], f32[32], f32[32,32]) tuple(ar-done, ar-done1, add5) })"; TF_ASSERT_OK_AND_ASSIGN( auto module, ParseAndReturnVerifiedModule( hlo_text, GetModuleConfig(true))); SequentialHloOrdering order = BuildHloOrdering(module.get()); HloComputation* entry = module->entry_computation(); std::vector<int64_t> count_between_pairs; bool in_between = false; for (const HloInstruction* inst : order.SequentialOrder(*entry)->instructions()) { if (inst->opcode() == HloOpcode::kAllReduceStart) { in_between = true; count_between_pairs.push_back(0); } else if (inst->opcode() == HloOpcode::kAllReduceDone) { in_between = false; } else if (in_between && inst->opcode() == HloOpcode::kCustomCall) { count_between_pairs.back()++; } } EXPECT_EQ(count_between_pairs.size(), 2); EXPECT_GT(count_between_pairs[0], 0); EXPECT_GT(count_between_pairs[1], 0); EXPECT_TRUE(HasValidFingerprint(module.get())); } TEST_F(GpuHloScheduleTest, ScheduleGpuModuleWithMemorySchedulerReturnsPeakMemoryBytes) { absl::string_view kHloText = R"( HloModule m ENTRY ar { p0 = f32[32,32] parameter(0) p1 = f32[32,32] parameter(1) ROOT _ = f32[32,32]{1,0} custom-call(p0, p1), custom_call_target="__cublas$gemm" })"; TF_ASSERT_OK_AND_ASSIGN( auto module, ParseAndReturnVerifiedModule( kHloText, GetModuleConfig(true))); int64_t pointer_size = dynamic_cast<GpuCompiler*>(backend().compiler())->GetPointerSize(); int64_t peak_memory_bytes = -1; TF_ASSERT_OK_AND_ASSIGN(auto schedule, ScheduleGpuModuleWithMemoryScheduler( module.get(), pointer_size, &peak_memory_bytes)); EXPECT_GT(peak_memory_bytes, 0); } TEST_F(GpuHloScheduleTest, LHSCostModelCostlyAR) { const char* hlo_text = R"( HloModule AsyncAR apply_op { x = bf16[] parameter(0) y = bf16[] parameter(1) ROOT apply_op = bf16[] add(x, y) } ENTRY ar { p0 = bf16[32505856] parameter(0) p1 = f32[32, 32] parameter(1) p2 = f32[32, 32] parameter(2) dot0 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm" dot1 = f32[32,32]{1,0} custom-call(dot0, p2), custom_call_target="__cublas$gemm" dot2 = f32[32,32]{1,0} custom-call(dot1, p2), custom_call_target="__cublas$gemm" dot3 = f32[32,32]{1,0} custom-call(dot2, p2), custom_call_target="__cublas$gemm" dot4 = f32[32,32]{1,0} custom-call(dot3, p2), custom_call_target="__cublas$gemm" dot5 = f32[32,32]{1,0} custom-call(dot4, p2), custom_call_target="__cublas$gemm" dot6 = f32[32,32]{1,0} custom-call(dot5, p2), custom_call_target="__cublas$gemm" ar-start = bf16[32505856] all-reduce-start(p0), to_apply=apply_op ar-done = bf16[32505856] all-reduce-done(ar-start) ROOT t = (bf16[32505856], f32[32,32]) tuple(ar-done, dot6) })"; TF_ASSERT_OK_AND_ASSIGN( auto module, ParseAndReturnVerifiedModule( hlo_text, GetModuleConfig(true))); SequentialHloOrdering order = BuildHloOrdering(module.get()); HloComputation* entry = module->entry_computation(); std::vector<int64_t> count_between_pairs; bool in_between = false; for (const HloInstruction* inst : order.SequentialOrder(*entry)->instructions()) { if (inst->opcode() == HloOpcode::kAllReduceStart) { in_between = true; count_between_pairs.push_back(0); } else if (inst->opcode() == HloOpcode::kAllReduceDone) { in_between = false; } else if (in_between && inst->opcode() == HloOpcode::kCustomCall) { count_between_pairs.back()++; } } EXPECT_EQ(count_between_pairs.size(), 1); EXPECT_EQ(count_between_pairs[0], 7); EXPECT_TRUE(HasValidFingerprint(module.get())); } TEST_F(GpuHloScheduleTest, ProfileGuidedCostModel) { const 
char* hlo_text = R"( HloModule AsyncAR apply_op { x = f32[] parameter(0) y = f32[] parameter(1) ROOT apply_op = f32[] add(x, y) } ENTRY ar { p0 = f32[32] parameter(0) p1 = f32[32, 32] parameter(1) p2 = f32[32, 32] parameter(2) p3 = f32[32] parameter(3) dot0 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm" dot1 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm" add0 = f32[32,32] add(dot0, dot1) ar-start = f32[32] all-reduce-start(p0), to_apply=apply_op ar-done = f32[32] all-reduce-done(ar-start) ar-start1 = f32[32] all-reduce-start(p3), to_apply=apply_op ar-done1 = f32[32] all-reduce-done(ar-start1) ROOT t = (f32[32], f32[32], f32[32,32]) tuple(ar-done, ar-done1, add0) })"; struct SubTest { std::string profile; std::string target_start, target_done; }; std::vector<SubTest> subtests; const std::string ar_long_latency_proto_text = R"pb( costs { name: "dot0" cost_us: 100.0 } costs { name: "dot1" cost_us: 100.0 } costs { name: "add0" cost_us: 10.0 } costs { name: "ar-start" cost_us: 1000.0 } costs { name: "ar-start1" cost_us: 10.0 } )pb"; subtests.push_back({ar_long_latency_proto_text, "ar-start", "ar-done"}); const std::string ar1_long_latency_proto_text = R"pb( costs { name: "dot0" cost_us: 100.0 } costs { name: "dot1" cost_us: 100.0 } costs { name: "add0" cost_us: 10.0 } costs { name: "ar-start" cost_us: 10.0 } costs { name: "ar-start1" cost_us: 1000.0 } )pb"; tensorflow::profiler::ProfiledInstructionsProto profile; ASSERT_TRUE(tsl::protobuf::TextFormat::ParseFromString( ar1_long_latency_proto_text, &profile)); std::string ar1_long_latency_proto_binary = profile.SerializeAsString(); subtests.push_back({profile.SerializeAsString(), "ar-start1", "ar-done1"}); for (const SubTest& subtest : subtests) { TF_ASSERT_OK_AND_ASSIGN( auto module, ParseAndReturnVerifiedModule( hlo_text, GetModuleConfig(true, true, subtest.profile))); SequentialHloOrdering order = BuildHloOrdering(module.get()); HloComputation* entry = module->entry_computation(); bool between_target_collective_pair = false; for (const HloInstruction* inst : order.SequentialOrder(*entry)->instructions()) { if (inst->name() == subtest.target_start) { between_target_collective_pair = true; } else if (inst->name() == subtest.target_done) { between_target_collective_pair = false; } else if (inst->opcode() == HloOpcode::kDot || inst->opcode() == HloOpcode::kAdd) { EXPECT_TRUE(between_target_collective_pair); } } } } TEST_F(GpuHloScheduleTest, ProfileGuidedCostModelFailsWithIncompleteProfile) { const absl::string_view kHloString = R"( HloModule m apply_op { x = f32[] parameter(0) y = f32[] parameter(1) ROOT apply_op = f32[] add(x, y) } ENTRY ar { p0 = f32[32] parameter(0) p1 = f32[32,32] parameter(1) p2 = f32[32,32] parameter(2) p3 = f32[32] parameter(3) dot0 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm" dot1 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm" add0 = f32[32,32] add(dot0, dot1) ar-start = f32[32] all-reduce-start(p0), to_apply=apply_op ar-done = f32[32] all-reduce-done(ar-start) ar-start1 = f32[32] all-reduce-start(p3), to_apply=apply_op ar-done1 = f32[32] all-reduce-done(ar-start1) ROOT t = (f32[32],f32[32],f32[32,32]) tuple(ar-done, ar-done1, add0) })"; const absl::string_view kProfile = R"pb( costs { name: "dot0" cost_us: 100.0 } costs { name: "add0" cost_us: 10.0 } costs { name: "ar-start" cost_us: 1000.0 } )pb"; TF_ASSERT_OK_AND_ASSIGN( auto module, ParseAndReturnVerifiedModule( kHloString, GetModuleConfig(true, true, 
kProfile))); HloModuleConfig config(module->config()); DebugOptions dboptions(config.debug_options()); dboptions.set_xla_gpu_enable_pgle_accuracy_checker(true); config.set_debug_options(dboptions); module->set_config(config); EXPECT_THAT(ScheduleGpuModule( module.get(), 8, backend().default_stream_executor()->GetDeviceDescription()) .status(), StatusIs(absl::StatusCode::kInvalidArgument)); } TEST_F( GpuHloScheduleTest, ProfileGuidedCostModelDoesNotFailWithIncompleteProfileIfAccuracyCheckerIsDisabled) { const absl::string_view kHloString = R"( HloModule m apply_op { x = f32[] parameter(0) y = f32[] parameter(1) ROOT apply_op = f32[] add(x, y) } ENTRY ar { p0 = f32[32] parameter(0) p1 = f32[32,32] parameter(1) p2 = f32[32,32] parameter(2) p3 = f32[32] parameter(3) dot0 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm" dot1 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm" add0 = f32[32,32] add(dot0, dot1) ar-start = f32[32] all-reduce-start(p0), to_apply=apply_op ar-done = f32[32] all-reduce-done(ar-start) ar-start1 = f32[32] all-reduce-start(p3), to_apply=apply_op ar-done1 = f32[32] all-reduce-done(ar-start1) ROOT t = (f32[32],f32[32],f32[32,32]) tuple(ar-done, ar-done1, add0) })"; const absl::string_view kProfile = R"pb( costs { name: "dot0" cost_us: 100.0 } costs { name: "add0" cost_us: 10.0 } costs { name: "ar-start" cost_us: 1000.0 } )pb"; TF_ASSERT_OK_AND_ASSIGN( auto module, ParseAndReturnVerifiedModule( kHloString, GetModuleConfig(true, true, kProfile))); module->mutable_config().mutable_debug_options().add_xla_disable_hlo_passes( "pgle-accuracy-checker"); TF_EXPECT_OK(ScheduleGpuModule( module.get(), 8, backend().default_stream_executor()->GetDeviceDescription()) .status()); } TEST_F(GpuHloScheduleTest, ProfileGuidedCostModelWithRematData) { const char* hlo_text = R"( HloModule AsyncAR apply_op { x = f32[] parameter(0) y = f32[] parameter(1) ROOT apply_op = f32[] add(x, y) } ENTRY ar { p0 = f32[32] parameter(0) p1 = f32[32, 32] parameter(1) p2 = f32[32, 32] parameter(2) p3 = f32[32] parameter(3) dot0 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm" dot1 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm" add0 = f32[32,32] add(dot0, dot1) ar-start = f32[32] all-reduce-start(p0), to_apply=apply_op ar-done = f32[32] all-reduce-done(ar-start) ar-start1 = f32[32] all-reduce-start(p3), to_apply=apply_op ar-done1 = f32[32] all-reduce-done(ar-start1) ROOT t = (f32[32], f32[32], f32[32,32]) tuple(ar-done, ar-done1, add0) })"; const std::string ar_long_latency_proto_text = R"pb( costs { name: "dot0" cost_us: 100.0 } costs { name: "dot1" cost_us: 100.0 } costs { name: "add0" cost_us: 10.0 } costs { name: "ar-start" cost_us: 1.0 } costs { name: "ar-start1" cost_us: 1.0 } costs { name: "ar-start.remat100" cost_us: 2000.0 } )pb"; TF_ASSERT_OK_AND_ASSIGN( auto module, ParseAndReturnVerifiedModule( hlo_text, GetModuleConfig(true, true, ar_long_latency_proto_text))); SequentialHloOrdering order = BuildHloOrdering(module.get()); HloComputation* entry = module->entry_computation(); bool between_target_collective_pair = false; for (const HloInstruction* inst : order.SequentialOrder(*entry)->instructions()) { if (inst->name() == "ar-start") { between_target_collective_pair = true; } else if (inst->name() == "ar-done") { between_target_collective_pair = false; } else if (inst->opcode() == HloOpcode::kDot || inst->opcode() == HloOpcode::kAdd) { EXPECT_TRUE(between_target_collective_pair); } } } 
TEST_F(GpuHloScheduleTest, LHSSendRecv) { const char* hlo_text = R"( HloModule test while_cond { param = (u32[], f32[1, 1024, 1024]) parameter(0) count = get-tuple-element(%param), index=0 ub = u32[] constant(25) ROOT cond_result = pred[] compare(count, ub), direction=LT } while_body { param = (u32[], f32[1, 1024, 1024]) parameter(0) count = get-tuple-element(%param), index=0 send-data = get-tuple-element(%param), index=1 after-all = token[] after-all() recv = (f32[1, 1024, 1024], u32[], token[]) recv(after-all), channel_id=1, frontend_attributes={ _xla_send_recv_source_target_pairs="{{0, 1}}" } send = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all), channel_id=1, frontend_attributes={ _xla_send_recv_source_target_pairs="{{0, 1}}" } recv-done = (f32[1, 1024, 1024], token[]) recv-done(recv), channel_id=1 send-done = token[] send-done(send), channel_id=1 recv-data = f32[1, 1024, 1024] get-tuple-element(recv-done), index=0 c1 = u32[] constant(1) new_count = u32[] add(count, c1) replica = u32[] replica-id() c10 = u32[] constant(10) sum = u32[] add(replica, c10) sum2 = u32[] add(sum, count) conv = f32[] convert(sum2) p = f32[1, 1024, 1024] broadcast(conv), dimensions={} b = f32[1, 1024, 1024] add(p, recv-data) c = f32[1, 1024, 1024] multiply(b, b) d = f32[1, 1024, 1024] tan(c) s = f32[1, 1024, 1024] dot(c, d), lhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1} ROOT result = (u32[], f32[1, 1024, 1024]) tuple(new_count, s) } ENTRY test_computation { c0 = u32[] constant(0) f0 = f32[] constant(0.0) init = f32[1, 1024, 1024] broadcast(f0), dimensions={} while_init = (u32[], f32[1, 1024, 1024]) tuple(c0, init) while_result = (u32[], f32[1, 1024, 1024]) while(while_init), body=while_body, condition=while_cond ROOT entry_result = f32[1, 1024, 1024] get-tuple-element(while_result), index=1 } )"; TF_ASSERT_OK_AND_ASSIGN( auto module, ParseAndReturnVerifiedModule( hlo_text, GetModuleConfig(true))); SequentialHloOrdering order = BuildHloOrdering(module.get()); HloComputation* while_body = module->GetComputationWithName("while_body"); const std::vector<HloInstruction*>& instruction_sequence = order.SequentialOrder(*while_body)->instructions(); auto get_index = [&](absl::string_view hlo_name) { return absl::c_find_if(instruction_sequence, [hlo_name](HloInstruction* instruction) { return instruction->name() == hlo_name; }) - instruction_sequence.begin(); }; EXPECT_LT(get_index("recv"), get_index("send")); EXPECT_LT(get_index("send"), get_index("recv-done")); EXPECT_GE(get_index("send-done") - get_index("recv-done"), 8); EXPECT_LT(abs(get_index("send-done") - get_index("result")), 2); EXPECT_TRUE(HasValidFingerprint(module.get())); } TEST_F(GpuHloScheduleTest, LHSSendRecvPairs2) { const char* hlo_text = R"( HloModule test while_cond { param = (u32[], f32[1, 1024, 1024]) parameter(0) count = get-tuple-element(%param), index=0 ub = u32[] constant(25) ROOT cond_result = pred[] compare(count, ub), direction=LT } while_body { param = (u32[], f32[1, 1024, 1024]) parameter(0) count = get-tuple-element(%param), index=0 send-data = get-tuple-element(%param), index=1 after-all-0 = token[] after-all() recv-0 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all-0), channel_id=1, frontend_attributes={ _xla_send_recv_source_target_pairs="{{0, 1}}" } send-0 = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all-0), channel_id=1, frontend_attributes={ _xla_send_recv_source_target_pairs="{{0, 1}}" } recv-done-0 = (f32[1, 1024, 1024], token[]) recv-done(recv-0), 
channel_id=1 send-done-0 = token[] send-done(send-0), channel_id=1 recv-data-0 = f32[1, 1024, 1024] get-tuple-element(recv-done-0), index=0 c1 = u32[] constant(1) new_count = u32[] add(count, c1) replica = u32[] replica-id() c10 = u32[] constant(10) sum = u32[] add(replica, c10) sum2 = u32[] add(sum, count) conv = f32[] convert(sum2) bc1 = f32[1, 1024, 1024] broadcast(conv), dimensions={} after-all-1 = token[] after-all() recv-1 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all-1), channel_id=2, frontend_attributes={ _xla_send_recv_source_target_pairs="{{1, 0}}" } send-1 = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all-1), channel_id=2, frontend_attributes={ _xla_send_recv_source_target_pairs="{{1, 0}}" } recv-done-1 = (f32[1, 1024, 1024], token[]) recv-done(recv-1), channel_id=2 send-done-1 = token[] send-done(send-1), channel_id=2 recv-data-1 = f32[1, 1024, 1024] get-tuple-element(recv-done-1), index=0 add2 = f32[1, 1024, 1024] add(recv-data-0, bc1) add = f32[1, 1024, 1024] add(recv-data-1, add2) ROOT result = (u32[], f32[1, 1024, 1024]) tuple(new_count, add) } ENTRY test_computation { c0 = u32[] constant(0) f0 = f32[] constant(0.0) init = f32[1, 1024, 1024] broadcast(f0), dimensions={} while_init = (u32[], f32[1, 1024, 1024]) tuple(c0, init) while_result = (u32[], f32[1, 1024, 1024]) while(while_init), body=while_body, condition=while_cond ROOT entry_result = f32[1, 1024, 1024] get-tuple-element(while_result), index=1 } )"; TF_ASSERT_OK_AND_ASSIGN( auto module, ParseAndReturnVerifiedModule( hlo_text, GetModuleConfig(true, true))); SequentialHloOrdering order = BuildHloOrdering(module.get()); HloComputation* while_body = module->GetComputationWithName("while_body"); const std::vector<HloInstruction*>& instruction_sequence = order.SequentialOrder(*while_body)->instructions(); auto get_index = [&](absl::string_view hlo_name) { return absl::c_find_if(instruction_sequence, [hlo_name](HloInstruction* instruction) { return instruction->name() == hlo_name; }) - instruction_sequence.begin(); }; EXPECT_TRUE(HasValidFingerprint(module.get())); EXPECT_LT(get_index("recv-1"), get_index("send-1")); EXPECT_LT(get_index("send-1"), get_index("recv-done-1")); EXPECT_GT(get_index("send-done-1"), get_index("send-1")); EXPECT_LT(get_index("send-done-1"), get_index("recv-0")); EXPECT_LT(abs(get_index("send-done-0") - get_index("result")), 2); } TEST_F(GpuHloScheduleTest, LHSSendRecvAllReduce) { const char* hlo_text = R"( HloModule test add (x: f32[], y: f32[]) -> f32[] { x = f32[] parameter(0) y = f32[] parameter(1) ROOT add = f32[] add(f32[] x, f32[] y) } while_cond { param = (u32[], f32[1, 1024, 1024]) parameter(0) count = get-tuple-element(%param), index=0 ub = u32[] constant(25) ROOT cond_result = pred[] compare(count, ub), direction=LT } while_body { param = (u32[], f32[1, 1024, 1024]) parameter(0) count = get-tuple-element(%param), index=0 send-data = get-tuple-element(%param), index=1 after-all = token[] after-all() recv = (f32[1, 1024, 1024], u32[], token[]) recv(after-all), channel_id=1, frontend_attributes={ _xla_send_recv_source_target_pairs="{{0, 1}}" } send = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all), channel_id=1, frontend_attributes={ _xla_send_recv_source_target_pairs="{{0, 1}}" } recv-done = (f32[1, 1024, 1024], token[]) recv-done(recv), channel_id=1 send-done = token[] send-done(send), channel_id=1 recv-data = f32[1, 1024, 1024] get-tuple-element(recv-done), index=0 c1 = u32[] constant(1) new_count = u32[] add(count, c1) replica = u32[] replica-id() 
c10 = u32[] constant(10) sum = u32[] add(replica, c10) sum2 = u32[] add(sum, count) conv = f32[] convert(sum2) p = f32[1, 1024, 1024] broadcast(conv), dimensions={} b = f32[1, 1024, 1024] add(p, recv-data) c = f32[1, 1024, 1024] multiply(b, b) d = f32[1, 1024, 1024] tan(c) s = f32[1, 1024, 1024] dot(c, d), lhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1} all-reduce-start = f32[1, 1024, 1024] all-reduce-start(f32[1, 1024, 1024] p), replica_groups={{0,1}}, to_apply=add, backend_config={"collective_backend_config":{"is_sync":false}} all-reduce-done = f32[1, 1024, 1024] all-reduce-done(f32[1, 1024, 1024] all-reduce-start) new-data = f32[1, 1024, 1024] add(s, all-reduce-done) ROOT result = (u32[], f32[1, 1024, 1024]) tuple(new_count, new-data) } ENTRY test_computation { c0 = u32[] constant(0) f0 = f32[] constant(0.0) init = f32[1, 1024, 1024] broadcast(f0), dimensions={} while_init = (u32[], f32[1, 1024, 1024]) tuple(c0, init) while_result = (u32[], f32[1, 1024, 1024]) while(while_init), body=while_body, condition=while_cond ROOT entry_result = f32[1, 1024, 1024] get-tuple-element(while_result), index=1 } )"; TF_ASSERT_OK_AND_ASSIGN( auto module, ParseAndReturnVerifiedModule( hlo_text, GetModuleConfig(true, true))); SequentialHloOrdering order = BuildHloOrdering(module.get()); HloComputation* while_body = module->GetComputationWithName("while_body"); const std::vector<HloInstruction*>& instruction_sequence = order.SequentialOrder(*while_body)->instructions(); auto get_index = [&](absl::string_view hlo_name) { return absl::c_find_if(instruction_sequence, [hlo_name](HloInstruction* instruction) { return instruction->name() == hlo_name; }) - instruction_sequence.begin(); }; EXPECT_LT(get_index("recv"), get_index("send")); EXPECT_LT(get_index("send"), get_index("recv-done")); EXPECT_GE(get_index("send-done") - get_index("recv-done"), 3); EXPECT_TRUE(get_index("send-done") < get_index("all-reduce-start") || get_index("recv") > get_index("all-reduce-start")); EXPECT_TRUE(HasValidFingerprint(module.get())); } TEST_F(GpuHloScheduleTest, LHSSendRecvPipelined1) { const char* hlo_text = R"( HloModule test while_cond { param = (u32[], (f32[1,1024,1024], token[]), token[]) parameter(0) count = get-tuple-element(param), index=0 ub = u32[] constant(25) ROOT cond-result = pred[] compare(count, ub), direction=LT } while_body { param = (u32[], (f32[1,1024,1024], token[]), token[]) parameter(0) count = get-tuple-element(param), index=0 recv-done.1.q = (f32[1,1024,1024], token[]) get-tuple-element(param), index=1 recv-data = f32[1, 1024, 1024] get-tuple-element(recv-done.1.q), index=0 c1 = u32[] constant(1) new-count = u32[] add(count, c1) replica = u32[] replica-id() c10 = u32[] constant(10) sum = u32[] add(replica, c10) sum2 = u32[] add(sum, count) conv = f32[] convert(sum2) p = f32[1, 1024, 1024] broadcast(conv), dimensions={} b = f32[1, 1024, 1024] add(p, recv-data) c = f32[1, 1024, 1024] multiply(b, b) d = f32[1, 1024, 1024] tan(c) s = f32[1, 1024, 1024] dot(c, d), lhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1} send-data = f32[1, 1024, 1024] add(c, s) after-all.1 = token[] after-all() send.1 = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all.1), channel_id=1, frontend_attributes={ _xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}", _xla_send_recv_pipeline="0" } recv.1 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all.1), channel_id=1, frontend_attributes={ 
_xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}", _xla_send_recv_pipeline="0" } recv-done.1 = (f32[1,1024,1024], token[]) recv-done(recv.1), channel_id=1, frontend_attributes={ _xla_send_recv_pipeline="0" } send-done.1 = token[] send-done(send.1), channel_id=1, frontend_attributes={ _xla_send_recv_pipeline="0" } ROOT body-result = (u32[], (f32[1,1024,1024], token[]), token[]) tuple(new-count, recv-done.1, send-done.1) } ENTRY main { c0 = u32[] constant(0) f0 = f32[] constant(0.0) init = f32[1, 1024, 1024] broadcast(f0), dimensions={} after-all.2 = token[] after-all() recv.2 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all.2), channel_id=1, frontend_attributes={ _xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}", _xla_send_recv_pipeline="0" } send.2 = (f32[1, 1024, 1024], u32[], token[]) send(init, after-all.2), channel_id=1, frontend_attributes={ _xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}", _xla_send_recv_pipeline="0" } recv-done.2 = (f32[1,1024,1024], token[]) recv-done(recv.2), channel_id=1, frontend_attributes={ _xla_send_recv_pipeline="0" } send-done.2 = token[] send-done(send.2), channel_id=1, frontend_attributes={ _xla_send_recv_pipeline="0" } while-init = (u32[], (f32[1,1024,1024], token[]), token[]) tuple(c0, recv-done.2, send-done.2) while-result = (u32[], (f32[1,1024,1024], token[]), token[]) while(while-init), body=while_body, condition=while_cond, backend_config={"known_trip_count":{"n":"25"}} recv-done.2.q = (f32[1,1024,1024], token[]) get-tuple-element(while-result), index=1 ROOT entry-result = f32[1, 1024, 1024] get-tuple-element(recv-done.2.q), index=0 } )"; TF_ASSERT_OK_AND_ASSIGN( auto module, ParseAndReturnVerifiedModule( hlo_text, GetModuleConfig(true, true))); SequentialHloOrdering order = BuildHloOrdering(module.get()); const std::vector<HloInstruction*>& while_body = order.SequentialOrder(*module->GetComputationWithName("while_body")) ->instructions(); const std::vector<HloInstruction*>& main = order.SequentialOrder(*module->GetComputationWithName("main")) ->instructions(); auto get_index = [](absl::string_view hlo_name, const std::vector<HloInstruction*>& instruction_sequence) { return absl::c_find_if(instruction_sequence, [hlo_name](HloInstruction* instruction) { return instruction->name() == hlo_name; }) - instruction_sequence.begin(); }; EXPECT_TRUE(HasValidFingerprint(module.get())); EXPECT_EQ(get_index("recv.2", main) + 1, get_index("send.2", main)); EXPECT_LT(get_index("send.2", main), get_index("recv-done.2", main)); EXPECT_LT(get_index("recv-done.2", main), get_index("send-done.2", main)); EXPECT_LT(get_index("send-done.2", main), get_index("while-result", main)); EXPECT_EQ(get_index("recv.1", while_body) + 1, get_index("send.1", while_body)); EXPECT_LT(get_index("send.1", while_body), get_index("recv-done.1", while_body)); EXPECT_LT(get_index("recv-done.1", while_body), get_index("send-done.1", while_body)); } TEST_F(GpuHloScheduleTest, LHSSendRecvPipelined2) { const char* hlo_text = R"( HloModule test while_cond { param = (u32[], (f32[1,1024,1024], token[]), token[], (f32[1,1024,1024], token[]), token[]) parameter(0) count = get-tuple-element(param), index=0 ub = u32[] constant(25) ROOT cond-result = pred[] compare(count, ub), direction=LT } while_body { param = (u32[], (f32[1,1024,1024], token[]), token[], (f32[1,1024,1024], token[]), token[]) parameter(0) count = get-tuple-element(param), index=0 recv-done.0.q = (f32[1,1024,1024], token[]) get-tuple-element(param), index=1 recv-data.0 = f32[1, 
1024, 1024] get-tuple-element(recv-done.0.q), index=0 recv-done.1.q = (f32[1,1024,1024], token[]) get-tuple-element(param), index=3 recv-data.1 = f32[1, 1024, 1024] get-tuple-element(recv-done.1.q), index=0 replica = u32[] replica-id() constant0 = u32[] constant(0) compare0 = pred[] compare(replica, constant0), direction=EQ compare = pred[1, 1024, 1024] broadcast(compare0), dimensions={} recv-data = f32[1, 1024, 1024] select(compare, recv-data.0, recv-data.1) c1 = u32[] constant(1) new-count = u32[] add(count, c1) c10 = u32[] constant(10) sum = u32[] add(replica, c10) sum2 = u32[] add(sum, count) conv = f32[] convert(sum2) p = f32[1, 1024, 1024] broadcast(conv), dimensions={} b = f32[1, 1024, 1024] add(p, recv-data) c = f32[1, 1024, 1024] multiply(b, b) d = f32[1, 1024, 1024] tan(c) s = f32[1, 1024, 1024] dot(c, d), lhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1} send-data = f32[1, 1024, 1024] add(c, s) after-all.0 = token[] after-all() send.0 = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all.0), channel_id=1, frontend_attributes={ _xla_send_recv_source_target_pairs="{{3,0}}", _xla_send_recv_pipeline="0" } recv.0 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all.0), channel_id=1, frontend_attributes={ _xla_send_recv_source_target_pairs="{{3,0}}", _xla_send_recv_pipeline="0" } recv-done.0 = (f32[1,1024,1024], token[]) recv-done(recv.0), channel_id=1, frontend_attributes={ _xla_send_recv_pipeline="0" } send-done.0 = token[] send-done(send.0), channel_id=1, frontend_attributes={ _xla_send_recv_pipeline="0" } after-all.1 = token[] after-all() send.1 = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all.1), channel_id=2, frontend_attributes={ _xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}}", _xla_send_recv_pipeline="1" } recv.1 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all.1), channel_id=2, frontend_attributes={ _xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}}", _xla_send_recv_pipeline="1" } recv-done.1 = (f32[1,1024,1024], token[]) recv-done(recv.1), channel_id=2, frontend_attributes={ _xla_send_recv_pipeline="1" } send-done.1 = token[] send-done(send.1), channel_id=2, frontend_attributes={ _xla_send_recv_pipeline="1" } ROOT body-result = (u32[], (f32[1,1024,1024], token[]), token[], (f32[1,1024,1024], token[]), token[]) tuple(new-count, recv-done.0, send-done.0, recv-done.1, send-done.1) } ENTRY main { c0 = u32[] constant(0) f0 = f32[] constant(0.0) init = f32[1, 1024, 1024] broadcast(f0), dimensions={} after-all.2 = token[] after-all() recv.2 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all.2), channel_id=1, frontend_attributes={ _xla_send_recv_source_target_pairs="{{3,0}}", _xla_send_recv_pipeline="0" } send.2 = (f32[1, 1024, 1024], u32[], token[]) send(init, after-all.2), channel_id=1, frontend_attributes={ _xla_send_recv_source_target_pairs="{{3,0}}", _xla_send_recv_pipeline="0" } recv-done.2 = (f32[1,1024,1024], token[]) recv-done(recv.2), channel_id=1, frontend_attributes={ _xla_send_recv_pipeline="0" } send-done.2 = token[] send-done(send.2), channel_id=1, frontend_attributes={ _xla_send_recv_pipeline="0" } after-all.3 = token[] after-all() recv.3 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all.3), channel_id=2, frontend_attributes={ _xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}}", _xla_send_recv_pipeline="1" } send.3 = (f32[1, 1024, 1024], u32[], token[]) send(init, after-all.3), channel_id=2, frontend_attributes={ _xla_send_recv_source_target_pairs="{{0,1}, {1,2}, 
{2,3}}", _xla_send_recv_pipeline="1" } recv-done.3 = (f32[1,1024,1024], token[]) recv-done(recv.3), channel_id=2, frontend_attributes={ _xla_send_recv_pipeline="1" } send-done.3 = token[] send-done(send.3), channel_id=2, frontend_attributes={ _xla_send_recv_pipeline="1" } while-init = (u32[], (f32[1,1024,1024], token[]), token[], (f32[1,1024,1024], token[]), token[]) tuple(c0, recv-done.2, send-done.2, recv-done.3, send-done.3) while-result = (u32[], (f32[1,1024,1024], token[]), token[], (f32[1,1024,1024], token[]), token[]) while(while-init), body=while_body, condition=while_cond, backend_config={"known_trip_count":{"n":"25"}} recv-done.2.q = (f32[1,1024,1024], token[]) get-tuple-element(while-result), index=1 recv-data.2 = f32[1, 1024, 1024] get-tuple-element(recv-done.2.q), index=0 recv-done.3.q = (f32[1,1024,1024], token[]) get-tuple-element(while-result), index=3 recv-data.3 = f32[1, 1024, 1024] get-tuple-element(recv-done.3.q), index=0 replica = u32[] replica-id() constant0 = u32[] constant(0) compare0 = pred[] compare(replica, constant0), direction=EQ compare = pred[1, 1024, 1024] broadcast(compare0), dimensions={} ROOT entry-result = f32[1, 1024, 1024] select(compare, recv-data.2, recv-data.3) } )"; TF_ASSERT_OK_AND_ASSIGN( auto module, ParseAndReturnVerifiedModule( hlo_text, GetModuleConfig(true, true))); SequentialHloOrdering order = BuildHloOrdering(module.get()); const std::vector<HloInstruction*>& while_body = order.SequentialOrder(*module->GetComputationWithName("while_body")) ->instructions(); const std::vector<HloInstruction*>& main = order.SequentialOrder(*module->GetComputationWithName("main")) ->instructions(); auto get_index = [](absl::string_view hlo_name, const std::vector<HloInstruction*>& instruction_sequence) { return absl::c_find_if(instruction_sequence, [hlo_name](HloInstruction* instruction) { return instruction->name() == hlo_name; }) - instruction_sequence.begin(); }; EXPECT_TRUE(HasValidFingerprint(module.get())); EXPECT_EQ(get_index("recv.2", main) + 1, get_index("send.2", main)); EXPECT_LT(get_index("send.2", main), get_index("recv.3", main)); EXPECT_EQ(get_index("recv.3", main) + 1, get_index("send.3", main)); EXPECT_LT(get_index("send.3", main), get_index("recv-done.2", main)); EXPECT_LT(get_index("recv-done.2", main), get_index("recv-done.3", main)); EXPECT_LT(get_index("recv-done.3", main), get_index("send-done.2", main)); EXPECT_LT(get_index("send-done.2", main), get_index("send-done.3", main)); EXPECT_LT(get_index("send-done.3", main), get_index("while-result", main)); EXPECT_EQ(get_index("recv.0", while_body) + 1, get_index("send.0", while_body)); EXPECT_LT(get_index("send.0", while_body), get_index("recv.1", while_body)); EXPECT_EQ(get_index("recv.1", while_body) + 1, get_index("send.1", while_body)); EXPECT_LT(get_index("send.1", while_body), get_index("recv-done.0", while_body)); EXPECT_LT(get_index("recv-done.0", while_body), get_index("recv-done.1", while_body)); EXPECT_LT(get_index("recv-done.1", while_body), get_index("send-done.0", while_body)); EXPECT_LT(get_index("send-done.0", while_body), get_index("send-done.1", while_body)); } TEST_F(GpuHloScheduleTest, SkipAlreadyScheduled) { auto module = ParseAndReturnVerifiedModule(R"( HloModule m, is_scheduled=true fused_computation { param_0 = f32[1024,1024]{1,0} parameter(0) ROOT exponential.1 = f32[1024,1024]{1,0} exponential(param_0) } fused_computation.1 { param_0.1 = f32[1024,1024]{1,0} parameter(0) ROOT negate.1 = f32[1024,1024]{1,0} negate(param_0.1) } ENTRY e { p = f32[1024,1024]{1,0} 
parameter(0) wrapped_negate = f32[1024,1024]{1,0} fusion(p), kind=kLoop, calls=fused_computation.1 wrapped_exponential = f32[1024,1024]{1,0} fusion(p), kind=kLoop, calls=fused_computation ROOT t = (f32[1024,1024]{1,0}, f32[1024,1024]{1,0}) tuple(wrapped_exponential, wrapped_negate) })") .value(); TF_CHECK_OK(ScheduleGpuModule( module.get(), 8, backend().default_stream_executor()->GetDeviceDescription()) .status()); EXPECT_TRUE(*RunFileCheck(module->ToString(), R"( )")); } TEST_F(GpuHloScheduleTest, ProfileGuidedCostModelWithForceEarliestSchedule) { const char* hlo_text = R"( HloModule AsyncAR apply_op { x = f32[] parameter(0) y = f32[] parameter(1) ROOT apply_op = f32[] add(x, y) } ENTRY main { p0 = f32[32] parameter(0) p1 = f32[32, 32] parameter(1) p2 = f32[32, 32] parameter(2) p3 = f32[32] parameter(3) dot0 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm", backend_config={"force_earliest_schedule":true} dot1 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm" add0 = f32[32,32] add(dot0, dot1) ar-start = f32[32] all-reduce-start(p0), to_apply=apply_op ar-done = f32[32] all-reduce-done(ar-start) ROOT t = (f32[32], f32[32,32]) tuple(ar-done, add0) })"; const std::string ar_long_latency_proto_text = R"pb( costs { name: "dot0" cost_us: 100.0 } costs { name: "dot1" cost_us: 100.0 } costs { name: "add0" cost_us: 10.0 } costs { name: "ar-start" cost_us: 1000.0 } )pb"; tensorflow::profiler::ProfiledInstructionsProto profile; ASSERT_TRUE(tsl::protobuf::TextFormat::ParseFromString( ar_long_latency_proto_text, &profile)); std::string ar_long_latency_proto_binary = profile.SerializeAsString(); TF_ASSERT_OK_AND_ASSIGN( auto module, ParseAndReturnVerifiedModule( hlo_text, GetModuleConfig(true, false, ar_long_latency_proto_binary))); SequentialHloOrdering order = BuildHloOrdering(module.get()); const std::vector<HloInstruction*>& main = order.SequentialOrder(*module->GetComputationWithName("main")) ->instructions(); auto get_index = [](absl::string_view hlo_name, const std::vector<HloInstruction*>& instruction_sequence) { return absl::c_find_if(instruction_sequence, [hlo_name](HloInstruction* instruction) { return instruction->name() == hlo_name; }) - instruction_sequence.begin(); }; EXPECT_LT(get_index("dot0", main), get_index("ar-start", main)); EXPECT_GT(get_index("dot1", main), get_index("ar-start", main)); EXPECT_LT(get_index("dot1", main), get_index("ar-done", main)); } class GpuHloScheduleParameterizedTest : public GpuHloScheduleTest, public ::testing::WithParamInterface<bool> {}; TEST_P(GpuHloScheduleParameterizedTest, AsyncAllReduce) { HloComputation::Builder reduction_builder("add"); HloInstruction* x0 = reduction_builder.AddInstruction(HloInstruction::CreateParameter( 0, ShapeUtil::MakeScalarShape(F32), "x")); HloInstruction* y0 = reduction_builder.AddInstruction(HloInstruction::CreateParameter( 1, ShapeUtil::MakeScalarShape(F32), "y")); HloInstruction* add = reduction_builder.AddInstruction(HloInstruction::CreateBinary( ShapeUtil::MakeScalarShape(F32), HloOpcode::kAdd, x0, y0)); const bool use_latency_hiding_scheduler = GetParam(); std::unique_ptr<HloModule> module = CreateNewVerifiedModule(use_latency_hiding_scheduler); HloComputation* reduction_computation = module->AddEmbeddedComputation(reduction_builder.Build(add)); HloComputation::Builder builder("entry_computation"); HloInstruction* x = builder.AddInstruction(HloInstruction::CreateParameter( 0, f32_2x2_, "x")); HloInstruction* y = builder.AddInstruction(HloInstruction::CreateParameter( 1, 
f32_2x2_, "y")); HloInstruction* z = builder.AddInstruction(HloInstruction::CreateParameter( 2, f32_2x2_, "z")); HloInstruction* add0 = builder.AddInstruction( HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, x, y)); HloInstruction* add1 = builder.AddInstruction( HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, add0, y)); HloInstruction* add2 = builder.AddInstruction( HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, add1, z)); Shape all_reduce_start_shape = ShapeUtil::MakeTupleShape({f32_2x2_, f32_2x2_}); HloInstruction* all_reduce_start = builder.AddInstruction(HloInstruction::CreateAllReduceStart( all_reduce_start_shape, {add0}, reduction_computation, CollectiveDeviceList(), false, std::nullopt, true)); TF_CHECK_OK(add1->AddControlDependencyTo(all_reduce_start)); HloInstruction* all_reduce_done = builder.AddInstruction(HloInstruction::CreateUnary( f32_2x2_, HloOpcode::kAllReduceDone, all_reduce_start)); HloInstruction* add3 = builder.AddInstruction( HloInstruction::CreateBinary(f32_2x2_, HloOpcode::kAdd, add1, add2)); HloInstruction* add4 = builder.AddInstruction(HloInstruction::CreateBinary( f32_2x2_, HloOpcode::kAdd, add3, all_reduce_done)); module->AddEntryComputation(builder.Build(add4)); SequentialHloOrdering order = BuildHloOrdering(module.get()); VLOG(2) << order.ToString(); EXPECT_TRUE(order.ExecutesBefore(add0, all_reduce_start)); EXPECT_TRUE(order.ExecutesBefore(add1, all_reduce_start)); EXPECT_TRUE(order.ExecutesBefore(all_reduce_start, add2)); EXPECT_TRUE(order.ExecutesBefore(all_reduce_start, add3)); EXPECT_TRUE(order.ExecutesBefore(all_reduce_start, add4)); EXPECT_TRUE(order.ExecutesBefore(add3, all_reduce_done)); EXPECT_TRUE(order.ExecutesBefore(all_reduce_done, add4)); EXPECT_TRUE(HasValidFingerprint(module.get())); } TEST_P(GpuHloScheduleParameterizedTest, LHSResourceModel) { const char* hlo_text = R"( HloModule AsyncModule apply_op { x = f32[] parameter(0) y = f32[] parameter(1) ROOT apply_op = f32[] add(x, y) } ENTRY ar { p0 = f32[32] parameter(0) p1 = f32[32, 32] parameter(1) p2 = f32[32, 32] parameter(2) p3 = f32[32] parameter(3) dot0 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm" dot1 = f32[32,32]{1,0} custom-call(dot0, p2), custom_call_target="__cublas$gemm" dot2 = f32[32,32]{1,0} custom-call(dot1, p2), custom_call_target="__cublas$gemm" dot3 = f32[32,32]{1,0} custom-call(dot2, p2), custom_call_target="__cublas$gemm" dot4 = f32[32,32]{1,0} custom-call(dot3, p2), custom_call_target="__cublas$gemm" dot5 = f32[32,32]{1,0} custom-call(dot4, p2), custom_call_target="__cublas$gemm" dot6 = f32[32,32]{1,0} custom-call(dot5, p2), custom_call_target="__cublas$gemm" ar-start = f32[32] all-reduce-start(p0), to_apply=apply_op ar-done = f32[32] all-reduce-done(ar-start) %ag-start = (f32[32], f32[64]) all-gather-start(p3), dimensions={0} %ag-done = f32[64] all-gather-done(%ag-start) add0 = f32[32,32] add(dot0, dot1) add1 = f32[32,32] add(add0, dot2) add2 = f32[32,32] add(add1, dot3) add3 = f32[32,32] add(add2, dot4) add4 = f32[32,32] add(add3, dot5) add5 = f32[32,32] add(add4, dot6) ROOT t = (f32[32], f32[64], f32[32,32]) tuple(ar-done, %ag-done, add5) })"; const bool enable_gpu_async_tracker = GetParam(); TF_ASSERT_OK_AND_ASSIGN( auto module, ParseAndReturnVerifiedModule( hlo_text, GetModuleConfig( true, enable_gpu_async_tracker))); SequentialHloOrdering order = BuildHloOrdering(module.get()); uint32_t in_flight = 0; uint32_t max_in_flight = 0; for (const HloInstruction* inst : 
order.SequentialOrder(*module->entry_computation())->instructions()) { if (hlo_query::IsAsyncCollectiveStartOp(inst)) { in_flight++; max_in_flight = std::max(max_in_flight, in_flight); } else if (hlo_query::IsAsyncCollectiveDoneOp(inst)) { in_flight--; } } const uint32_t expected_max_in_flight = enable_gpu_async_tracker ? 1 : 2; EXPECT_EQ(expected_max_in_flight, max_in_flight); EXPECT_TRUE(HasValidFingerprint(module.get())); } INSTANTIATE_TEST_SUITE_P(GpuHloScheduleParameterizedTest, GpuHloScheduleParameterizedTest, ::testing::Bool()); using GpuHloSchedulePostProcessTest = HloTestBase; TEST_F(GpuHloSchedulePostProcessTest, PostProcessAsyncCollectives) { const char* hlo_text = R"( HloModule AsyncModule, is_scheduled=true apply_op { x = f32[] parameter(0) y = f32[] parameter(1) ROOT apply_op = f32[] add(x, y) } ENTRY ar { p0 = f32[32] parameter(0) p1 = f32[32] parameter(1) ar-start = f32[32] all-reduce-start(p0), to_apply=apply_op add0 = f32[32] add(p0, p0) ar-done = f32[32] all-reduce-done(ar-start) ag-start = (f32[32], f32[64]) all-gather-start(p1), dimensions={0}, backend_config="{\"collective_backend_config\":{\"is_sync\":true}}" add1 = f32[32] add(p1, p1) ag-done = f32[64] all-gather-done(ag-start) add2 = f32[32] add(add0, add1) add3 = f32[32] add(add2, ar-done) ROOT result = (f32[32], f32[64]) tuple(add3, ag-done) })"; TF_ASSERT_OK_AND_ASSIGN( auto module, ParseAndReturnVerifiedModule(hlo_text, 2)); const HloInstructionSequence& input = module->schedule().sequence(module->entry_computation()); HloInstructionSequence result = PostProcessSchedule(input); const std::vector<std::string_view> expected_sequence = { "p0", "ar-start", "p1", "add0", "add1", "ag-start", "ag-done", "add2", "ar-done", "add3", "result"}; ASSERT_EQ(expected_sequence.size(), result.size()); for (int i = 0; i < result.size(); ++i) { EXPECT_EQ(expected_sequence[i], result.instructions()[i]->name()); } } TEST_F(GpuHloScheduleTest, AsyncOps) { const char* hlo_text = R"( HloModule m op1 { p0 = f32[2,2] parameter(0) ROOT add = f32[2,2] add(p0, p0) } op2 { p0 = f32[2,2] parameter(0) ROOT add = f32[2,2] add(p0, p0) } ENTRY main { p0 = f32[2,2] parameter(0) acc1_start = ((f32[2,2]), f32[2,2], s32[]) fusion-start(p0), kind=kLoop, calls=op1 acc1_done = f32[2,2] fusion-done(acc1_start) acc2_start = ((f32[2,2]), f32[2,2], s32[]) fusion-start(p0), kind=kLoop, calls=op2 acc2_done = f32[2,2] fusion-done(acc2_start) ROOT done = f32[2,2] add(acc1_done, acc2_done) })"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<xla::VerifiedHloModule> module, ParseAndReturnVerifiedModule(hlo_text, HloModuleConfig{})); SequentialHloOrdering order = BuildHloOrdering(module.get()); std::vector<HloOpcode> opcodes; for (HloInstruction* instruction : order.SequentialOrder(*module->entry_computation())->instructions()) { opcodes.push_back(instruction->opcode()); } EXPECT_THAT(opcodes, ElementsAre(HloOpcode::kParameter, HloOpcode::kAsyncStart, HloOpcode::kAsyncStart, HloOpcode::kAsyncDone, HloOpcode::kAsyncDone, HloOpcode::kAdd)); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_hlo_schedule.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/gpu_hlo_schedule_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
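Note on the record above: the scheduling tests repeatedly locate instructions by name in a sequential order and compare the resulting indices. Below is a minimal, self-contained sketch of that index-comparison pattern using only the standard library and hypothetical instruction names; it is not the XLA test API, just the same idea the tests express with absl::c_find_if and EXPECT_EQ/EXPECT_LT.

// Standalone sketch (hypothetical names, standard library only): checking the
// relative order of named items in a linear schedule.
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

// Returns the position of `name` in `sequence`, or sequence.size() if absent.
static std::size_t IndexOf(const std::vector<std::string>& sequence,
                           const std::string& name) {
  return std::find(sequence.begin(), sequence.end(), name) - sequence.begin();
}

int main() {
  // A toy "schedule": the send is issued right after the matching recv, and
  // the done ops come before the while result, mirroring the assertions above.
  const std::vector<std::string> schedule = {"recv.0", "send.0", "recv-done.0",
                                             "send-done.0", "while-result"};
  assert(IndexOf(schedule, "recv.0") + 1 == IndexOf(schedule, "send.0"));
  assert(IndexOf(schedule, "recv-done.0") < IndexOf(schedule, "send-done.0"));
  assert(IndexOf(schedule, "send-done.0") < IndexOf(schedule, "while-result"));
  return 0;
}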
38fbeaea-f8df-4f7a-a655-4f468d1949c7
cpp
tensorflow/tensorflow
target_util
third_party/xla/xla/service/gpu/target_util.cc
third_party/xla/xla/service/gpu/target_util_test.cc
#include "xla/service/gpu/target_util.h" #include <functional> #include <string> #include <variant> #include <vector> #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "llvm/IR/Attributes.h" #include "llvm/IR/CallingConv.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/FPEnv.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/IntrinsicsAMDGPU.h" #include "llvm/IR/IntrinsicsNVPTX.h" #include "llvm/IR/MDBuilder.h" #include "llvm/IR/Metadata.h" #include "llvm/IR/Module.h" #include "llvm/IR/Type.h" #include "llvm/IR/Value.h" #include "llvm/Support/Casting.h" #include "llvm/TargetParser/Triple.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/primitive_util.h" #include "xla/service/llvm_ir/llvm_type_conversion_util.h" #include "xla/service/llvm_ir/llvm_util.h" #include "xla/util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/logging.h" namespace xla { namespace gpu { namespace { using absl::StrCat; struct TargetIntrinsics { llvm::Intrinsic::ID nvptx_intrinsic; std::variant<llvm::Intrinsic::ID, std::function<llvm::CallInst*(llvm::IRBuilder<>*)>> amdgpu_intrinsic_or_function; std::variant<llvm::Intrinsic::ID, std::function<llvm::CallInst*(llvm::IRBuilder<>*)>> spir_intrinsic_or_function; }; struct TargetIntrinsics GetIntrinsic(TargetIntrinsicID intrin) { switch (intrin) { case TargetIntrinsicID::kThreadIdx: { return { llvm::Intrinsic::nvvm_read_ptx_sreg_tid_x, llvm::Intrinsic::amdgcn_workitem_id_x, [](llvm::IRBuilder<>* b_) -> llvm::CallInst* { return EmitDeviceFunctionCall( "_Z32__spirv_BuiltInLocalInvocationIdi", {b_->getInt32(0)}, {U32}, U64, {b_->getContext()}, b_); }, }; } case TargetIntrinsicID::kThreadIdy: { return { llvm::Intrinsic::nvvm_read_ptx_sreg_tid_y, llvm::Intrinsic::amdgcn_workitem_id_y, [](llvm::IRBuilder<>* b_) -> llvm::CallInst* { return EmitDeviceFunctionCall( "_Z32__spirv_BuiltInLocalInvocationIdi", {b_->getInt32(1)}, {U32}, U64, {b_->getContext()}, b_); }, }; } case TargetIntrinsicID::kThreadIdz: { return { llvm::Intrinsic::nvvm_read_ptx_sreg_tid_z, llvm::Intrinsic::amdgcn_workitem_id_z, [](llvm::IRBuilder<>* b_) -> llvm::CallInst* { return EmitDeviceFunctionCall( "_Z32__spirv_BuiltInLocalInvocationIdi", {b_->getInt32(2)}, {U32}, U64, {b_->getContext()}, b_); }, }; } case TargetIntrinsicID::kBlockIdx: { return { llvm::Intrinsic::nvvm_read_ptx_sreg_ctaid_x, llvm::Intrinsic::amdgcn_workgroup_id_x, [](llvm::IRBuilder<>* b_) -> llvm::CallInst* { return EmitDeviceFunctionCall("_Z26__spirv_BuiltInWorkgroupIdi", {b_->getInt32(0)}, {U32}, U64, {b_->getContext()}, b_); }, }; } case TargetIntrinsicID::kBlockIdy: { return { llvm::Intrinsic::nvvm_read_ptx_sreg_ctaid_y, llvm::Intrinsic::amdgcn_workgroup_id_y, [](llvm::IRBuilder<>* b_) -> llvm::CallInst* { return EmitDeviceFunctionCall("_Z26__spirv_BuiltInWorkgroupIdi", {b_->getInt32(1)}, {U32}, U64, {b_->getContext()}, b_); }, }; } case TargetIntrinsicID::kBlockIdz: { return { llvm::Intrinsic::nvvm_read_ptx_sreg_ctaid_z, llvm::Intrinsic::amdgcn_workgroup_id_z, [](llvm::IRBuilder<>* b_) -> llvm::CallInst* { return EmitDeviceFunctionCall("_Z26__spirv_BuiltInWorkgroupIdi", {b_->getInt32(2)}, {U32}, U64, {b_->getContext()}, b_); }, }; } case TargetIntrinsicID::kBarrierId: { return {llvm::Intrinsic::nvvm_barrier0, llvm::Intrinsic::amdgcn_s_barrier, [](llvm::IRBuilder<>* b_) -> llvm::CallInst* { return EmitDeviceFunctionCall( "_Z22__spirv_ControlBarrierjjj", {b_->getInt32(2), b_->getInt32(2), 
b_->getInt32(272)}, {U32, U32, U32}, U32, llvm::AttrBuilder(b_->getContext()) .addAttribute(llvm::Attribute::Convergent), b_); }}; } case TargetIntrinsicID::kBlockDimx: { return {llvm::Intrinsic::nvvm_read_ptx_sreg_ntid_x, [](llvm::IRBuilder<>* b_) -> llvm::CallInst* { return EmitDeviceFunctionCall("__ockl_get_local_size", {b_->getInt32(0)}, {U32}, U64, {b_->getContext()}, b_); }, [](llvm::IRBuilder<>* b_) -> llvm::CallInst* { return EmitDeviceFunctionCall( "_Z28__spirv_BuiltInWorkgroupSizei", {b_->getInt32(0)}, {U32}, U64, {b_->getContext()}, b_); }}; } case TargetIntrinsicID::kBlockDimy: { return {llvm::Intrinsic::nvvm_read_ptx_sreg_ntid_y, [](llvm::IRBuilder<>* b_) -> llvm::CallInst* { return EmitDeviceFunctionCall("__ockl_get_local_size", {b_->getInt32(1)}, {U32}, U64, {b_->getContext()}, b_); }, [](llvm::IRBuilder<>* b_) -> llvm::CallInst* { return EmitDeviceFunctionCall( "_Z28__spirv_BuiltInWorkgroupSizei", {b_->getInt32(1)}, {U32}, U64, {b_->getContext()}, b_); }}; } case TargetIntrinsicID::kBlockDimz: { return {llvm::Intrinsic::nvvm_read_ptx_sreg_ntid_z, [](llvm::IRBuilder<>* b_) -> llvm::CallInst* { return EmitDeviceFunctionCall("__ockl_get_local_size", {b_->getInt32(2)}, {U32}, U64, {b_->getContext()}, b_); }, [](llvm::IRBuilder<>* b_) -> llvm::CallInst* { return EmitDeviceFunctionCall( "_Z28__spirv_BuiltInWorkgroupSizei", {b_->getInt32(2)}, {U32}, U64, {b_->getContext()}, b_); }}; } case TargetIntrinsicID::kGroupBarrierId: { return {llvm::Intrinsic::nvvm_bar_warp_sync, llvm::Intrinsic::amdgcn_wave_barrier, [](llvm::IRBuilder<>* b_) -> llvm::CallInst* { return EmitDeviceFunctionCall( "_Z22__spirv_ControlBarrierjjj", {b_->getInt32(2), b_->getInt32(2), b_->getInt32(272)}, {U32, U32, U32}, U32, llvm::AttrBuilder(b_->getContext()) .addAttribute(llvm::Attribute::Convergent), b_); }}; } } } struct TargetDeviceFunction { const std::string nvptx_root; const std::string amdgpu_root; const std::string spir_root; }; struct TargetDeviceFunction GetDeviceFunctionRoot( TargetDeviceFunctionID func_id) { switch (func_id) { case TargetDeviceFunctionID::kAtan2: { return {"__nv_atan2", "__ocml_atan2", "_Z17__spirv_ocl_atan2"}; } case TargetDeviceFunctionID::kCos: { return {"__nv_cos", "__ocml_cos", "_Z15__spirv_ocl_cos"}; } case TargetDeviceFunctionID::kErf: { return {"__nv_erf", "__ocml_erf", "_Z15__spirv_ocl_erf"}; } case TargetDeviceFunctionID::kExp: { return {"__nv_exp", "__ocml_exp", "_Z15__spirv_ocl_exp"}; } case TargetDeviceFunctionID::kExpm1: { return {"__nv_expm1", "__ocml_expm1", "_Z17__spirv_ocl_expm1"}; } case TargetDeviceFunctionID::kFmod: { return {"__nv_fmod", "__ocml_fmod", "_Z16__spirv_ocl_fmod"}; } case TargetDeviceFunctionID::kHypot: { return {"__nv_hypot", "__ocml_hypot", "_Z17__spirv_ocl_hypot"}; } case TargetDeviceFunctionID::kLog: { return {"__nv_log", "__ocml_log", "_Z15__spirv_ocl_log"}; } case TargetDeviceFunctionID::kLog1p: { return {"__nv_log1p", "__ocml_log1p", "_Z17__spirv_ocl_log1p"}; } case TargetDeviceFunctionID::kPow: { return {"__nv_pow", "__ocml_pow", "_Z15__spirv_ocl_pow"}; } case TargetDeviceFunctionID::kRsqrt: { return {"__nv_rsqrt", "__ocml_rsqrt", "_Z17__spirv_ocl_rsqrt"}; } case TargetDeviceFunctionID::kSin: { return {"__nv_sin", "__ocml_sin", "_Z15__spirv_ocl_sin"}; } case TargetDeviceFunctionID::kSqrt: { return {"__nv_sqrt", "__ocml_sqrt", "_Z16__spirv_ocl_sqrt"}; } case TargetDeviceFunctionID::kTan: { return {"__nv_tan", "__ocml_tan", "_Z15__spirv_ocl_tan"}; } case TargetDeviceFunctionID::kTanh: { return {"__nv_tanh", "__ocml_tanh", 
"_Z16__spirv_ocl_tanh"}; } case TargetDeviceFunctionID::kCbrt: { return {"__nv_cbrt", "__ocml_cbrt", "_Z16__spirv_ocl_cbrt"}; } } } } absl::StatusOr<TargetDeviceFunctionID> GetTargetDeviceFunctionID(HloOpcode op) { switch (op) { case HloOpcode::kAtan2: return TargetDeviceFunctionID::kAtan2; case HloOpcode::kCos: return TargetDeviceFunctionID::kCos; case HloOpcode::kExp: return TargetDeviceFunctionID::kExp; case HloOpcode::kErf: return TargetDeviceFunctionID::kErf; case HloOpcode::kExpm1: return TargetDeviceFunctionID::kExpm1; case HloOpcode::kLog: return TargetDeviceFunctionID::kLog; case HloOpcode::kLog1p: return TargetDeviceFunctionID::kLog1p; case HloOpcode::kPower: return TargetDeviceFunctionID::kPow; case HloOpcode::kRemainder: return TargetDeviceFunctionID::kFmod; case HloOpcode::kRsqrt: return TargetDeviceFunctionID::kRsqrt; case HloOpcode::kSin: return TargetDeviceFunctionID::kSin; case HloOpcode::kSqrt: return TargetDeviceFunctionID::kSqrt; case HloOpcode::kTan: return TargetDeviceFunctionID::kTan; case HloOpcode::kTanh: return TargetDeviceFunctionID::kTanh; case HloOpcode::kCbrt: return TargetDeviceFunctionID::kCbrt; default: break; } return NotFound("The HLO opcode %s is not mapped to a device function", HloOpcodeString(op)); } std::string ObtainDeviceFunctionName(TargetDeviceFunctionID func_id, PrimitiveType output_type, llvm::Triple target_triple) { struct TargetDeviceFunction gpu_root_names = GetDeviceFunctionRoot(func_id); if (target_triple.isNVPTX()) { if (output_type == F32) { return StrCat(gpu_root_names.nvptx_root, "f"); } else if (output_type == F64) { return gpu_root_names.nvptx_root; } else { LOG(FATAL) << "Unexpected type while getting device function name: " << primitive_util::LowercasePrimitiveTypeName(output_type); } } else if (target_triple.getArch() == llvm::Triple::amdgcn) { if (output_type == F32) { return StrCat(gpu_root_names.amdgpu_root, "_f32"); } else if (output_type == F64) { return StrCat(gpu_root_names.amdgpu_root, "_f64"); } else { LOG(FATAL) << "Unexpected type while getting device function name."; } } else if (target_triple.isSPIR()) { if (output_type == F32) { if (gpu_root_names.spir_root == "_Z17__spirv_ocl_hypot" || gpu_root_names.spir_root == "_Z15__spirv_ocl_pow" || gpu_root_names.spir_root == "_Z17__spirv_ocl_atan2" || gpu_root_names.spir_root == "_Z16__spirv_ocl_fmod") { return StrCat(gpu_root_names.spir_root, "ff"); } else { return StrCat(gpu_root_names.spir_root, "f"); } } else if (output_type == F64) { if (gpu_root_names.spir_root == "_Z17__spirv_ocl_hypot" || gpu_root_names.spir_root == "_Z15__spirv_ocl_pow" || gpu_root_names.spir_root == "_Z17__spirv_ocl_atan2" || gpu_root_names.spir_root == "_Z16__spirv_ocl_fmod") { return StrCat(gpu_root_names.spir_root, "dd"); } else { return StrCat(gpu_root_names.spir_root, "d"); } } else { LOG(FATAL) << "Unexpected type while getting device function name."; } } else { LOG(FATAL) << "Invalid triple " << target_triple.str(); } } llvm::CallInst* EmitDeviceFunctionCall( const std::string& callee_name, absl::Span<llvm::Value* const> operands, absl::Span<const PrimitiveType> input_types, PrimitiveType output_type, const llvm::AttrBuilder& attributes, llvm::IRBuilder<>* b, absl::string_view name) { std::vector<llvm::Type*> ir_input_types; llvm::Module* module = b->GetInsertBlock()->getModule(); llvm::Triple target_triple = llvm::Triple(module->getTargetTriple()); for (PrimitiveType input_type : input_types) { ir_input_types.push_back( llvm_ir::PrimitiveTypeToIrType(input_type, module)); } 
llvm::FunctionType* callee_type = llvm::FunctionType::get( llvm_ir::PrimitiveTypeToIrType(output_type, module), ir_input_types, false); llvm::Function* callee = llvm::dyn_cast<llvm::Function>( b->GetInsertBlock() ->getModule() ->getOrInsertFunction(callee_name, callee_type) .getCallee()); callee->addFnAttrs(attributes); if (target_triple.isSPIR()) callee->setCallingConv(llvm::CallingConv::SPIR_FUNC); return b->CreateCall(callee, llvm_ir::AsArrayRef(operands), name.data()); } llvm::CallInst* EmitCallToTargetIntrinsic( TargetIntrinsicID intrinsic_id, absl::Span<llvm::Value* const> operands, absl::Span<llvm::Type* const> overloaded_types, llvm::IRBuilder<>* b) { llvm::Module* module = b->GetInsertBlock()->getModule(); struct TargetIntrinsics gpu_intrinsic_id = GetIntrinsic(intrinsic_id); llvm::Triple target_triple = llvm::Triple(module->getTargetTriple()); llvm::Intrinsic::ID llvm_intrinsic_id = llvm::Intrinsic::not_intrinsic; if (target_triple.isNVPTX()) { llvm_intrinsic_id = gpu_intrinsic_id.nvptx_intrinsic; } else if (target_triple.getArch() == llvm::Triple::amdgcn) { llvm::Intrinsic::ID* llvm_intrinsic_id_ptr = std::get_if<llvm::Intrinsic::ID>( &gpu_intrinsic_id.amdgpu_intrinsic_or_function); if (llvm_intrinsic_id_ptr) { llvm_intrinsic_id = *llvm_intrinsic_id_ptr; } else { std::function<llvm::CallInst*(llvm::IRBuilder<>*)>* builder_func = std::get_if<std::function<llvm::CallInst*(llvm::IRBuilder<>*)>>( &gpu_intrinsic_id.amdgpu_intrinsic_or_function); return (*builder_func)(b); } } else if (target_triple.isSPIR()) { llvm::Intrinsic::ID* llvm_intrinsic_id_ptr = std::get_if<llvm::Intrinsic::ID>( &gpu_intrinsic_id.spir_intrinsic_or_function); if (llvm_intrinsic_id_ptr) { llvm_intrinsic_id = *llvm_intrinsic_id_ptr; } else { std::function<llvm::CallInst*(llvm::IRBuilder<>*)>* builder_func = std::get_if<std::function<llvm::CallInst*(llvm::IRBuilder<>*)>>( &gpu_intrinsic_id.spir_intrinsic_or_function); return (*builder_func)(b); } } else { LOG(FATAL) << "Invalid triple " << target_triple.str(); } llvm::Function* intrinsic = llvm::Intrinsic::getDeclaration( module, llvm_intrinsic_id, llvm_ir::AsArrayRef(overloaded_types)); return b->CreateCall(intrinsic, llvm_ir::AsArrayRef(operands)); } void AnnotateFunctionAsGpuKernel(llvm::Module* module, llvm::Function* func, llvm::IRBuilder<>* b) { llvm::Triple target_triple = llvm::Triple(module->getTargetTriple()); if (target_triple.isNVPTX()) { llvm::LLVMContext& context = module->getContext(); llvm::NamedMDNode* nvvm_annotations_node = module->getOrInsertNamedMetadata("nvvm.annotations"); nvvm_annotations_node->addOperand(llvm::MDNode::get( context, {llvm::ConstantAsMetadata::get(func), llvm::MDString::get(context, "kernel"), llvm::ConstantAsMetadata::get(b->getInt32(1))})); } else if (target_triple.getArch() == llvm::Triple::amdgcn) { func->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL); func->addFnAttr("amdgpu-flat-work-group-size", "1, 1024"); } else if (target_triple.isSPIR()) { func->setCallingConv(llvm::CallingConv::SPIR_KERNEL); } else { LOG(FATAL) << "Invalid triple " << target_triple.str(); } } } }
#include "xla/service/gpu/target_util.h" #include "llvm/IR/BasicBlock.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Function.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Verifier.h" #include "llvm/Support/raw_ostream.h" #include "tsl/platform/test.h" namespace xla { namespace gpu { namespace { class TargetUtilTest : public testing::Test { public: TargetUtilTest() : module_("test", ctx_), builder_(ctx_) {} protected: void SetUp() override { auto fn = llvm::Function::Create( llvm::FunctionType::get(llvm::Type::getVoidTy(ctx_), {}), llvm::Function::LinkageTypes::ExternalLinkage, "fn", module_); auto block = llvm::BasicBlock::Create(ctx_, "blk", fn); builder_.SetInsertPoint(block); } llvm::LLVMContext ctx_; llvm::Module module_; llvm::IRBuilder<> builder_; }; TEST_F(TargetUtilTest, NVPTXGroupBarrier) { module_.setTargetTriple("nvptx"); EmitCallToTargetIntrinsic(TargetIntrinsicID::kGroupBarrierId, {builder_.getInt32(-1)}, {}, &builder_); builder_.CreateRetVoid(); EXPECT_FALSE(llvm::verifyModule(module_, &llvm::errs())); } TEST_F(TargetUtilTest, AMDGCNGroupBarrier) { module_.setTargetTriple("amdgcn"); EmitCallToTargetIntrinsic(TargetIntrinsicID::kGroupBarrierId, {}, {}, &builder_); builder_.CreateRetVoid(); EXPECT_FALSE(llvm::verifyModule(module_, &llvm::errs())); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/target_util.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/target_util_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
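Note on the record above: target_util.cc stores, per intrinsic, either an LLVM intrinsic ID or a callback that emits a device-function call, and dispatches with std::get_if on a std::variant. The following standalone sketch shows that variant-dispatch pattern with hypothetical stand-in types rather than the real LLVM ones.

// Standalone sketch (hypothetical types, standard library only): dispatch on a
// variant that holds either a numeric intrinsic ID or a builder callback.
#include <functional>
#include <iostream>
#include <string>
#include <variant>

using IntrinsicId = int;
using BuilderFn = std::function<std::string()>;
using IdOrBuilder = std::variant<IntrinsicId, BuilderFn>;

// Emits either "intrinsic <id>" or whatever the callback alternative produces.
std::string Emit(const IdOrBuilder& value) {
  if (const IntrinsicId* id = std::get_if<IntrinsicId>(&value)) {
    return "intrinsic " + std::to_string(*id);
  }
  // Not an ID, so the variant holds the callback alternative.
  return (*std::get_if<BuilderFn>(&value))();
}

int main() {
  IdOrBuilder direct = 42;  // plain intrinsic ID
  IdOrBuilder via_call = BuilderFn([] { return std::string("device call"); });
  std::cout << Emit(direct) << "\n" << Emit(via_call) << "\n";
  return 0;
}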
b232161e-af97-49a2-b0c3-42cb833ce35d
cpp
tensorflow/tensorflow
hlo_fusion_stats
third_party/xla/xla/service/gpu/hlo_fusion_stats.cc
third_party/xla/xla/service/gpu/hlo_fusion_stats_test.cc
#include "xla/service/gpu/hlo_fusion_stats.h" #include <set> #include <string> #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_join.h" #include "xla/hlo/ir/dfs_hlo_visitor_with_default.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "tsl/platform/errors.h" namespace xla { namespace gpu { namespace { class OpcodeCollector : public ConstDfsHloVisitorWithDefault { public: std::set<std::string> GetUniqueOpcodes() { return opcodes_; } protected: absl::Status DefaultAction(const xla::HloInstruction* instr) final { switch (instr->opcode()) { case HloOpcode::kConstant: break; case HloOpcode::kParameter: break; case HloOpcode::kAbs: case HloOpcode::kCbrt: case HloOpcode::kCeil: case HloOpcode::kCos: case HloOpcode::kErf: case HloOpcode::kExp: case HloOpcode::kExpm1: case HloOpcode::kFloor: case HloOpcode::kLog: case HloOpcode::kLog1p: case HloOpcode::kLogistic: case HloOpcode::kNegate: case HloOpcode::kRoundNearestAfz: case HloOpcode::kRoundNearestEven: case HloOpcode::kRsqrt: case HloOpcode::kSign: case HloOpcode::kSin: case HloOpcode::kSqrt: case HloOpcode::kTan: case HloOpcode::kTanh: case HloOpcode::kAdd: case HloOpcode::kAtan2: case HloOpcode::kDivide: case HloOpcode::kMultiply: case HloOpcode::kSubtract: opcodes_.insert("cwise"); break; default: opcodes_.insert(std::string(HloOpcodeString(instr->opcode()))); } return absl::OkStatus(); } private: std::set<std::string> opcodes_; }; std::set<std::string> GetUniqueOpcodes(HloComputation* computation) { OpcodeCollector collector; if (!computation->Accept(&collector).ok()) { return {}; } return collector.GetUniqueOpcodes(); } } std::string HloOpcodeHistogram::ToString() { std::string result; for (const auto& entry : *this) { absl::StrAppend(&result, "{", absl::StrJoin(entry.first, ", "), "}: ", entry.second, "\n"); } return result; } absl::Status HloFusionStatsVisitor::RunOnModule(HloModule* module) { TF_RETURN_IF_ERROR(module->entry_computation()->Accept(this)); return absl::OkStatus(); } std::string HloFusionStatsVisitor::ToString() { return absl::StrCat("HLO Fusion Stats:\n", "Number of fusion ops: ", num_fusions_, "\n", "Number of kLoop fusions: ", num_loop_fusions_, "\n", loop_fusion_opcode_histogram_.ToString(), "\n", "Number of kInput fusions: ", num_input_fusions_, "\n", input_fusion_opcode_histogram_.ToString()); } absl::Status HloFusionStatsVisitor::DefaultAction( const xla::HloInstruction* instr) { return absl::OkStatus(); } absl::Status HloFusionStatsVisitor::HandleFusion(const HloInstruction* fusion) { num_fusions_++; std::set<std::string> opcodes = GetUniqueOpcodes(fusion->fused_instructions_computation()); if (fusion->fusion_kind() == HloInstruction::FusionKind::kLoop) { num_loop_fusions_++; loop_fusion_opcode_histogram_[opcodes]++; } else if (fusion->fusion_kind() == HloInstruction::FusionKind::kInput) { num_input_fusions_++; input_fusion_opcode_histogram_[opcodes]++; } return absl::OkStatus(); } } }
#include "xla/service/gpu/hlo_fusion_stats.h" #include <string> #include <gtest/gtest.h> #include "absl/strings/match.h" #include "xla/service/hlo_parser.h" #include "xla/tests/hlo_test_base.h" #include "xla/tsl/lib/core/status_test_util.h" namespace xla { namespace gpu { namespace { using HloFusionStatsTest = HloTestBase; TEST_F(HloFusionStatsTest, LoopFusionAndReduceFusion) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module scalar_add_computation { scalar_lhs.0 = f32[] parameter(0) scalar_rhs.0 = f32[] parameter(1) ROOT add.0 = f32[] add(scalar_lhs.0, scalar_rhs.0) } fused_select { p1.1 = f32[32,32,32]{2,1,0} parameter(1) c0 = f32[] constant(0) broadcast = f32[32,32,32]{2,1,0} broadcast(f32[] c0), dimensions={} greater-than = pred[32,32,32]{2,1,0} compare(f32[32,32,32]{2,1,0} p1.1, f32[32,32,32]{2,1,0} broadcast), direction=GT p0.1 = f32[32,32,32]{2,1,0} parameter(0) ROOT select = f32[32,32,32]{2,1,0} select(pred[32,32,32]{2,1,0} greater-than, f32[32,32,32]{2,1,0} p0.1, f32[32,32,32]{2,1,0} broadcast) } another_fused_select { p1.1 = f32[32,32,32]{2,1,0} parameter(1) c0 = f32[] constant(0) broadcast = f32[32,32,32]{2,1,0} broadcast(f32[] c0), dimensions={} greater-than = pred[32,32,32]{2,1,0} compare(f32[32,32,32]{2,1,0} p1.1, f32[32,32,32]{2,1,0} broadcast), direction=GT p0.1 = f32[32,32,32]{2,1,0} parameter(0) ROOT select = f32[32,32,32]{2,1,0} select(pred[32,32,32]{2,1,0} greater-than, f32[32,32,32]{2,1,0} p0.1, f32[32,32,32]{2,1,0} broadcast) } fused_reduce { p0.2 = f32[32,32,32]{2,1,0} parameter(0) c1 = f32[] constant(0) r1 = f32[32,32]{1,0} reduce(p0.2, c1), dimensions={2}, to_apply=scalar_add_computation mul = f32[32,32,32]{2,1,0} multiply(p0.2, p0.2) r2 = f32[32,32]{1,0} reduce(mul, c1), dimensions={2}, to_apply=scalar_add_computation ROOT tuple = (f32[32,32]{1,0}, f32[32,32]{1,0}) tuple(r1, r2) } ENTRY reduce { p0 = f32[32,32,32]{2,1,0} parameter(0) p1 = f32[32,32,32]{2,1,0} parameter(1) select = f32[32,32,32]{2,1,0} fusion(p0, p1), kind=kLoop, calls=fused_select select_2 = f32[32,32,32]{2,1,0} fusion(p0, p1), kind=kLoop, calls=another_fused_select fusion = (f32[32,32]{1,0}, f32[32,32]{1,0}) fusion(select), kind=kInput, calls=fused_reduce gte0 = f32[32,32]{1,0} get-tuple-element(fusion), index=0 gte1 = f32[32,32]{1,0} get-tuple-element(fusion), index=1 ROOT root = (f32[32,32]{1,0}, f32[32,32]{1,0}, f32[32,32,32]{2,1,0}, f32[32,32,32]{2,1,0}) tuple(gte1, gte1, select, select_2) })") .value(); HloFusionStatsVisitor fusion_stats_visitor; TF_ASSERT_OK( module.get()->entry_computation()->Accept(&fusion_stats_visitor)); SCOPED_TRACE(module->ToString()); std::string stats = fusion_stats_visitor.ToString(); ASSERT_TRUE(absl::StrContains(stats, "Number of fusion ops: 3")); ASSERT_TRUE(absl::StrContains(stats, "Number of kLoop fusions: 2")); ASSERT_TRUE(absl::StrContains(stats, "{broadcast, compare, select}: 2")); ASSERT_TRUE(absl::StrContains(stats, "Number of kInput fusions: 1")); ASSERT_TRUE(absl::StrContains(stats, "{cwise, reduce, tuple}: 1")); } TEST_F(HloFusionStatsTest, AggregateCwiseOps) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module fused_computation { p0.1 = f32[8,1,5,16,1,2]{5,4,3,2,1,0} parameter(0) mul = f32[8,1,5,16,1,2]{5,4,3,2,1,0} multiply(p0.1, p0.1) ROOT exp = f32[8,1,5,16,1,2]{5,4,3,2,1,0} exponential(mul) } ENTRY entry { p0 = f32[8,1,5,16,1,2]{5,4,3,2,1,0} parameter(0) ROOT fusion = f32[8,1,5,16,1,2]{5,4,3,2,1,0} fusion(p0), kind=kLoop, calls=fused_computation })") .value(); HloFusionStatsVisitor fusion_stats_visitor; 
TF_ASSERT_OK( module.get()->entry_computation()->Accept(&fusion_stats_visitor)); SCOPED_TRACE(module->ToString()); std::string stats = fusion_stats_visitor.ToString(); ASSERT_TRUE(absl::StrContains(stats, "{cwise}: 1")) << stats; } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/hlo_fusion_stats.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/hlo_fusion_stats_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
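Note on the record above: hlo_fusion_stats.cc counts fusions by the set of unique opcodes they contain (HloOpcodeHistogram maps an opcode set to a count, and ToString prints "{op, op}: n" lines). Below is a self-contained sketch of such a set-keyed histogram with hypothetical opcode strings and only the standard library.

// Standalone sketch (hypothetical opcode names): a histogram keyed by the set
// of opcodes seen in each fusion, printed in the same "{...}: count" style.
#include <iostream>
#include <map>
#include <set>
#include <string>

using OpcodeSet = std::set<std::string>;
using OpcodeHistogram = std::map<OpcodeSet, int>;

int main() {
  OpcodeHistogram histogram;
  // Two loop fusions sharing an opcode set, plus one reduce-style fusion.
  histogram[{"broadcast", "compare", "select"}]++;
  histogram[{"broadcast", "compare", "select"}]++;
  histogram[{"cwise", "reduce", "tuple"}]++;

  for (const auto& [opcodes, count] : histogram) {
    std::cout << "{";
    const char* sep = "";
    for (const auto& op : opcodes) {
      std::cout << sep << op;
      sep = ", ";
    }
    std::cout << "}: " << count << "\n";
  }
  return 0;
}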
298fcdc7-3af6-4de5-9d69-893e38b98c6d
cpp
tensorflow/tensorflow
hlo_fusion_analysis
third_party/xla/xla/service/gpu/hlo_fusion_analysis.cc
third_party/xla/xla/service/gpu/hlo_fusion_analysis_test.cc
#include "xla/service/gpu/hlo_fusion_analysis.h" #include <algorithm> #include <limits> #include <memory> #include <optional> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/inlined_vector.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "llvm/ADT/STLExtras.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/primitive_util.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/hlo_traversal.h" #include "xla/service/gpu/ir_emission_utils.h" #include "xla/service/gpu/reduction_utils.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/stream_executor/device_description.h" namespace xla { namespace gpu { namespace { bool IsInputFusibleNonStridedSlices( const absl::Span<const HloInstructionAdaptor> fusion_roots) { return absl::c_all_of(fusion_roots, [&](const HloInstructionAdaptor& root) { return IsSliceWithUnitStrides(&root.instruction()); }); } bool AllSliceInputsAreCompatible( const absl::Span<const HloInstructionAdaptor> fusion_roots) { const Shape& first_slice_operand_shape = fusion_roots[0].GetOperand(0).shape(); return absl::c_all_of(fusion_roots, [&](const HloInstructionAdaptor& slice) { return ShapeUtil::EqualIgnoringElementType(slice.GetOperand(0).shape(), first_slice_operand_shape); }); } std::optional<TransposeDescription> FindConsistentTransposeHero( const absl::InlinedVector<HloInstructionAdaptor, 2>& hlo_roots, const absl::InlinedVector<HloInstructionAdaptor, 2>& heroes) { std::optional<TransposeDescription> tiled_transpose_hero; std::vector<const HloInstruction*> non_transpose_roots; for (auto [root, hero] : llvm::zip(hlo_roots, heroes)) { if (auto tr = GetDescriptionForTiledTransposeEmitter(hero.instruction())) { if (!tiled_transpose_hero) { tiled_transpose_hero = tr; } else if (!tiled_transpose_hero->IsEquivalent(*tr)) { return std::nullopt; } } else { non_transpose_roots.push_back(&root.instruction()); } } if (!tiled_transpose_hero) return std::nullopt; for (auto* root : non_transpose_roots) { if (!ShapeUtil::IsReshapeOrTransposeBitcast( root->shape(), tiled_transpose_hero->input_shape(), true)) { return std::nullopt; } } return tiled_transpose_hero; } const Shape& GetShape(const HloInstructionAdaptor& adaptor) { return adaptor.shape(); } const Shape& GetShape(const HloInstruction* instruction) { return instruction->shape(); } template <typename Container> int SmallestBitWidth(const Container& args) { int bits = std::numeric_limits<int>::max(); for (const auto& operand : args) { const Shape& shape = GetShape(operand); if (!shape.IsArray()) continue; bits = std::min(bits, shape.element_type() == PRED ? 
8 : primitive_util::BitWidth(shape.element_type())); } return bits; } } HloFusionAnalysis::HloFusionAnalysis( FusionBackendConfig fusion_backend_config, std::unique_ptr<HloFusionAdaptor> fusion, absl::InlinedVector<HloInstructionAdaptor, 2> fusion_roots, absl::InlinedVector<HloInstructionAdaptor, 2> fusion_heroes, const se::DeviceDescription* device_info, std::optional<TransposeDescription> tiled_transpose, HloFusionAnalysis::InputOutputInfo input_output_info) : fusion_backend_config_(std::move(fusion_backend_config)), fusion_(std::move(fusion)), fusion_roots_(std::move(fusion_roots)), fusion_heroes_(std::move(fusion_heroes)), device_info_(device_info), tiled_transpose_(tiled_transpose), input_output_info_(std::move(input_output_info)) {} HloFusionAnalysis HloFusionAnalysis::Create( FusionBackendConfig backend_config, std::unique_ptr<HloFusionAdaptor> fusion, const se::DeviceDescription* device_info) { absl::InlinedVector<HloInstructionAdaptor, 2> roots = fusion->GetRoots(); absl::InlinedVector<HloInstructionAdaptor, 2> heroes; for (auto root : roots) { heroes.push_back(FindNonTrivialHero(root)); } InputOutputInfo input_output_info{ SmallestBitWidth(fusion->GetParameters()), SmallestBitWidth(roots), }; std::optional<TransposeDescription> tiled_transpose_hero = FindConsistentTransposeHero(roots, heroes); return HloFusionAnalysis(std::move(backend_config), std::move(fusion), std::move(roots), std::move(heroes), device_info, tiled_transpose_hero, std::move(input_output_info)); } HloFusionAnalysis HloFusionAnalysis::Create( const HloInstruction& instruction, const se::DeviceDescription& device_info) { absl::StatusOr<GpuBackendConfig> gpu_backend_config = instruction.backend_config<GpuBackendConfig>(); FusionBackendConfig fusion_backend_config = gpu_backend_config.ok() ? gpu_backend_config->fusion_backend_config() : FusionBackendConfig::default_instance(); return Create(std::move(fusion_backend_config), HloFusionAdaptor::ForInstruction(&instruction), &device_info); } HloFusionAnalysis HloFusionAnalysis::Create( const HloInstruction& producer, const HloInstruction& consumer, const se::DeviceDescription& device_info) { absl::StatusOr<GpuBackendConfig> gpu_backend_config; if (consumer.has_backend_config()) { gpu_backend_config = consumer.backend_config<GpuBackendConfig>(); } if (!gpu_backend_config.ok() && producer.has_backend_config()) { gpu_backend_config = producer.backend_config<GpuBackendConfig>(); } FusionBackendConfig fusion_backend_config = gpu_backend_config.ok() ? 
gpu_backend_config->fusion_backend_config() : FusionBackendConfig::default_instance(); return HloFusionAnalysis::Create( std::move(fusion_backend_config), HloFusionAdaptor::ForProducerConsumer(&producer, &consumer), &device_info); } bool HloFusionAnalysis::HasConsistentTransposeHeros() const { return tiled_transpose_.has_value(); } static bool UseConcatenateFusion( absl::Span<const HloInstructionAdaptor> roots, absl::Span<const HloInstructionAdaptor> heroes) { if (heroes.size() != 1) return false; if (heroes.front().opcode() != HloOpcode::kConcatenate) return false; if (roots.front().shape().IsTuple()) return false; if (heroes.front().instruction().operand_count() > 4) return false; return true; } HloFusionAnalysis::EmitterFusionKind HloFusionAnalysis::GetEmitterFusionKind() const { if (fusion_backend_config_.kind() == kCustomFusionKind) { return EmitterFusionKind::kCustomFusion; } if (fusion_backend_config_.kind() == kTritonFusionKind || fusion_backend_config_.kind() == kTritonGemmFusionKind) { return EmitterFusionKind::kTriton; } if (fusion_backend_config_.kind() == kCuDnnFusionKind) { return EmitterFusionKind::kCuDnn; } if (input_output_info_.smallest_input_dtype_bits < 8 || input_output_info_.smallest_output_dtype_bits < 8) { if (fusion_roots_.size() > 1 && IsInputFusibleNonStridedSlices(fusion_roots_) && AllSliceInputsAreCompatible(fusion_roots_)) { return EmitterFusionKind::kInputSlices; } return EmitterFusionKind::kLoop; } std::optional<HloInstructionAdaptor> first_reduce_hero; for (auto [root, hero] : llvm::zip(fusion_roots_, fusion_heroes_)) { if (IsRealReductionHero(root.instruction(), hero.instruction())) { first_reduce_hero = hero; break; } } if (first_reduce_hero.has_value()) { bool valid_shapes = true; Shape hero_operand_shape = first_reduce_hero->GetOperand(0).shape(); for (auto [root, hero] : llvm::zip(fusion_roots_, fusion_heroes_)) { if (root == *first_reduce_hero) { continue; } if (!IsRealReductionHero(root.instruction(), hero.instruction())) { if (ShapeUtil::ElementsIn(root.shape()) != ShapeUtil::ElementsIn(hero_operand_shape)) { valid_shapes = false; break; } } else if (!AreReductionsMultiOutputFusionCompatible( &hero.instruction(), &first_reduce_hero->instruction())) { valid_shapes = false; break; } } if (valid_shapes) { return EmitterFusionKind::kReduction; } } if (HasConsistentTransposeHeros()) { return EmitterFusionKind::kTranspose; } if (fusion_roots_.size() > 1) { if (IsInputFusibleNonStridedSlices(fusion_roots_) && AllSliceInputsAreCompatible(fusion_roots_)) { return EmitterFusionKind::kInputSlices; } return EmitterFusionKind::kLoop; } if (fusion_roots_[0].opcode() == HloOpcode::kScatter) { return EmitterFusionKind::kScatter; } if (UseConcatenateFusion(fusion_roots_, fusion_heroes_)) { return EmitterFusionKind::kConcatenate; } return EmitterFusionKind::kLoop; } const HloInstruction* HloFusionAnalysis::FindHeroReduction() const { if (GetEmitterFusionKind() != EmitterFusionKind::kReduction) { return nullptr; } const auto& roots = fusion_roots(); CHECK(!roots.empty()); for (auto [root, hero] : llvm::zip(roots, fusion_heroes_)) { if (IsRealReductionHero(root.instruction(), hero.instruction())) { return &hero.instruction(); } } LOG(FATAL) << "Did not find a hero reduction"; } } }
#include "xla/service/gpu/hlo_fusion_analysis.h" #include <gtest/gtest.h> #include "xla/protobuf_util.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/gpu_device_info_for_tests.h" #include "xla/service/gpu/hlo_traversal.h" #include "xla/service/gpu/ir_emission_utils.h" #include "xla/stream_executor/device_description.h" #include "xla/stream_executor/device_description.pb.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/statusor.h" namespace xla::gpu { namespace { class HloFusionAnalysisTest : public HloTestBase {}; TEST_F(HloFusionAnalysisTest, DoesNotPeekOutsideBoundary) { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"( HloModule test_module add { p0 = f32[] parameter(0) p1 = f32[] parameter(1) ROOT add = f32[] add(p0, p1) } ENTRY main { %p0 = f32[1024] parameter(0) %p1 = f32[] parameter(1) %reduce = f32[] reduce(%p0, %p1), dimensions={0}, to_apply=add ROOT %bitcast = s32[] bitcast(%reduce) })")); auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo(); auto* root = module->entry_computation()->root_instruction(); auto analysis = HloFusionAnalysis::Create(*root, device_info); EXPECT_EQ(analysis.GetEmitterFusionKind(), HloFusionAnalysis::EmitterFusionKind::kLoop); auto analysis_fused = HloFusionAnalysis::Create(*root->operand(0), *root, device_info); EXPECT_EQ(analysis_fused.GetEmitterFusionKind(), HloFusionAnalysis::EmitterFusionKind::kReduction); } TEST_F(HloFusionAnalysisTest, ReductionWithMultipleUsers) { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"( HloModule test_module add { p0 = f32[] parameter(0) p1 = f32[] parameter(1) ROOT add = f32[] add(p0, p1) } fused_computation { %p0 = f32[1024] parameter(0) %p1 = f32[] parameter(1) %reduce = f32[] reduce(%p0, %p1), dimensions={0}, to_apply=add %negate = f32[] negate(%reduce) %log = f32[] log(%reduce) ROOT %tuple = (f32[], f32[]) tuple(%negate, %log) } ENTRY main { %p0 = f32[1024] parameter(0) %p1 = f32[] parameter(1) ROOT %fusion = (f32[], f32[]) fusion(%p0, %p1), kind=kLoop, calls=fused_computation })")); auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo(); auto analysis = HloFusionAnalysis::Create( FusionBackendConfig::default_instance(), HloFusionAdaptor::ForInstruction( module->entry_computation()->root_instruction()), &device_info); EXPECT_EQ(analysis.GetEmitterFusionKind(), HloFusionAnalysis::EmitterFusionKind::kReduction); } TEST_F(HloFusionAnalysisTest, ReductionEpilogueFusion) { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"( HloModule test_module add { p0 = f32[] parameter(0) p1 = f32[] parameter(1) ROOT add = f32[] add(p0, p1) } fused_computation { %p0 = f32[1024] parameter(0) %p1 = f32[] parameter(1) %reduce = f32[] reduce(%p0, %p1), dimensions={0}, to_apply=add ROOT %negate = f32[] negate(%reduce) } ENTRY main { %p0 = f32[1024] parameter(0) %p1 = f32[] parameter(1) ROOT %fusion = f32[] fusion(%p0, %p1), kind=kInput, calls=fused_computation })")); auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo(); auto* root = module->entry_computation()->root_instruction(); auto analysis = HloFusionAnalysis::Create( FusionBackendConfig::default_instance(), HloFusionAdaptor::ForInstruction(root), &device_info); EXPECT_EQ(analysis.GetEmitterFusionKind(), HloFusionAnalysis::EmitterFusionKind::kReduction); } TEST_F(HloFusionAnalysisTest, ReductionEpilogueFusionPartiallyFused) { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"( HloModule test_module add { p0 = f32[] parameter(0) p1 = f32[] parameter(1) 
ROOT add = f32[] add(p0, p1) } fusion { %p0 = f32[1024] parameter(0) %p1 = f32[] parameter(1) ROOT %reduce = f32[] reduce(%p0, %p1), dimensions={0}, to_apply=add } ENTRY main { %p0 = f32[1024] parameter(0) %p1 = f32[] parameter(1) %fusion = f32[] fusion(%p0, %p1), kind=kInput, calls=fusion ROOT %negate = f32[] negate(%fusion) })")); auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo(); auto* root = module->entry_computation()->root_instruction(); auto analysis = HloFusionAnalysis::Create(*root->operand(0), *root, device_info); EXPECT_EQ(analysis.GetEmitterFusionKind(), HloFusionAnalysis::EmitterFusionKind::kReduction); } TEST_F(HloFusionAnalysisTest, ReductionEpilogueFusionPartiallyFusedInConsumer) { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"( HloModule test_module add { p0 = f32[] parameter(0) p1 = f32[] parameter(1) ROOT add = f32[] add(p0, p1) } fusion { %p0 = f32[] parameter(0) ROOT %negate = f32[] negate(%p0) } ENTRY main { %p0 = f32[1024] parameter(0) %p1 = f32[] parameter(1) %reduce = f32[] reduce(%p0, %p1), dimensions={0}, to_apply=add ROOT %fusion = f32[] fusion(%reduce), kind=kInput, calls=fusion })")); auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo(); auto* root = module->entry_computation()->root_instruction(); auto analysis = HloFusionAnalysis::Create(*root->operand(0), *root, device_info); EXPECT_EQ(analysis.GetEmitterFusionKind(), HloFusionAnalysis::EmitterFusionKind::kReduction); } TEST_F(HloFusionAnalysisTest, ReductionEpilogueFusionPartiallyFusedInBoth) { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"( HloModule test_module add { p0 = f32[] parameter(0) p1 = f32[] parameter(1) ROOT add = f32[] add(p0, p1) } fusion.1 { %p0 = f32[1024] parameter(0) %p1 = f32[] parameter(1) ROOT %reduce = f32[] reduce(%p0, %p1), dimensions={0}, to_apply=add } fusion.2 { %p0 = f32[] parameter(0) ROOT %negate = f32[] negate(%p0) } ENTRY main { %p0 = f32[1024] parameter(0) %p1 = f32[] parameter(1) %fusion.1 = f32[] fusion(%p0, %p1), kind=kInput, calls=fusion.1 ROOT %fusion.2 = f32[] fusion(%fusion.1), kind=kInput, calls=fusion.2 })")); auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo(); auto* root = module->entry_computation()->root_instruction(); auto analysis = HloFusionAnalysis::Create(*root->operand(0), *root, device_info); EXPECT_EQ(analysis.GetEmitterFusionKind(), HloFusionAnalysis::EmitterFusionKind::kReduction); } TEST_F(HloFusionAnalysisTest, ReduceMultiOutputFusionWithTransposeBitcast) { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"( HloModule test_module add { p0 = f32[] parameter(0) p1 = f32[] parameter(1) ROOT add = f32[] add(p0, p1) } fusion { %p0 = f32[1024, 512]{1,0} parameter(0) %p1 = f32[] parameter(1) %reduce = f32[1024]{0} reduce(%p0, %p1), dimensions={1}, to_apply=add %bitcast = f32[512, 1024]{0,1} bitcast(%p0) ROOT res = (f32[1024]{0}, f32[512, 1024]{0,1}) tuple(%reduce, %bitcast) } ENTRY main { %p0 = f32[1024, 512]{1,0} parameter(0) %p1 = f32[] parameter(1) ROOT %fusion = (f32[1024]{0}, f32[512, 1024]{0,1}) fusion(%p0, %p1), kind=kInput, calls=fusion })")); auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo(); auto* root = module->entry_computation()->root_instruction(); auto analysis = HloFusionAnalysis::Create(*root, device_info); EXPECT_EQ(analysis.GetEmitterFusionKind(), HloFusionAnalysis::EmitterFusionKind::kReduction); } TEST_F(HloFusionAnalysisTest, InvalidReduceMultiOutputFusion) { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"( HloModule 
test_module add { p0 = f32[] parameter(0) p1 = f32[] parameter(1) ROOT add = f32[] add(p0, p1) } fusion { %p0 = f32[1024, 1024]{1,0} parameter(0) %p1 = f32[] parameter(1) %reduce = f32[1024]{0} reduce(%p0, %p1), dimensions={0}, to_apply=add %reduce2 = f32[1024]{0} reduce(%p0, %p1), dimensions={1}, to_apply=add ROOT res = (f32[1024]{0}, f32[1024]{0}) tuple(reduce, reduce2) } ENTRY main { %p0 = f32[1024, 1024]{1,0} parameter(0) %p1 = f32[] parameter(1) ROOT %fusion = (f32[1024]{0}, f32[1024]{0}) fusion(%p0, %p1), kind=kInput, calls=fusion })")); auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo(); auto* root = module->entry_computation()->root_instruction(); auto analysis = HloFusionAnalysis::Create(*root, device_info); EXPECT_EQ(analysis.GetEmitterFusionKind(), HloFusionAnalysis::EmitterFusionKind::kLoop); } TEST_F(HloFusionAnalysisTest, InvalidDevice) { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"( HloModule test_module add { p0 = f32[] parameter(0) p1 = f32[] parameter(1) ROOT add = f32[] add(p0, p1) } ENTRY main { %p0 = f32[1024,128] parameter(0) %p1 = f32[] parameter(1) %reduce = f32[128] reduce(%p0, %p1), dimensions={0}, to_apply=add ROOT %bitcast = s32[128] bitcast(%reduce) })")); stream_executor::GpuDeviceInfoProto device_info_proto; stream_executor::DeviceDescription device_info(device_info_proto); auto* root = module->entry_computation()->root_instruction(); auto analysis_fused = HloFusionAnalysis::Create(*root->operand(0), *root, device_info); EXPECT_EQ(analysis_fused.GetEmitterFusionKind(), HloFusionAnalysis::EmitterFusionKind::kReduction); } TEST_F(HloFusionAnalysisTest, ConcatFusion) { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"( HloModule test_module fused_computation { %p0 = f32[128] parameter(0) %p1 = f32[128] parameter(1) %add = f32[128] add(p0, p0) %concat = f32[256] concatenate(%add, %p1), dimensions={0} ROOT %negate = f32[256] negate(%concat) } ENTRY main { %p0 = f32[128] parameter(0) %p1 = f32[128] parameter(1) ROOT %fusion = f32[256] fusion(%p0, %p1), kind=kInput, calls=fused_computation })")); auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo(); auto* root = module->entry_computation()->root_instruction(); auto analysis = HloFusionAnalysis::Create( FusionBackendConfig::default_instance(), HloFusionAdaptor::ForInstruction(root), &device_info); EXPECT_EQ(analysis.GetEmitterFusionKind(), HloFusionAnalysis::EmitterFusionKind::kConcatenate); } TEST_F(HloFusionAnalysisTest, ExtractValidGpuBackendConfig) { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"( HloModule module fused_computation.1 { %x = s32[64] parameter(0) %y = s32[64] parameter(1) ROOT %root = s32[64] add(%x, %y) } fused_computation.2 { %x = s32[64] parameter(0) %y = s32[64] parameter(1) ROOT %root = s32[64] add(%x, %y) } ENTRY entry { %x = s32[64] parameter(0) %y = s32[64] parameter(1) %fusion.1 = s32[64] fusion(%x, %y), kind=kLoop, calls=fused_computation.1, backend_config={"fusion_backend_config": {kind: "__triton"}} ROOT %fusion.2 = s32[64] fusion(%fusion.1, %y), kind=kLoop, calls=fused_computation.2 })")); auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo(); auto* consumer = module->entry_computation()->root_instruction(); auto* producer = consumer->operand(0); auto producer_analysis = HloFusionAnalysis::Create(*producer, device_info); EXPECT_EQ(producer_analysis.fusion_backend_config().kind(), kTritonFusionKind); auto producer_consumer_analysis = HloFusionAnalysis::Create(*producer, *consumer, device_info); 
EXPECT_EQ(producer_consumer_analysis.fusion_backend_config().kind(), kTritonFusionKind); } TEST_F(HloFusionAnalysisTest, InvalidGpuBackendConfig_SingleInstruction_Ignored) { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"( HloModule module ENTRY entry { %x = s32[64,64,64] parameter(0) %y = s32[64,64,64] parameter(1) ROOT %root = s32[64,128,64] concatenate(x, y), dimensions={1}, backend_config={"outer_dimension_partitions": ["1"]} })")); auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo(); auto* root = module->entry_computation()->root_instruction(); auto analysis = HloFusionAnalysis::Create(*root, device_info); EXPECT_TRUE( protobuf_util::ProtobufEquals(analysis.fusion_backend_config(), FusionBackendConfig::default_instance())); } TEST_F(HloFusionAnalysisTest, InvalidGpuBackendConfig_ProducerConsumer_Ignored) { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"( HloModule module fused_computation { %x = s32[64] parameter(0) %y = s32[64] parameter(1) ROOT %root = s32[64] add(%x, %y) } ENTRY entry { %x = s32[64] parameter(0) %y = s32[64] parameter(1) %fusion = s32[64] fusion(%x, %y), kind=kLoop, calls=fused_computation, backend_config={"invalid_field": "some_value"} ROOT %root = s32[128] concatenate(fusion, y), dimensions={0}, backend_config={"invalid_field": "some_value"} })")); auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo(); auto* consumer = module->entry_computation()->root_instruction(); auto* producer = consumer->operand(0); auto analysis = HloFusionAnalysis::Create(*producer, *consumer, device_info); EXPECT_TRUE( protobuf_util::ProtobufEquals(analysis.fusion_backend_config(), FusionBackendConfig::default_instance())); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/hlo_fusion_analysis.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/hlo_fusion_analysis_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
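Note on the record above: hlo_fusion_analysis.cc picks the smallest element bit width across fusion parameters and roots (SmallestBitWidth), skipping shapes that are not arrays. The sketch below shows that fold with a hypothetical Shape stand-in instead of xla::Shape; it is only an illustration of the pattern, not the real analysis.

// Standalone sketch (hypothetical Shape type): fold a minimum over a container
// while skipping entries that do not qualify.
#include <algorithm>
#include <iostream>
#include <limits>
#include <vector>

struct Shape {
  bool is_array;
  int bit_width;  // e.g. 32 for f32; PRED is widened to 8 in the real code
};

int SmallestBitWidth(const std::vector<Shape>& shapes) {
  int bits = std::numeric_limits<int>::max();
  for (const Shape& shape : shapes) {
    if (!shape.is_array) continue;  // tuples/tokens do not contribute
    bits = std::min(bits, shape.bit_width);
  }
  return bits;
}

int main() {
  std::vector<Shape> operands = {{true, 32}, {false, 0}, {true, 16}};
  std::cout << "smallest bit width: " << SmallestBitWidth(operands) << "\n";
  return 0;
}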
58f7e05d-6f1d-41ca-932f-498558a8bde2
cpp
tensorflow/tensorflow
nvptx_compiler
third_party/xla/xla/service/gpu/nvptx_compiler.cc
third_party/xla/xla/service/gpu/nvptx_compiler_test.cc
#include "xla/service/gpu/nvptx_compiler.h" #include <algorithm> #include <array> #include <cstdint> #include <fstream> #include <iterator> #include <memory> #include <string> #include <tuple> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/base/call_once.h" #include "absl/cleanup/cleanup.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/strings/match.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" #include "absl/strings/str_join.h" #include "absl/strings/string_view.h" #include "absl/synchronization/mutex.h" #include "absl/types/span.h" #include "llvm/IRReader/IRReader.h" #include "llvm/Support/SourceMgr.h" #include "llvm/Support/raw_ostream.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/pass/hlo_pass_fix.h" #include "xla/hlo/pass/hlo_pass_pipeline.h" #include "xla/pjrt/distributed/key_value_store_interface.h" #include "xla/service/algebraic_simplifier.h" #include "xla/service/call_inliner.h" #include "xla/service/convert_mover.h" #include "xla/service/dot_dimension_merger.h" #include "xla/service/dump.h" #include "xla/service/float_normalization.h" #include "xla/service/float_support.h" #include "xla/service/gpu/autotuning/autotuner_util.h" #include "xla/service/gpu/autotuning/conv_algorithm_picker.h" #include "xla/service/gpu/autotuning/gemm_algorithm_picker.h" #include "xla/service/gpu/autotuning/gemm_fusion_autotuner.h" #include "xla/service/gpu/buffer_sharing.h" #include "xla/service/gpu/cublas_padding_requirements.h" #include "xla/service/gpu/gpu_asm_opts_util.h" #include "xla/service/gpu/gpu_compiler.h" #include "xla/service/gpu/ir_emission_utils.h" #include "xla/service/gpu/llvm_gpu_backend/gpu_backend_lib.h" #include "xla/service/gpu/metrics.h" #include "xla/service/gpu/target_constants.h" #include "xla/service/gpu/transforms/algebraic_simplifier.h" #include "xla/service/gpu/transforms/conv_padding_legalization.h" #include "xla/service/gpu/transforms/conv_rewriter.h" #include "xla/service/gpu/transforms/cublas_pad_for_gemms.h" #include "xla/service/gpu/transforms/cudnn_custom_call_compiler.h" #include "xla/service/gpu/transforms/cudnn_fused_conv_rewriter.h" #include "xla/service/gpu/transforms/cudnn_fused_mha_rewriter.h" #include "xla/service/gpu/transforms/cudnn_fused_mha_transpose_fusion.h" #include "xla/service/gpu/transforms/cudnn_fusion_compiler.h" #include "xla/service/gpu/transforms/cudnn_norm_rewriter.h" #include "xla/service/gpu/transforms/cudnn_pad_for_convolutions.h" #include "xla/service/gpu/transforms/cudnn_simplify_padding.h" #include "xla/service/gpu/transforms/cudnn_vectorize_convolutions.h" #include "xla/service/gpu/transforms/dot_sparsity_rewriter.h" #include "xla/service/gpu/transforms/gpusolver_rewriter.h" #include "xla/service/gpu/transforms/sort_rewriter.h" #include "xla/service/gpu/transforms/triangular_solve_rewriter.h" #include "xla/service/hlo_constant_folding.h" #include "xla/service/hlo_cse.h" #include "xla/service/hlo_dataflow_analysis.h" #include "xla/service/hlo_dce.h" #include "xla/service/hlo_module_config.h" #include "xla/service/hlo_verifier.h" #include "xla/service/llvm_ir/llvm_util.h" #include "xla/service/reshape_mover.h" #include "xla/service/tuple_simplifier.h" #include "xla/stream_executor/cuda/cuda_asm_compiler.h" #include "xla/stream_executor/cuda/cuda_diagnostics.h" #include "xla/stream_executor/cuda/cuda_driver.h" #include "xla/stream_executor/cuda/cuda_platform_id.h" 
#include "xla/stream_executor/cuda/nvjitlink.h" #include "xla/stream_executor/cuda/nvjitlink_support.h" #include "xla/stream_executor/cuda/ptx_compilation_method.h" #include "xla/stream_executor/cuda/ptx_compiler.h" #include "xla/stream_executor/cuda/ptx_compiler_support.h" #include "xla/stream_executor/cuda/ptx_linking_method.h" #include "xla/stream_executor/device_description.h" #include "xla/stream_executor/device_memory_allocator.h" #include "xla/stream_executor/dnn.h" #include "xla/stream_executor/gpu/gpu_asm_opts.h" #include "xla/stream_executor/gpu/gpu_driver.h" #include "xla/stream_executor/gpu/gpu_executor.h" #include "xla/stream_executor/semantic_version.h" #include "xla/stream_executor/stream_executor.h" #include "xla/tsl/util/env_var.h" #include "xla/util.h" #include "xla/xla.pb.h" #include "xla/xla_data.pb.h" #include "tsl/platform/env.h" #include "tsl/platform/errors.h" #include "tsl/platform/path.h" #include "tsl/platform/status.h" #include "tsl/platform/statusor.h" #include "tsl/platform/threadpool.h" #include "tsl/profiler/lib/scoped_annotation.h" #include "tsl/profiler/lib/traceme.h" namespace xla { namespace gpu { namespace { class ConvBfloat16Support : public FloatSupport { public: explicit ConvBfloat16Support( se::dnn::VersionInfo cudnn_version, se::CudaComputeCapability cuda_compute_capability) : FloatSupport(BF16), is_conv_bf16_supported_((cudnn_version.major_version() > 8 || (cudnn_version.major_version() == 8 && cudnn_version.minor_version() >= 2)) && cuda_compute_capability.IsAtLeast( se::CudaComputeCapability::AMPERE)) {} bool SupportsLowPrecisionOperand(const HloInstruction& hlo, int64_t operand_index) const override { return (hlo.opcode() != HloOpcode::kConvolution) || is_conv_bf16_supported_; } bool SupportsLowPrecisionOutput(const HloInstruction& hlo) const override { return (hlo.opcode() != HloOpcode::kConvolution) || is_conv_bf16_supported_; } bool SupportsMixedPrecisions(const HloInstruction& hlo) const override { return (hlo.opcode() != HloOpcode::kConvolution); } private: bool is_conv_bf16_supported_; }; class MatmulBfloat16Support : public FloatSupport { public: explicit MatmulBfloat16Support( se::CudaComputeCapability cuda_compute_capability) : FloatSupport(BF16), is_matmul_bf16_supported_(cuda_compute_capability.IsAtLeast( se::CudaComputeCapability::AMPERE)) {} bool SupportsLowPrecisionOperand(const HloInstruction& hlo, int64_t operand_index) const override { return (hlo.opcode() != HloOpcode::kDot) || is_matmul_bf16_supported_; } bool SupportsLowPrecisionOutput(const HloInstruction& hlo) const override { return (hlo.opcode() != HloOpcode::kDot) || is_matmul_bf16_supported_; } bool SupportsMixedPrecisions(const HloInstruction& hlo) const override { return true; } private: bool is_matmul_bf16_supported_; }; } absl::Status NVPTXCompiler::OptimizeHloConvolutionCanonicalization( HloModule* hlo_module, se::GpuComputeCapability gpu_version, se::dnn::VersionInfo dnn_version, se::DeviceMemoryAllocator* device_allocator, const se::SemanticVersion& toolkit_version) { auto cuda_compute_capability = std::get<se::CudaComputeCapability>(gpu_version); HloPassPipeline pipeline("conv_canonicalization"); pipeline.AddInvariantCheckerDebug<HloVerifier>( false, false); ConvBfloat16Support conv_bf16_support(dnn_version, cuda_compute_capability); pipeline.AddPass<FloatNormalization>(&conv_bf16_support); MatmulBfloat16Support matmul_bf16_support(cuda_compute_capability); pipeline.AddPass<FloatNormalization>(&matmul_bf16_support); pipeline.AddPass<GpusolverRewriter>(); if 
(!hlo_module->config() .debug_options() .xla_gpu_experimental_disable_binary_libraries()) { pipeline.AddPass<ConvRewriter>(cuda_compute_capability); pipeline.AddPass<CudnnFusedConvRewriter>(cuda_compute_capability, dnn_version, toolkit_version); pipeline.AddPass<ConvPaddingLegalization>(); pipeline.AddPass<CudnnPadForConvolutions>(cuda_compute_capability); pipeline.AddPass<CudnnVectorizeConvolutions>(cuda_compute_capability, dnn_version); } pipeline.AddPass<CallInliner>(); pipeline.AddPass<TupleSimplifier>(); AlgebraicSimplifierOptions algsimp_options = GetAlgebraicSimplifierOptions(hlo_module->config()); algsimp_options.set_supports_non_canonical_dots(false); algsimp_options.set_enable_conv_operand_swap(false); algsimp_options.set_enable_unconditional_reduce_of_concat_replacement(false); pipeline.AddPass<HloPassFix<GpuAlgebraicSimplifier>>(algsimp_options, gpu_version); if (!hlo_module->config() .debug_options() .xla_gpu_experimental_disable_binary_libraries()) { pipeline.AddPass<CudnnSimplifyPadding>(); } [&, &pipeline = pipeline.AddPass<HloPassFix<HloPassPipeline>>( "reshape_mover_after_conv_canonicalization")] { ReshapeMoverOptions reshape_mover_options; reshape_mover_options.reshape_of_1d_broadcast_is_cheap = true; pipeline.AddPass<ReshapeMover>(reshape_mover_options); pipeline.AddPass<GpuAlgebraicSimplifier>(algsimp_options, gpu_version); }(); [&, &pipeline = pipeline.AddPass<HloPassFix<HloPassPipeline>>( "simplify_after_conv_canonicalization")] { pipeline.AddPass<ConvertMover>(); pipeline.AddPass<GpuAlgebraicSimplifier>(algsimp_options, gpu_version); }(); pipeline.AddPass<HloConstantFolding>(); TF_RETURN_IF_ERROR(pipeline.Run(hlo_module).status()); return absl::OkStatus(); } absl::Status NVPTXCompiler::OptimizeHloPostLayoutAssignment( HloModule* hlo_module, se::StreamExecutor* stream_exec, const CompileOptions& options, const TargetConfig& gpu_target_config, tsl::thread::ThreadPool* thread_pool) { auto cuda_compute_capability = std::get<se::CudaComputeCapability>( gpu_target_config.device_description.gpu_compute_capability()); if (hlo_module->config().debug_options().xla_gpu_enable_cudnn_fmha() && !hlo_module->config() .debug_options() .xla_gpu_experimental_disable_binary_libraries()) { HloPassPipeline mha_fusion_pipeline( "nvptx cudnn multi-headed attention fusion"); AlgebraicSimplifierOptions alg_sim_options = GetAlgebraicSimplifierOptions(hlo_module->config()); alg_sim_options.set_supports_non_canonical_dots(false); alg_sim_options.set_is_layout_sensitive(true); alg_sim_options.set_enable_conv_operand_swap(false); alg_sim_options.set_minmax_propagate_nan( !hlo_module->config().debug_options().xla_gpu_enable_fast_min_max()); alg_sim_options.set_enable_unconditional_reduce_of_concat_replacement( false); mha_fusion_pipeline.AddPass<HloCSE>(true); se::GpuComputeCapability gpu_version = gpu_target_config.device_description.gpu_compute_capability(); mha_fusion_pipeline.AddPass<HloPassFix<GpuAlgebraicSimplifier>>( alg_sim_options, gpu_version); mha_fusion_pipeline.AddPass<HloCSE>(true); if (stream_exec) { mha_fusion_pipeline.AddPass<CudnnFusedMHARewriter>( cuda_compute_capability, stream_exec); } else { mha_fusion_pipeline.AddPass<CudnnFusedMHARewriter>( cuda_compute_capability, gpu_target_config.dnn_version_info); } mha_fusion_pipeline.AddPass<GpuAlgebraicSimplifier>(alg_sim_options, gpu_version); mha_fusion_pipeline.AddPass<CudnnFusedMHATransposeFusion>(); mha_fusion_pipeline.AddPass<HloDCE>(); mha_fusion_pipeline.AddPass<HloCSE>(true); 
TF_RETURN_IF_ERROR(mha_fusion_pipeline.Run(hlo_module).status()); } HloPassPipeline pre_pipeline("nvptx post-layout_assignment part 1"); if (hlo_module->config().debug_options().xla_gpu_enable_cudnn_layer_norm() && !hlo_module->config() .debug_options() .xla_gpu_experimental_disable_binary_libraries()) { pre_pipeline.AddPass<CudnnNormRewriter>(cuda_compute_capability); } pre_pipeline.AddPass<DotDimensionMerger>(); pre_pipeline.AddPass<DotSparsityRewriter>(); if (!hlo_module->config() .debug_options() .xla_gpu_experimental_disable_binary_libraries()) { for (const CublasPaddingRequirement& requirement : CublasPaddingRequirements) { if (cuda_compute_capability.IsAtLeast( requirement.min_compute_capability)) { pre_pipeline.AddPass<CublasPadForGemms>(cuda_compute_capability, requirement.data_type, requirement.multiple_of); } } } pre_pipeline.AddPass<HloConstantFolding>(); TF_RETURN_IF_ERROR(pre_pipeline.Run(hlo_module).status()); TF_RETURN_IF_ERROR(GpuCompiler::OptimizeHloPostLayoutAssignment( hlo_module, stream_exec, options, gpu_target_config, thread_pool)); HloPassPipeline post_pipeline("nvptx post-layout_assignment part 2"); post_pipeline.AddPass<TriangularSolveRewriter>(); TF_RETURN_IF_ERROR(post_pipeline.Run(hlo_module).status()); return absl::OkStatus(); } bool NVPTXCompiler::RequiresCollectiveScheduleLinearizer( const HloModule* module, se::StreamExecutor* stream_exec) { if (stream_exec == nullptr || !GpuConvAlgorithmPicker::IsEnabled(module)) { return false; } for (const HloComputation* comp : module->MakeNonfusionComputations()) { for (const HloInstruction* inst : comp->instructions()) { if (GpuConvAlgorithmPicker::IsCandidate(inst)) { return true; } } } return false; } absl::Status NVPTXCompiler::AddConvAndGemmAutotuningPasses( HloPassPipeline* pipeline, const se::GpuComputeCapability& gpu_version, const CompileOptions& options, HloModule* hlo_module, AutotuneConfig& autotune_config, tsl::thread::ThreadPool* thread_pool) { if (hlo_module->config() .debug_options() .xla_gpu_experimental_disable_binary_libraries()) { return absl::OkStatus(); } if (GpuConvAlgorithmPicker::IsEnabled(hlo_module)) { pipeline->AddPass<GpuConvAlgorithmPicker>(autotune_config); } if (!std::get<se::CudaComputeCapability>(gpu_version).IsAtLeastAmpere() || options.is_autotuning_compilation) { pipeline->AddPass<GemmAlgorithmPicker>(autotune_config); } return absl::OkStatus(); } absl::Status NVPTXCompiler::AddGemmFusionAutotuningPasses( HloPassPipeline* pipeline, HloModule* hlo_module, AutotuneConfig& autotune_config, tsl::thread::ThreadPool* thread_pool, const MultiProcessKeyValueStore& key_value_store, const se::SemanticVersion& toolkit_version) { pipeline->AddPass<GemmFusionAutotuner>(autotune_config, toolkit_version, thread_pool, key_value_store); return absl::OkStatus(); } absl::Status NVPTXCompiler::AddCustomKernelReplacementPasses( HloPassPipeline* pipeline, const DebugOptions& debug_options) { if (debug_options.xla_gpu_enable_cub_radix_sort()) { pipeline->AddPass<SortRewriter>(); } return absl::OkStatus(); } absl::Status NVPTXCompiler::RunCudnnCompilerPasses( HloModule* module, se::StreamExecutor* stream_exec, BinaryMap* dnn_compiled_graphs) { if (module->config() .debug_options() .xla_gpu_experimental_disable_binary_libraries()) { return absl::OkStatus(); } tsl::profiler::ScopedAnnotation annotation([&] { return absl::StrFormat("XlaCompileCudnnFusion:#module=%s,program_id=%d#", module->name(), module->unique_id()); }); CuDnnFusionCompiler fusion_compiler(*stream_exec, *dnn_compiled_graphs); 
TF_RETURN_IF_ERROR(fusion_compiler.Run(module).status()); CuDnnCustomCallCompiler call_compiler(*stream_exec, *dnn_compiled_graphs); return call_compiler.Run(module).status(); } namespace { bool MaybeLoadPtxFromFile(const HloModuleConfig module_config, const HloModule* module, std::string* ptx) { std::string prefix = xla::FilenameFor(*module, "", *ptx); std::string matched_filename; for (const std::string& full_filename : module_config.debug_options().xla_gpu_ptx_file()) { auto filename = tsl::io::Basename(full_filename); if (absl::StartsWith(filename, prefix)) { matched_filename = full_filename; VLOG(1) << "RunBackend() - Will load PTX from file: " << full_filename; break; } } if (!module_config.debug_options().xla_gpu_ptx_file().empty() && matched_filename.empty()) { VLOG(1) << "RunBackend() - For module with prefix '" << prefix << "', we did not find a PTX file to load."; } if (!matched_filename.empty()) { std::ifstream ifs(matched_filename, std::ifstream::in); *ptx = std::string(std::istreambuf_iterator<char>(ifs), std::istreambuf_iterator<char>()); CHECK(!ptx->empty()) << "Empty or non-existent PTX file: " << matched_filename; return true; } return false; } std::unique_ptr<llvm::Module> MaybeLoadLLVMFromFile(const HloModule* module, llvm::Module* llvm_module) { if (module == nullptr) { return nullptr; } std::string prefix = xla::FilenameFor(*module, "", ""); auto xla_gpu_llvm_ir_file = module->config().debug_options().xla_gpu_llvm_ir_file(); auto matched_filename = absl::c_find_if( xla_gpu_llvm_ir_file, [prefix](const std::string& full_filename) { return absl::StartsWith(tsl::io::Basename(full_filename), prefix); }); if (!xla_gpu_llvm_ir_file.empty() && matched_filename == std::end(xla_gpu_llvm_ir_file)) { VLOG(1) << "RunBackend() - For module with prefix '" << prefix << "', we did not find an LLVM file to load."; } if (matched_filename != std::end(xla_gpu_llvm_ir_file)) { VLOG(1) << "RunBackend() - Will load LLVM from file: " << *matched_filename; llvm::LLVMContext& context = llvm_module->getContext(); llvm::SMDiagnostic err; std::unique_ptr<llvm::Module> loaded_module = llvm::parseIRFile(*matched_filename, err, context); if (!loaded_module) { err.print("ERR", llvm::errs()); LOG(FATAL) << "Failed to load an LLVM file. It is probably invalid LLVM."; } llvm_ir::DumpIrIfEnabled(*module, *loaded_module, false); return loaded_module; } return nullptr; } } void WarnIfBadDriverJITVersion() { static absl::once_flag run_once; absl::call_once(run_once, [] { auto version_or_status = se::cuda::Diagnostician::FindKernelDriverVersion(); if (!version_or_status.ok()) { LOG(WARNING) << "Couldn't read CUDA driver version."; return; } se::cuda::DriverVersion version = version_or_status.value(); if (version < std::make_tuple(396, 20, 0)) { LOG(WARNING) << "*** WARNING *** Invoking the PTX->SASS JIT from driver version " << se::cuda::DriverVersionToString(version) << ", which is older than 396.20.0. 
These versions are known to " "miscompile XLA code, leading to incorrect results or " "invalid-address errors.\nXLA only uses the driver JIT if it " "cannot find ptxas; you don't need to update your driver if " "you can point XLA to ptxas 9.2.88 or newer."; } }); } NVPTXCompiler::NVPTXCompiler() : GpuCompiler(stream_executor::cuda::kCudaPlatformId, nvptx::TargetTriple(), nvptx::DataLayout()) {} HloDataflowAnalysis::CanShareBuffer NVPTXCompiler::GetCanShareBuffer() const { return &CanShareBufferHint; } constexpr const uint8_t kPtxPrefix[] = {'P', 'T', 'X', ':', ' '}; absl::StatusOr<GpuCompiler::BackendCompileResult> NVPTXCompiler::CompileTargetBinary(const HloModuleConfig& module_config, llvm::Module* llvm_module, se::GpuComputeCapability gpu_version, bool relocatable, const HloModule* debug_module, const CompileOptions& options) { std::unique_ptr<llvm::Module> loaded_module = MaybeLoadLLVMFromFile(debug_module, llvm_module); llvm::Module* selected_module = nullptr; if (loaded_module) { selected_module = loaded_module.get(); } else { selected_module = llvm_module; } std::string ptx; if (!(debug_module && MaybeLoadPtxFromFile(module_config, debug_module, &ptx))) { XLA_SCOPED_LOGGING_TIMER_IF( absl::StrCat( "NVPTXCompiler::CompileTargetBinary - CompileToPtx for ", (debug_module != nullptr ? debug_module->name() : "(unknown")), !options.is_autotuning_compilation); uint64_t start_usecs = tsl::Env::Default()->NowMicros(); TF_ASSIGN_OR_RETURN(ptx, nvptx::CompileToPtx(selected_module, gpu_version, module_config.debug_options())); uint64_t end_usecs = tsl::Env::Default()->NowMicros(); RecordLlvmPassesAndLlvmToPtxDuration(end_usecs - start_usecs); } TF_ASSIGN_OR_RETURN(se::PtxLinkingMethod linking_method, ChooseLinkingMethod(module_config.debug_options())); if (linking_method == se::PtxLinkingMethod::kNvJitLink && relocatable) { VLOG(2) << "Deferring the PTX to CUBIN compilation of the relocatable " "module to the linking step."; std::vector<uint8_t> binary; if (!ptx.empty()) { binary.reserve(sizeof(kPtxPrefix) + ptx.size() + 1); binary.insert(binary.end(), kPtxPrefix, kPtxPrefix + sizeof(kPtxPrefix)); binary.insert(binary.end(), ptx.begin(), ptx.end()); binary.emplace_back('\0'); } return BackendCompileResult{std::move(ptx), std::move(binary)}; } absl::StatusOr<std::vector<uint8_t>> maybe_cubin = CompileGpuAsmOrGetCachedResult( ptx, std::get<se::CudaComputeCapability>(gpu_version), module_config, (debug_module != nullptr ? 
debug_module->name() : "(unknown)"), relocatable, options); if (!maybe_cubin.ok()) { return maybe_cubin.status(); } return BackendCompileResult{std::move(ptx), std::move(maybe_cubin.value())}; } using stream_executor::PtxCompilationMethod; std::vector<PtxCompilationMethod> GetSupportedCompilationMethods() { std::vector<PtxCompilationMethod> methods; if (se::IsLibNvPtxCompilerSupported()) { methods.emplace_back(PtxCompilationMethod::kNvPtxCompiler); } if (se::IsLibNvJitLinkSupported()) { methods.emplace_back(PtxCompilationMethod::kNvJitLink); } methods.emplace_back(PtxCompilationMethod::kPtxas); return methods; } absl::StatusOr<PtxCompilationMethod> ChooseCompilationMethod( absl::Span<const PtxCompilationMethod> available_compilation_methods, const DebugOptions& debug_options, bool relocatable) { std::vector<PtxCompilationMethod> compilation_methods( available_compilation_methods.begin(), available_compilation_methods.end()); VLOG(2) << "Available compilation methods: " << absl::StrJoin(compilation_methods, ", "); auto remove_compilation_method = [&](PtxCompilationMethod method) { auto it = absl::c_find(compilation_methods, method); if (it != compilation_methods.end()) { compilation_methods.erase(it); } }; if (!debug_options.xla_gpu_enable_libnvjitlink()) { VLOG(3) << "Discarding NvJitLink since it is disabled."; remove_compilation_method(PtxCompilationMethod::kNvJitLink); } if (!debug_options.xla_gpu_enable_libnvptxcompiler()) { VLOG(3) << "Discarding NvPtxCompiler since it is disabled."; remove_compilation_method(PtxCompilationMethod::kNvPtxCompiler); } VLOG(2) << "Supported and enabled compilation methods: " << absl::StrJoin(compilation_methods, ", "); if (relocatable && absl::c_linear_search(compilation_methods, PtxCompilationMethod::kNvJitLink)) { VLOG(3) << "Discarding NvJitLink since it can't produce the requested " "relocatable CUBIN."; remove_compilation_method(PtxCompilationMethod::kNvJitLink); } VLOG(2) << "Considered compilation methods: " << absl::StrJoin(compilation_methods, ", "); if (compilation_methods.empty()) { return absl::UnavailableError( "No supported compilation method is available."); } return compilation_methods.front(); } static absl::StatusOr<std::vector<uint8_t>> AssembleOptionsAndCompile( const std::string& ptx, se::CudaComputeCapability cc, const HloModuleConfig& hlo_module_config, GpuCompiler::CompileOptions options, bool relocatable) { if (ptx.empty()) { return std::vector<uint8_t>(); } se::GpuAsmOpts ptxas_config = PtxOptsFromDebugOptions(hlo_module_config.debug_options()); if (relocatable) { ptxas_config.extra_flags.push_back("-c"); } uint64_t start_usecs = tsl::Env::Default()->NowMicros(); bool cancel_if_reg_spill = hlo_module_config.debug_options() .xla_gpu_filter_kernels_spilling_registers_on_autotuning() && options.is_autotuning_compilation; std::vector<PtxCompilationMethod> supported_compilation_methods = GetSupportedCompilationMethods(); TF_ASSIGN_OR_RETURN( PtxCompilationMethod compilation_method, ChooseCompilationMethod(supported_compilation_methods, hlo_module_config.debug_options(), relocatable)); VLOG(2) << "Using compilation method: " << compilation_method; absl::StatusOr<std::vector<uint8_t>> maybe_cubin = [&] { switch (compilation_method) { case PtxCompilationMethod::kNvJitLink: return se::CompileAndLinkUsingLibNvJitLink( cc.major, cc.minor, {se::NvJitLinkInput{ se::NvJitLinkInput::Type::kPtx, absl::Span<const uint8_t>{ reinterpret_cast<const uint8_t*>(ptx.c_str()), ptx.size() + 1 }}}, ptxas_config, cancel_if_reg_spill); case 
PtxCompilationMethod::kNvPtxCompiler: return se::CompileGpuAsmUsingLibNvPtxCompiler( cc.major, cc.minor, ptx.c_str(), ptxas_config, cancel_if_reg_spill); case PtxCompilationMethod::kPtxas: return se::CompileGpuAsmUsingPtxAs(cc.major, cc.minor, ptx.c_str(), ptxas_config, cancel_if_reg_spill); } }(); if (maybe_cubin.ok()) { uint64_t end_usecs = tsl::Env::Default()->NowMicros(); RecordPtxToCubinDuration(end_usecs - start_usecs); VLOG(1) << "Compiled PTX size: " << ptx.size() << " bytes. CUBIN size: " << maybe_cubin.value().size() << " bytes."; return maybe_cubin; } if (maybe_cubin.status().code() == absl::StatusCode::kNotFound) { if (!hlo_module_config.debug_options() .xla_gpu_unsafe_fallback_to_driver_on_ptxas_not_found()) { LOG(WARNING) << nvptx::CantFindCudaMessage( "Can't find ptxas binary in ${CUDA_DIR}/bin. Custom ptxas " "location can be specified using $PATH.", hlo_module_config.debug_options().xla_gpu_cuda_data_dir()); LOG(FATAL) << "Can't find ptxas binary. You can pass the flag " "--xla_gpu_unsafe_fallback_to_driver_on_ptxas_not_found " "to use the GPU driver for compiling ptx instead. However " "this option is discouraged and can lead to increased " "memory consumption and other subtle runtime issues."; } LOG_FIRST_N(WARNING, 1) << nvptx::CantFindCudaMessage( "Can't find ptxas binary in ${CUDA_DIR}/bin. Will fall back to " "the GPU driver for PTX -> sass compilation. This is OK so " "long as you don't see a warning below about an out-of-date " "driver version. Custom ptxas location can be specified " "using $PATH.", hlo_module_config.debug_options().xla_gpu_cuda_data_dir()); WarnIfBadDriverJITVersion(); return maybe_cubin; } if (maybe_cubin.status().code() == absl::StatusCode::kCancelled) { return maybe_cubin; } if (maybe_cubin.status().code() == absl::StatusCode::kResourceExhausted) { return maybe_cubin; } if (maybe_cubin.status().code() != absl::StatusCode::kUnimplemented) { return AppendStatus( maybe_cubin.status(), "If the error message indicates that a file could not be written, " "please verify that sufficient filesystem space is provided."); } return maybe_cubin; } absl::StatusOr<std::vector<uint8_t>> NVPTXCompiler::CompileGpuAsmOrGetCachedResult( const std::string& ptx, se::CudaComputeCapability cc, const HloModuleConfig& hlo_module_config, absl::string_view module_name, bool relocatable, const CompileOptions& options) { XLA_SCOPED_LOGGING_TIMER_IF( absl::StrCat("NVPTXCompiler::CompileGpuAsmOrGetCachedResult for ", module_name), !options.is_autotuning_compilation); tsl::profiler::ScopedAnnotation annotation([&] { return absl::StrFormat("XlaCompileGpuAsm:#module=%s#", module_name); }); tsl::profiler::TraceMe activity("PTX->CUBIN", tsl::profiler::TraceMeLevel::kInfo); CompilationCacheValue* cache_value = nullptr; bool inserted = [&] { auto flags = CompilationCacheFlags{ hlo_module_config.debug_options() .xla_gpu_filter_kernels_spilling_registers_on_autotuning()}; absl::MutexLock lock(&mutex_); auto [iter, inserted] = compilation_cache_.emplace( std::piecewise_construct, std::forward_as_tuple(ptx, cc.major, cc.minor, relocatable, flags), std::forward_as_tuple()); cache_value = &iter->second; return inserted; }(); absl::MutexLock lock(&cache_value->mutex); if (inserted) { CHECK(!cache_value->compilation_done); absl::Cleanup mark_compilation_as_done = [cache_value] { cache_value->compilation_done = true; cache_value->compilation_done_cv.SignalAll(); }; cache_value->maybe_cubin = AssembleOptionsAndCompile( ptx, cc, hlo_module_config, options, relocatable); return 
cache_value->maybe_cubin; } while (!cache_value->compilation_done) { cache_value->compilation_done_cv.Wait(&cache_value->mutex); } return cache_value->maybe_cubin; } static bool IsNvlinkEnabled() { const bool use_nvlink_by_default = #ifdef TF_DISABLE_NVLINK_BY_DEFAULT false; #else true; #endif bool use_nvlink; TF_CHECK_OK(tsl::ReadBoolFromEnvVar("TF_USE_NVLINK_FOR_PARALLEL_COMPILATION", use_nvlink_by_default, &use_nvlink)); return use_nvlink; } static absl::StatusOr<stream_executor::SemanticVersion> GetAsmCompilerVersion( const DebugOptions& debug_options, const std::string& preferred_cuda_dir) { if (debug_options.xla_gpu_enable_libnvptxcompiler() && se::IsLibNvPtxCompilerSupported()) { return stream_executor::GetLibNvPtxCompilerVersion(); } return se::GetAsmCompilerVersion(preferred_cuda_dir); } absl::StatusOr<se::PtxLinkingMethod> NVPTXCompiler::ChooseLinkingMethod( const DebugOptions& debug_options) { se::GpuAsmOpts ptxas_config = PtxOptsFromDebugOptions(debug_options); std::string& preferred_cuda_dir = ptxas_config.preferred_cuda_dir; using LinkingMethod = se::PtxLinkingMethod; if (stream_executor::IsLibNvJitLinkSupported() && debug_options.xla_gpu_enable_libnvjitlink()) { return se::PtxLinkingMethod::kNvJitLink; } TF_ASSIGN_OR_RETURN(auto asm_compiler_version, GetAsmCompilerVersion(debug_options, preferred_cuda_dir)); auto nvlink_version = stream_executor::GetNvLinkVersion(preferred_cuda_dir); if (IsNvlinkEnabled() && nvlink_version.ok() && nvlink_version.value() >= asm_compiler_version) { return LinkingMethod::kNvLink; } int ptxas_version = asm_compiler_version.major() * 1000 + asm_compiler_version.minor() * 10; TF_ASSIGN_OR_RETURN(int driver_version, se::gpu::GpuDriver::GetDriverVersion()); if (driver_version >= ptxas_version) { return LinkingMethod::kDriver; } LOG_FIRST_N(WARNING, 1) << "The NVIDIA driver's CUDA version is " << absl::StrFormat("%d.%d", driver_version / 1000, (driver_version % 1000) / 10) << " which is older than the PTX compiler version " << asm_compiler_version << ". Because the driver is older than the PTX compiler version, XLA is " "disabling parallel compilation, which may slow down compilation. 
" "You should update your NVIDIA driver or use the NVIDIA-provided " "CUDA forward compatibility packages."; return se::PtxLinkingMethod::kNone; } absl::StatusOr<bool> NVPTXCompiler::CanUseLinkModules( const HloModuleConfig& hlo_module_config) { TF_ASSIGN_OR_RETURN(se::PtxLinkingMethod linking_method, ChooseLinkingMethod(hlo_module_config.debug_options())); return linking_method != se::PtxLinkingMethod::kNone; } absl::StatusOr<std::vector<uint8_t>> NVPTXCompiler::LinkModules( se::GpuComputeCapability compute_capability, se::StreamExecutor* stream_exec, std::vector<std::vector<uint8_t>> modules, const DebugOptions& debug_options) { if (modules.empty()) return std::vector<uint8_t>{}; auto cc = std::get<stream_executor::CudaComputeCapability>(compute_capability); TF_ASSIGN_OR_RETURN(se::PtxLinkingMethod linking_method, ChooseLinkingMethod(debug_options)); VLOG(1) << "Linking " << modules.size() << " modules with linking method: " << linking_method; if (linking_method == se::PtxLinkingMethod::kNvJitLink) { const auto module_contains_ptx = [](const std::vector<uint8_t>& module) -> bool { return module.size() >= sizeof(kPtxPrefix) && std::equal(std::begin(kPtxPrefix), std::end(kPtxPrefix), std::begin(module)); }; std::vector<stream_executor::NvJitLinkInput> nvjitlink_inputs; nvjitlink_inputs.reserve(modules.size()); for (std::vector<uint8_t>& module : modules) { if (module_contains_ptx(module)) { nvjitlink_inputs.push_back( {se::NvJitLinkInput::Type::kPtx, absl::Span<const uint8_t>(module).subspan(sizeof(kPtxPrefix))}); } else { nvjitlink_inputs.push_back({se::NvJitLinkInput::Type::kCubin, module}); } } se::GpuAsmOpts ptxas_config = PtxOptsFromDebugOptions(debug_options); return stream_executor::CompileAndLinkUsingLibNvJitLink( cc.major, cc.minor, nvjitlink_inputs, ptxas_config, false); } std::vector<stream_executor::CubinOrPTXImage> cubin_images; cubin_images.reserve(modules.size()); for (std::vector<uint8_t>& module : modules) { { std::string profile = absl::StrCat("sm_", cc.major, cc.minor); cubin_images.push_back({std::move(profile), std::move(module)}); } } if (linking_method == se::PtxLinkingMethod::kNvLink) { return LinkUsingNvlink(cc, debug_options.xla_gpu_cuda_data_dir(), cubin_images); } return LinkGpuAsm(cc, se::gpu::ExtractGpuExecutor(stream_exec)->gpu_context(), cubin_images); } } }
#include "xla/service/gpu/nvptx_compiler.h" #include <cstdint> #include <memory> #include <gtest/gtest.h> #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_query.h" #include "xla/service/backend.h" #include "xla/service/buffer_assignment.h" #include "xla/service/buffer_value.h" #include "xla/service/gpu/gpu_constants.h" #include "xla/service/gpu/gpu_hlo_schedule.h" #include "xla/service/gpu/gpu_latency_hiding_scheduler.h" #include "xla/service/hlo_ordering.h" #include "xla/service/logical_buffer.h" #include "xla/stream_executor/device_description.h" #include "xla/tests/hlo_test_base.h" #include "xla/tsl/lib/core/status_test_util.h" #include "xla/util.h" #include "xla/xla.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { int64_t CountCopies(const HloComputation& computation) { int64_t count = 0; for (const auto& instruction : computation.instructions()) { if (instruction->opcode() == HloOpcode::kCopy) { count++; } } return count; } int64_t CountCopies(const HloModule& module) { int64_t count = 0; for (const auto& computation : module.computations()) { count += CountCopies(*computation); } return count; } class NVPTXCompilerTest : public HloTestBase { public: absl::StatusOr<std::unique_ptr<BufferAssignment>> AssignBuffers( HloModule* module) { constexpr uint64_t pointer_size = 4; const se::DeviceDescription& gpu_device_info = backend().default_stream_executor()->GetDeviceDescription(); TF_RETURN_IF_ERROR( ScheduleGpuModule(module, pointer_size, gpu_device_info).status()); auto buffer_size_bytes_function = [](const BufferValue& buffer_value) -> int64_t { return GetSizeOfShape(buffer_value.shape(), pointer_size); }; return BufferAssigner::Run( module, std::make_unique<SequentialHloOrdering>(module->schedule()), buffer_size_bytes_function, [](LogicalBuffer::Color) { return kXlaAllocatedBufferAlignBytes; }); } }; class NVPTXCompilerTestTriton : public NVPTXCompilerTest { public: DebugOptions GetDebugOptionsForTest() override { DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest(); debug_options.set_xla_gpu_cublas_fallback(false); return debug_options; } }; TEST_F(NVPTXCompilerTest, AllReducePerformedInplace) { const absl::string_view hlo_string = R"( HloModule Module, input_output_alias={ {}: (0, {}, may-alias) } summit { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT add = f32[] add(lhs, rhs) } ENTRY entry { param0 = f32[128] parameter(0) ROOT allreduce = f32[128] all-reduce(param0), replica_groups={}, to_apply=summit } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(auto buffer_assignment, AssignBuffers(module.get())); HloInstruction* all_reduce = module->entry_computation()->root_instruction(); EXPECT_TRUE(buffer_assignment->SharesTopLevelSlice(all_reduce, all_reduce->operand(0))); } TEST_F(NVPTXCompilerTest, AllReducePerformedInplaceTwoOperands) { const absl::string_view hlo_string = R"( HloModule Module, input_output_alias={ {0}: (0, {}, may-alias), {1}: (1, {}, may-alias) } summit { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT add = f32[] add(lhs, rhs) } ENTRY entry { param0 = f32[128] parameter(0) param1 = f32[128] parameter(1) ROOT allreduce = (f32[128], f32[128]) all-reduce(param0, param1), replica_groups={}, to_apply=summit } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> 
module, ParseAndReturnVerifiedModule(hlo_string)); TF_ASSERT_OK_AND_ASSIGN(auto buffer_assignment, AssignBuffers(module.get())); HloInstruction* all_reduce = module->entry_computation()->root_instruction(); EXPECT_TRUE(buffer_assignment->SharesSliceAtIndex( all_reduce, {0}, all_reduce->operand(0), {})); EXPECT_TRUE(buffer_assignment->SharesSliceAtIndex( all_reduce, {1}, all_reduce->operand(1), {})); } TEST_F(NVPTXCompilerTestTriton, DotDimensionAreSortedBeforePaddingForCublasEnablingTritonFusion) { const absl::string_view hlo_string = R"( ENTRY e { p0 = f16[11,22,33,44] parameter(0) p1 = s8[11,22,33,44] parameter(1) p1c = f16[11,22,33,44] convert(p1) ROOT d = f16[11,22,44,44] dot(p0, p1c), lhs_batch_dims={0,1}, lhs_contracting_dims={2}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} })"; se::CudaComputeCapability cc = backend() .default_stream_executor() ->GetDeviceDescription() .cuda_compute_capability(); if (cc.IsAtLeastAmpere()) { MatchOptimizedHlo(hlo_string, R"( ; CHECK: ENTRY ; CHECK-NEXT: parameter ; CHECK-NEXT: parameter ; CHECK-NEXT: __triton_gemm )"); } else { MatchOptimizedHlo(hlo_string, R"( ; CHECK-NOT: triton )"); } } TEST_F(NVPTXCompilerTest, RemovesUnnecessaryCopyInPostSchedulingPipelines) { const absl::string_view hlo_text = R"( HloModule all_gather_overlapping, is_scheduled=true condition { input_tuple = (f32[1,128], f32[2,128], pred[]) parameter(0) ROOT cond = pred[] get-tuple-element(input_tuple), index=2 } body { c0 = f32[] constant(0) splat_c0 = f32[1,128] broadcast(c0), dimensions={} input_tuple = (f32[1,128], f32[2,128], pred[]) parameter(0) param_0 = f32[1,128] get-tuple-element(input_tuple), index=0 add = f32[1,128] add(splat_c0, param_0) param_1 = f32[2,128] get-tuple-element(input_tuple), index=1 c1_s32 = s32[] constant(1) c0_s32 = s32[] constant(0) dynamic-slice = f32[1,128] dynamic-slice(param_1, c1_s32, c0_s32), dynamic_slice_sizes={1,128} all-gather-start = (f32[1,128], f32[2,128]) all-gather-start(add), channel_id=1337, replica_groups={{0,1}}, dimensions={0}, use_global_device_ids=true all-gather-done = f32[2,128] all-gather-done(all-gather-start) copy = f32[2,128] copy(all-gather-done) cond = pred[] get-tuple-element(input_tuple), index=2 ROOT output_tuple = (f32[1,128], f32[2,128], pred[]) tuple(dynamic-slice, copy, cond) } ENTRY main { param_0 = f32[1,128] parameter(0) param_1 = f32[2,128] parameter(1) param_2 = pred[] parameter(2) copy_param_0 = f32[1,128] copy(param_0) copy_param_1 = f32[2,128] copy(param_1) tuple = (f32[1,128], f32[2,128], pred[]) tuple(copy_param_0, copy_param_1, param_2) while = (f32[1,128], f32[2,128], pred[]) while(tuple), condition=condition, body=body get-tuple-element = f32[1,128]{1,0} get-tuple-element((f32[1,128]{1,0}, f32[2,128]{1,0}, pred[]) while), index=0 get-tuple-element.1 = f32[2,128]{1,0} get-tuple-element((f32[1,128]{1,0}, f32[2,128]{1,0}, pred[]) while), index=1 get-tuple-element.2 = pred[] get-tuple-element((f32[1,128]{1,0}, f32[2,128]{1,0}, pred[]) while), index=2 copy.3 = pred[] copy(pred[] get-tuple-element.2) ROOT tuple.2 = (f32[1,128]{1,0}, f32[2,128]{1,0}, pred[]) tuple(f32[1,128]{1,0} get-tuple-element, f32[2,128]{1,0} get-tuple-element.1, pred[] copy.3) } )"; auto module = ParseAndReturnVerifiedModule(hlo_text).value(); EXPECT_EQ(CountCopies(*module), 4); const HloInstruction* while_op = hlo_query::GetFirstInstructionWithOpcode( *module->entry_computation(), HloOpcode::kWhile); EXPECT_EQ(while_op->while_body()->root_instruction()->operand(1)->opcode(), HloOpcode::kCopy); NVPTXCompiler compiler; 
TF_EXPECT_OK(compiler.RunPostSchedulingPipelines( module.get(), 100000, backend().default_stream_executor()->GetDeviceDescription())); EXPECT_EQ(CountCopies(*module), 3); while_op = hlo_query::GetFirstInstructionWithOpcode( *module->entry_computation(), HloOpcode::kWhile); EXPECT_EQ(while_op->while_body()->root_instruction()->operand(1)->opcode(), HloOpcode::kAllGatherDone); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/nvptx_compiler.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/nvptx_compiler_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
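The CompileTargetBinary and LinkModules code in the row above shares one convention worth noting: when NvJitLink will both compile and link, the PTX text is carried through tagged with the kPtxPrefix bytes ("PTX: ") so the link step can tell deferred PTX apart from already-compiled CUBIN blobs. What follows is a minimal standalone sketch of only that tagging idea, assuming nothing beyond the C++ standard library; TagPtx and ContainsPtx are illustrative names, not XLA or StreamExecutor APIs.

// Standalone sketch of the "PTX: " prefix convention used by the NVPTX
// compiler code above to mark deferred-PTX modules for the NvJitLink path.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <iterator>
#include <string>
#include <vector>

constexpr uint8_t kPtxPrefix[] = {'P', 'T', 'X', ':', ' '};

// Tags PTX text so a later link step can distinguish it from a CUBIN blob.
std::vector<uint8_t> TagPtx(const std::string& ptx) {
  std::vector<uint8_t> blob;
  blob.reserve(sizeof(kPtxPrefix) + ptx.size() + 1);
  blob.insert(blob.end(), kPtxPrefix, kPtxPrefix + sizeof(kPtxPrefix));
  blob.insert(blob.end(), ptx.begin(), ptx.end());
  blob.push_back('\0');  // Keep the PTX payload NUL-terminated.
  return blob;
}

// Mirrors the module_contains_ptx check in LinkModules above.
bool ContainsPtx(const std::vector<uint8_t>& module) {
  return module.size() >= sizeof(kPtxPrefix) &&
         std::equal(std::begin(kPtxPrefix), std::end(kPtxPrefix),
                    module.begin());
}

int main() {
  std::vector<uint8_t> tagged = TagPtx(".version 8.0\n.target sm_80\n");
  std::vector<uint8_t> cubin = {0x7f, 'E', 'L', 'F'};  // CUBINs are ELF files.
  std::cout << ContainsPtx(tagged) << " " << ContainsPtx(cubin) << "\n";
  return 0;
}

Running the sketch prints 1 0: the tagged buffer is recognized as PTX (and would have the prefix stripped before being handed to the linker), while the ELF-magic buffer is treated as a CUBIN.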
b6ff3b65-9b13-44b3-8470-4edfba40e5bf
cpp
tensorflow/tensorflow
conv_layout_normalization
third_party/xla/xla/service/gpu/conv_layout_normalization.cc
third_party/xla/xla/service/gpu/conv_layout_normalization_test.cc
#include "xla/service/gpu/conv_layout_normalization.h" #include <cstdint> #include <optional> #include <vector> #include "absl/status/statusor.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/service/gpu/cublas_cudnn.h" #include "xla/service/hlo_creation_utils.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/status_macros.h" #include "xla/util.h" #include "tsl/platform/protobuf.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { absl::StatusOr<std::optional<HloInstruction*>> UpdateLayoutForCudnnConvolution( HloCustomCallInstruction* hlo) { HloInstruction* lhs = hlo->mutable_operand(0); HloInstruction* rhs = hlo->mutable_operand(1); const ConvolutionDimensionNumbers& dim_numbers = hlo->convolution_dimension_numbers(); auto transpose_dim = [&](int64_t dim, const Shape& unnormalized_shape) { return unnormalized_shape.rank() - FindIndex(unnormalized_shape.layout().minor_to_major(), dim) - 1; }; auto transpose_dims = [&](tsl::protobuf::RepeatedField<int64_t>& dims, const Shape& unnormalized_shape) { for (auto& dim : dims) { dim = transpose_dim(dim, unnormalized_shape); } }; const Shape& conv_output_shape = hlo->shape().IsTuple() ? hlo->shape().tuple_shapes(0) : hlo->shape(); Shape input_shape, filter_shape, output_shape; TF_ASSIGN_OR_RETURN( gpu::CudnnConvKind conv_kind, gpu::GetCudnnConvKind(Cast<HloCustomCallInstruction>(hlo))); switch (conv_kind) { case gpu::CudnnConvKind::kForward: case gpu::CudnnConvKind::kForwardActivation: case gpu::CudnnConvKind::kForwardGraph: { input_shape = lhs->shape(); filter_shape = rhs->shape(); output_shape = conv_output_shape; break; } case gpu::CudnnConvKind::kBackwardInput: { filter_shape = rhs->shape(); output_shape = lhs->shape(); input_shape = conv_output_shape; break; } case gpu::CudnnConvKind::kBackwardFilter: { input_shape = lhs->shape(); output_shape = rhs->shape(); filter_shape = conv_output_shape; break; } } ConvolutionDimensionNumbers new_dim_numbers = dim_numbers; new_dim_numbers.set_input_batch_dimension( transpose_dim(dim_numbers.input_batch_dimension(), input_shape)); new_dim_numbers.set_input_feature_dimension( transpose_dim(dim_numbers.input_feature_dimension(), input_shape)); transpose_dims(*new_dim_numbers.mutable_input_spatial_dimensions(), input_shape); new_dim_numbers.set_kernel_input_feature_dimension(transpose_dim( dim_numbers.kernel_input_feature_dimension(), filter_shape)); new_dim_numbers.set_kernel_output_feature_dimension(transpose_dim( dim_numbers.kernel_output_feature_dimension(), filter_shape)); transpose_dims(*new_dim_numbers.mutable_kernel_spatial_dimensions(), filter_shape); new_dim_numbers.set_output_batch_dimension( transpose_dim(dim_numbers.output_batch_dimension(), output_shape)); new_dim_numbers.set_output_feature_dimension( transpose_dim(dim_numbers.output_feature_dimension(), output_shape)); transpose_dims(*new_dim_numbers.mutable_output_spatial_dimensions(), output_shape); Shape normalized_shape; if (hlo->shape().IsTuple()) { TF_RET_CHECK(hlo->shape().tuple_shapes().back().rank() == 1) << "The last element in the tuple returned by a convolution Custom " "Call is expected to be an " "allocator of rank one"; std::vector<Shape> new_tuple_shape; for (const Shape& tuple_shape : hlo->shape().tuple_shapes()) { new_tuple_shape.emplace_back( ShapeUtil::MakeShapeWithDescendingLayoutAndSamePhysicalLayout( tuple_shape)); } normalized_shape = 
ShapeUtil::MakeTupleShape(new_tuple_shape); } else { normalized_shape = ShapeUtil::MakeShapeWithDescendingLayoutAndSamePhysicalLayout( hlo->shape()); } std::vector<HloInstruction*> normalized_operands; bool performed_normalization = false; for (int idx = 0; idx < hlo->operand_count(); idx++) { HloInstruction* op = hlo->mutable_operand(idx); const Shape& s = op->shape(); Shape s_reordered = ShapeUtil::MakeShapeWithDescendingLayoutAndSamePhysicalLayout(s); normalized_operands.emplace_back(MakeBitcastHlo(op, s_reordered)); } if (!performed_normalization && ShapeUtil::Equal(normalized_shape, hlo->shape()) && ConvolutionDimensionNumbersToString(new_dim_numbers) == ConvolutionDimensionNumbersToString(dim_numbers)) { return std::nullopt; } HloInstruction* normalized_conv = hlo->parent()->AddInstruction( HloInstruction::CreateCustomCall(normalized_shape, normalized_operands, hlo->custom_call_target()), &hlo->metadata()); normalized_conv->set_window(hlo->window()); normalized_conv->set_convolution_dimension_numbers(new_dim_numbers); normalized_conv->set_feature_group_count(hlo->feature_group_count()); normalized_conv->set_raw_backend_config_string( hlo->raw_backend_config_string()); *normalized_conv->mutable_precision_config() = hlo->precision_config(); normalized_conv->parent()->parent()->SetAndUniquifyInstrName(normalized_conv, hlo->name()); HloInstruction* bc_to_orig; if (normalized_conv->shape().IsTuple()) { std::vector<HloInstruction*> tuple_elements( normalized_conv->shape().tuple_shapes_size()); for (int i = 0; i < normalized_conv->shape().tuple_shapes_size(); ++i) { TF_ASSIGN_OR_RETURN(HloInstruction * normalized_out, MakeGetTupleElementHlo(normalized_conv, i)); tuple_elements[i] = MakeBitcastHlo(normalized_out, hlo->shape().tuple_shapes(i)); } bc_to_orig = MaybeMakeTuple(tuple_elements); } else { bc_to_orig = MakeBitcastHlo(normalized_conv, hlo->shape()); } return bc_to_orig; } } absl::StatusOr<std::optional<HloInstruction*>> NormalizeLayoutForGpuCustomCalls( HloCustomCallInstruction* hlo) { if (IsCustomCallToDnnConvolution(*hlo)) { TF_ASSIGN_OR_RETURN(std::optional<HloInstruction*> bc_to_orig, UpdateLayoutForCudnnConvolution(hlo)); return bc_to_orig; } return std::nullopt; } } }
#include "xla/error_spec.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/tests/hlo_test_base.h" #include "xla/tests/test_macros.h" #include "tsl/platform/test.h" namespace xla { namespace gpu { namespace { class ConvolutionLayoutNormalizationTest : public HloTestBase { public: se::CudaComputeCapability GetCudaComputeCapability() { return backend() .default_stream_executor() ->GetDeviceDescription() .cuda_compute_capability(); } }; TEST_F(ConvolutionLayoutNormalizationTest, BackwardInput) { const char* hlo = R"( HloModule TestModule %TestComputation1 (param_0: f32[1,20,257], param_1: f32[31,257,136]) -> (f32[1,23,136], u8[0]) { %param_0 = f32[1,20,257]{2,1,0} parameter(0) %copy.3 = f32[1,20,257]{1,2,0} copy(f32[1,20,257]{2,1,0} %param_0) %param_1 = f32[31,257,136]{2,1,0} parameter(1) %copy.4 = f32[31,257,136]{0,2,1} copy(f32[31,257,136]{2,1,0} %param_1) %custom-call.1 = (f32[1,23,136]{1,2,0}, u8[0]{0}) custom-call(f32[1,20,257]{1,2,0} %copy.3, f32[31,257,136]{0,2,1} %copy.4), window={size=31 stride=2 pad=23_23}, dim_labels=b0f_0oi->b0f, custom_call_target="__cudnn$convBackwardInput", backend_config={"cudnn_conv_backend_config":{conv_result_scale:1}} %get-tuple-element.2 = f32[1,23,136]{1,2,0} get-tuple-element((f32[1,23,136]{1,2,0}, u8[0]{0}) %custom-call.1), index=0 %copy.5 = f32[1,23,136]{2,1,0} copy(f32[1,23,136]{1,2,0} %get-tuple-element.2) %get-tuple-element.3 = u8[0]{0} get-tuple-element((f32[1,23,136]{1,2,0}, u8[0]{0}) %custom-call.1), index=1 ROOT %tuple.1 = (f32[1,23,136]{2,1,0}, u8[0]{0}) tuple(f32[1,23,136]{2,1,0} %copy.5, u8[0]{0} %get-tuple-element.3) } )"; MatchOptimizedHlo(hlo, R"( )"); } TEST_F(ConvolutionLayoutNormalizationTest, Forward) { const char* hlo = R"( HloModule TestModule ENTRY %TestComputation { %param_0 = f32[2,128,1,378]{3,2,1,0} parameter(0) %param_1 = f32[1,5,128,128]{1,0,2,3} parameter(1) ROOT %custom-call.1 = (f32[2,128,1,378]{3,2,1,0}, u8[0]{0}) custom-call(%param_0, %param_1), window={size=1x5 pad=0_0x2_2}, dim_labels=bf01_01io->bf01, custom_call_target="__cudnn$convForward", backend_config={"cudnn_conv_backend_config":{conv_result_scale:1}} } )"; MatchOptimizedHlo(hlo, R"( )"); } TEST_F(ConvolutionLayoutNormalizationTest, DISABLED_ON_GPU_ROCM(FusedConv3D)) { const char* hlo = R"( HloModule TestModule ENTRY TestComputation { %p0 = f32[8,4,5,5,1] parameter(0) %p1 = f32[3,3,3,1,32] parameter(1) %conv = f32[8,4,5,5,32] convolution(p0, p1), window={size=3x3x3 pad=1_1x1_1x1_1}, dim_labels=b012f_012io->b012f %bias = f32[32] parameter(2) %broadcasted_bias = f32[8,4,5,5,32] broadcast(%bias), dimensions={4} %add = f32[8,4,5,5,32] add(%conv, %broadcasted_bias) %zero = f32[] constant(0) %zeros = f32[8,4,5,5,32] broadcast(%zero), dimensions={} ROOT relu = f32[8,4,5,5,32] maximum(%zeros, %add) } )"; MatchOptimizedHlo(hlo, R"( )"); } TEST_F(ConvolutionLayoutNormalizationTest, GraphConvF8) { if (!GetCudaComputeCapability().IsAtLeast( se::CudaComputeCapability::HOPPER)) { GTEST_SKIP() << "FP8 convolutions require Hopper or newer architecture."; } const char* hlo = R"( HloModule Test ENTRY %Test (input.1: f8e4m3fn[2,1,378,128], filter.1: f8e4m3fn[1,128,128,5], input_scale.1: f32[], filter_scale.1: f32[], z_scale.1: f32[]) -> (f8e4m3fn[2,1,378,128], f32[], u8[0]{0}) { %input.1 = f8e4m3fn[2,1,378,128]{3,2,1,0} parameter(0) %filter.1 = f8e4m3fn[128,1,5,128]{1,0,2,3} parameter(1) %input_scale.1 = f32[] parameter(2) %filter_scale.1 = f32[] parameter(3) %z_scale.1 = f32[] parameter(4) ROOT %cudnn-conv.3.0 = (f8e4m3fn[2,1,378,128]{3,2,1,0}, f32[], u8[0]{0}) 
custom-call(%input.1, %filter.1, %input_scale.1, %filter_scale.1, %z_scale.1), window={size=1x5 pad=0_0x2_2}, dim_labels=b01f_o01i->b01f, custom_call_target="__cudnn$convForwardGraph", backend_config={"cudnn_conv_backend_config":{"conv_result_scale":1,"serialized_graph":"28:[f32]conv();30:[f32]scale(28);32:[f32]scale(30);16:[f8e4m3fn]scale(32);25:[f32]amax(32);"}} })"; MatchOptimizedHlo(hlo, R"( )"); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/conv_layout_normalization.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/conv_layout_normalization_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
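The conv_layout_normalization code above bitcasts every operand and result of a cuDNN convolution custom call to a shape with a descending (normalized) layout and remaps each convolution dimension number with transpose_dim: a logical dimension d ends up at position rank - index_of(d, minor_to_major) - 1. The snippet below is a minimal standalone sketch of only that index arithmetic, with a plain std::vector<int64_t> standing in for the shape's minor_to_major list; TransposeDim is an illustrative rename, not an XLA function.

// Standalone sketch of the dimension remapping performed by transpose_dim in
// UpdateLayoutForCudnnConvolution above. Plain C++, no XLA types.
#include <cstdint>
#include <iostream>
#include <vector>

int64_t TransposeDim(int64_t dim, const std::vector<int64_t>& minor_to_major) {
  const int64_t rank = static_cast<int64_t>(minor_to_major.size());
  int64_t index = 0;
  while (index < rank && minor_to_major[index] != dim) ++index;
  // Position of the dimension once the shape is rewritten to a descending
  // ({rank-1, ..., 0}) layout with the same physical order.
  return rank - index - 1;
}

int main() {
  // Layout of f32[1,20,257]{1,2,0} from the BackwardInput test above:
  // minor-to-major order is {1, 2, 0}.
  const std::vector<int64_t> minor_to_major = {1, 2, 0};
  for (int64_t d = 0; d < 3; ++d) {
    std::cout << d << " -> " << TransposeDim(d, minor_to_major) << "\n";
  }
  return 0;
}

It prints 0 -> 0, 1 -> 2, 2 -> 1: the outermost dimension stays in place while the other two swap, which is the kind of remapping the pass records in the new ConvolutionDimensionNumbers.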
a56bb79d-f735-498e-9b14-cd281b406b7b
cpp
tensorflow/tensorflow
hlo_traversal
third_party/xla/xla/service/gpu/hlo_traversal.cc
third_party/xla/xla/service/gpu/hlo_traversal_test.cc
#include "xla/service/gpu/hlo_traversal.h" #include <algorithm> #include <cstdint> #include <functional> #include <iterator> #include <memory> #include <optional> #include <queue> #include <sstream> #include <string> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_set.h" #include "absl/container/inlined_vector.h" #include "absl/log/check.h" #include "absl/memory/memory.h" #include "absl/types/span.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" namespace xla { namespace gpu { namespace { template <typename F> void ResolveUsers(const HloInstruction* value, const HloInstruction* user, const HloFusionAdaptor& fusion_adaptor, F&& add_user) { if (user->opcode() == HloOpcode::kTuple && user->IsRoot()) { if (auto* fusion = user->parent()->FusionInstruction()) { for (const auto* gte : fusion->users()) { if (gte->opcode() != HloOpcode::kGetTupleElement) { if (fusion_adaptor.ContainsInstruction(value)) { add_user(gte); } continue; } for (const auto* gte_user : gte->users()) { ResolveUsers(gte, gte_user, fusion_adaptor, add_user); } } } } else if (fusion_adaptor.ContainsInstruction(user) && user->opcode() == HloOpcode::kFusion) { auto* param = user->fused_parameter(user->operand_index(value)); for (const auto* param_user : param->users()) { add_user(param_user); } } else if (fusion_adaptor.ContainsInstruction(user)) { add_user(user); } } const HloInstruction* ResolveOperand(const HloInstruction* operand, const HloFusionAdaptor& fusion_adaptor) { if (operand->opcode() == HloOpcode::kGetTupleElement && operand->operand(0)->opcode() == HloOpcode::kFusion && operand->operand(0)->fused_expression_root()->opcode() == HloOpcode::kTuple && fusion_adaptor.ContainsInstruction(operand->operand(0))) { return operand->operand(0)->fused_expression_root()->operand( operand->tuple_index()); } if (!fusion_adaptor.ContainsInstruction(operand)) { return operand; } if (operand->opcode() == HloOpcode::kFusion) { return operand->fused_expression_root(); } if (operand->opcode() == HloOpcode::kParameter) { if (auto* fusion = operand->parent()->FusionInstruction()) { return ResolveOperand(fusion->operand(operand->parameter_number()), fusion_adaptor); } } return operand; } } class SingleInstructionFusion : public internal::HloFusionInstructionAdaptor { public: explicit SingleInstructionFusion(const HloInstruction* instruction, const HloFusionAdaptor* parent) : instruction_(instruction), parent_(parent) { CHECK_NE(instruction->opcode(), HloOpcode::kFusion) << "Use HloComputationFusion"; } bool ContainsInstruction(const HloInstruction* instruction) const override { return instruction == instruction_; } absl::InlinedVector<HloInstructionAdaptor, 2> GetRoots() const override { return {HloInstructionAdaptor{*instruction_, parent_}}; } absl::InlinedVector<const HloInstruction*, 2> GetParameters() const override { const auto& operands = instruction_->operands(); return absl::InlinedVector<const HloInstruction*, 2>(operands.begin(), operands.end()); } const HloInstruction& FusionInstruction() const override { return *instruction_; } absl::InlinedVector<HloInstructionAdaptor, 2> MakeInstructionPostOrder() const override { return {HloInstructionAdaptor{*instruction_, parent_}}; } void ForEach( const std::function<void(HloInstructionAdaptor)>& fn) const override { fn(HloInstructionAdaptor{*instruction_, parent_}); } std::string ToString() const override { return instruction_->ToString(); } private: const HloInstruction* 
instruction_; const HloFusionAdaptor* parent_; }; class HloComputationFusion : public internal::HloFusionInstructionAdaptor { public: explicit HloComputationFusion(const HloComputation* computation, const HloFusionAdaptor* parent) : computation_(computation), parent_(parent) { CHECK(computation->IsFusionComputation()); roots_ = FindRoots(computation); } absl::InlinedVector<HloInstructionAdaptor, 2> FindRoots( const HloComputation* computation) { absl::InlinedVector<HloInstructionAdaptor, 2> roots; std::function<void(const HloInstruction*)> get_roots; get_roots = [&](const HloInstruction* instr) { if (instr->opcode() == HloOpcode::kTuple) { for (const auto* operand : instr->operands()) { get_roots(operand); } } else { HloInstructionAdaptor wrapped{*instr, parent_}; roots.push_back(wrapped); } }; get_roots(computation->root_instruction()); return roots; } bool ContainsInstruction(const HloInstruction* instruction) const override { return instruction->parent() == computation_ || instruction == computation_->FusionInstruction(); } absl::InlinedVector<HloInstructionAdaptor, 2> GetRoots() const override { CHECK(!roots_.empty()) << "No roots found in the computation. HloFusionAdaptor was likely " "created for a non-fusion computation: " << computation_->ToString(); return roots_; } absl::InlinedVector<const HloInstruction*, 2> GetParameters() const override { const auto& operands = computation_->FusionInstruction()->operands(); return absl::InlinedVector<const HloInstruction*, 2>(operands.begin(), operands.end()); } const HloInstruction& FusionInstruction() const override { return *computation_->FusionInstruction(); } absl::InlinedVector<HloInstructionAdaptor, 2> MakeInstructionPostOrder() const override { auto post_order = computation_->MakeInstructionPostOrder(); absl::InlinedVector<HloInstructionAdaptor, 2> result; result.reserve(post_order.size() - computation_->num_parameters()); for (auto* instr : post_order) { if (instr->opcode() == HloOpcode::kParameter || (instr->opcode() == HloOpcode::kTuple && instr->IsRoot())) { continue; } result.emplace_back(*instr, parent_); } return result; } void ForEach( const std::function<void(HloInstructionAdaptor)>& fn) const override { for (const HloInstruction* instr : computation_->instructions()) { if (instr->opcode() == HloOpcode::kParameter || instr->opcode() == HloOpcode::kTuple || instr->opcode() == HloOpcode::kGetTupleElement) { continue; } fn(HloInstructionAdaptor{*instr, parent_}); } } std::string ToString() const override { return computation_->ToString(); } private: const HloComputation* computation_; absl::InlinedVector<HloInstructionAdaptor, 2> roots_; const HloFusionAdaptor* parent_; }; std::unique_ptr<HloFusionAdaptor> HloFusionAdaptor::ForInstruction( const HloInstruction* instruction) { if (instruction->opcode() == HloOpcode::kFusion) { return ForComputation(instruction->fused_instructions_computation()); } auto fusion_adaptor = absl::WrapUnique(new HloFusionAdaptor); fusion_adaptor->AddInstruction(instruction); return fusion_adaptor; } std::unique_ptr<HloFusionAdaptor> HloFusionAdaptor::ForProducerConsumer( const HloInstruction* producer, const HloInstruction* consumer) { auto fusion_adaptor = absl::WrapUnique(new HloFusionAdaptor); fusion_adaptor->AddInstruction(producer); fusion_adaptor->AddInstruction(consumer); return fusion_adaptor; } std::unique_ptr<HloFusionAdaptor> HloFusionAdaptor::ForComputation( const HloComputation* computation) { auto fusion_adaptor = absl::WrapUnique(new HloFusionAdaptor); 
fusion_adaptor->AddComputation(computation); return fusion_adaptor; } bool HloFusionAdaptor::ContainsInstruction( HloInstructionAdaptor instruction) const { return ContainsInstruction(&instruction.instruction()); } bool HloFusionAdaptor::ContainsInstruction( const HloInstruction* instruction) const { for (const auto& fusion_instruction : fusion_instructions_) { if (fusion_instruction->ContainsInstruction(instruction)) return true; } return false; } absl::InlinedVector<HloInstructionAdaptor, 2> HloFusionAdaptor::GetRoots() const { auto roots = fusion_instructions_.back()->GetRoots(); if (fusion_instructions_.size() == 1) { return roots; } CHECK_EQ(fusion_instructions_.size(), 2); auto producer_roots = fusion_instructions_[0]->GetRoots(); const HloInstruction& producer_fusion = fusion_instructions_[0]->FusionInstruction(); const HloInstruction& consumer_fusion = fusion_instructions_.back()->FusionInstruction(); for (auto& root : roots) { if (root.opcode() != HloOpcode::kParameter) { continue; } const HloInstruction* operand = consumer_fusion.operand(root.instruction().parameter_number()); int64_t root_index = 0; if (operand->opcode() == HloOpcode::kGetTupleElement) { root_index = operand->tuple_index(); operand = operand->operand(0); } if (operand == &producer_fusion) { root = producer_roots[root_index]; } } if (!producer_fusion.IsMultiOutputFusion()) { return roots; } absl::flat_hash_set<int64_t> root_indices_with_outside_usage; for (HloInstruction* instr : producer_fusion.users()) { bool has_outside_user = false; int64_t root_index = 0; if (instr->opcode() == HloOpcode::kGetTupleElement) { for (HloInstruction* user : instr->users()) { if (user != &consumer_fusion) { root_index = instr->tuple_index(); has_outside_user = true; break; } } } else if (instr != &consumer_fusion) { has_outside_user = true; } if (has_outside_user) { root_indices_with_outside_usage.insert(root_index); } } for (int64_t i = 0; i < producer_roots.size(); ++i) { if (!root_indices_with_outside_usage.contains(i)) { continue; } if (producer_roots[i].opcode() != HloOpcode::kParameter) { roots.push_back(producer_roots[i]); } } return roots; } absl::InlinedVector<const HloInstruction*, 2> HloFusionAdaptor::GetParameters() const { if (fusion_instructions_.size() == 1) { return fusion_instructions_.back()->GetParameters(); } CHECK_EQ(fusion_instructions_.size(), 2); absl::InlinedVector<const HloInstruction*, 2> combined_parameters; const HloInstruction& producer_fusion = fusion_instructions_[0]->FusionInstruction(); for (const auto& param : fusion_instructions_.back()->GetParameters()) { const HloInstruction* operand = param; if (operand->opcode() == HloOpcode::kGetTupleElement) { operand = operand->operand(0); } if (operand != &producer_fusion) { combined_parameters.push_back(param); } } absl::flat_hash_set<const HloInstruction*> params(combined_parameters.begin(), combined_parameters.end()); auto producer_roots = fusion_instructions_[0]->GetRoots(); absl::flat_hash_set<const HloInstruction*> parameters_to_skip; for (const auto& root : producer_roots) { if (root.opcode() == HloOpcode::kParameter) { if (&root.instruction() == &producer_fusion) { parameters_to_skip.insert(&producer_fusion); } else if (root.instruction().user_count() <= 1) { parameters_to_skip.insert( producer_fusion.operand(root.instruction().parameter_number())); } } } for (auto param : fusion_instructions_[0]->GetParameters()) { if (!parameters_to_skip.contains(param) && params.insert(param).second) { combined_parameters.push_back(param); } } return 
combined_parameters; } absl::InlinedVector<HloInstructionAdaptor, 2> HloFusionAdaptor::MakeInstructionPostOrder() const { absl::InlinedVector<HloInstructionAdaptor, 2> result_post_order; for (const auto& fusion_instruction : fusion_instructions_) { absl::c_move(fusion_instruction->MakeInstructionPostOrder(), std::back_inserter(result_post_order)); } return result_post_order; } void HloFusionAdaptor::ForEach( const std::function<void(HloInstructionAdaptor)>& fn) const { for (const auto& fusion_instruction : fusion_instructions_) { fusion_instruction->ForEach(fn); } } std::string HloFusionAdaptor::ToString() const { std::ostringstream ss; for (const auto& fusion_instruction : fusion_instructions_) { ss << fusion_instruction->ToString() << "\n"; } return ss.str(); } void HloFusionAdaptor::AddInstruction(const HloInstruction* instruction) { if (instruction->opcode() == HloOpcode::kFusion) { AddComputation(instruction->fused_instructions_computation()); } else { fusion_instructions_.push_back( std::make_unique<SingleInstructionFusion>(instruction, this)); } } void HloFusionAdaptor::AddComputation(const HloComputation* computation) { fusion_instructions_.push_back( std::make_unique<HloComputationFusion>(computation, this)); } absl::InlinedVector<HloInstructionAdaptor, 2> HloInstructionAdaptor::GetOperands() const { absl::InlinedVector<HloInstructionAdaptor, 2> operands; if (instruction_->opcode() == HloOpcode::kParameter) { auto operand = ResolveOperand(instruction_, *parent_); if (operand != instruction_) { operands.emplace_back(*operand, parent_); } } else { for (const auto* operand : instruction_->operands()) { operands.emplace_back(*ResolveOperand(operand, *parent_), parent_); } } return operands; } HloInstructionAdaptor::HloInstructionAdaptor(const HloInstruction& instruction, const HloFusionAdaptor* parent) : instruction_(&instruction), parent_(parent) { CHECK_NE(parent, nullptr) << "Parent fusion adaptor must not be null"; } HloInstructionAdaptor HloInstructionAdaptor::GetOperand(int index) const { return HloInstructionAdaptor{ *ResolveOperand(instruction_->operand(index), *parent_), parent_}; } absl::InlinedVector<HloInstructionAdaptor, 2> HloInstructionAdaptor::GetUsers() const { absl::InlinedVector<HloInstructionAdaptor, 2> users; auto add_user = [&](const HloInstruction* instr) { users.emplace_back(*instr, parent_); }; if (instruction_->IsRoot()) { if (auto* fusion = instruction_->parent()->FusionInstruction()) { for (auto* user : fusion->users()) { ResolveUsers(fusion, user, *parent_, add_user); } } } for (auto* user : instruction_->users()) { ResolveUsers(instruction_, user, *parent_, add_user); } return users; } bool operator==(const HloInstructionAdaptor& lhs, const HloInstructionAdaptor& rhs) { return lhs.instruction_->GetModule() == rhs.instruction_->GetModule() && lhs.instruction_->unique_id() == rhs.instruction_->unique_id(); } bool operator!=(const HloInstructionAdaptor& lhs, const HloInstructionAdaptor& rhs) { return !(lhs == rhs); } namespace { void HloBfsTraversal( absl::Span<const HloInstructionAdaptor> roots, const HloFusionAdaptor& fusion, const std::function<TraversalResult(HloInstructionAdaptor node)>& visit_node, bool visit_operands) { absl::flat_hash_set<HloInstructionAdaptor> visited; std::queue<HloInstructionAdaptor> q; auto enqueue = [&](const HloInstructionAdaptor& node) { const auto& adjacent_nodes = visit_operands ? 
node.GetOperands() : node.GetUsers(); for (const auto& node : adjacent_nodes) { if (fusion.ContainsInstruction(node) && visited.insert(node).second) { q.push(node); } } }; for (auto root : roots) { if (visited.insert(root).second) { q.push(root); } } while (!q.empty()) { HloInstructionAdaptor node = q.front(); q.pop(); switch (visit_node(node)) { case TraversalResult::kAdvance: enqueue(node); break; case TraversalResult::kInterrupt: return; case TraversalResult::kSkip: break; } } } } void HloBfsConsumersFirstTraversal( absl::Span<const HloInstructionAdaptor> roots, const HloFusionAdaptor& fusion, const std::function<TraversalResult(HloInstructionAdaptor node)>& visit_node) { HloBfsTraversal(roots, fusion, visit_node, true); } void HloBfsProducersFirstTraversal( absl::Span<const HloInstructionAdaptor> producers, const HloFusionAdaptor& fusion, const std::function<TraversalResult(HloInstructionAdaptor node)>& visit_node) { HloBfsTraversal(producers, fusion, visit_node, false); } bool HloBfsAnyOf(absl::Span<const HloInstructionAdaptor> roots, const HloFusionAdaptor& fusion, const std::function<bool(HloInstructionAdaptor node)>& visit, bool visit_operands) { return HloBfsFindIf(roots, fusion, visit, visit_operands).has_value(); } bool HloBfsAnyOf(absl::Span<const HloInstruction* const> roots, const std::function<bool(const HloInstruction* node)>& visit, bool visit_operands) { return HloBfsFindIf(roots, visit, visit_operands).has_value(); } std::optional<HloInstructionAdaptor> HloBfsFindIf( absl::Span<const HloInstructionAdaptor> roots, const HloFusionAdaptor& fusion, const std::function<bool(HloInstructionAdaptor node)>& visit, bool visit_operands) { std::optional<HloInstructionAdaptor> result = std::nullopt; HloBfsTraversal( roots, fusion, [&](HloInstructionAdaptor node) { if (visit(node)) { result = node; return TraversalResult::kInterrupt; } return TraversalResult::kAdvance; }, visit_operands); return result; } std::vector<const HloInstruction*> HloFindAllImpl( absl::Span<const HloInstruction* const> roots, const std::function<bool(const HloInstruction* node)>& visit, bool visit_operands, bool find_first_only = false) { std::vector<const HloInstruction*> result; absl::flat_hash_set<const HloInstruction*> visited; std::queue<const HloInstruction*> q; auto enqueue = [&](const HloInstruction* node) { if (visit_operands) { for (const HloInstruction* operand : node->operands()) { if (visited.insert(operand).second) { q.push(operand); } } } else { for (const HloInstruction* operand : node->users()) { if (visited.insert(operand).second) { q.push(operand); } } } }; for (auto root : roots) { if (visited.insert(root).second) { q.push(root); } } while (!q.empty()) { const HloInstruction* node = q.front(); q.pop(); if (visit(node)) { result.push_back(node); if (find_first_only) { return result; } } enqueue(node); } return result; } std::optional<const HloInstruction*> HloBfsFindIf( absl::Span<const HloInstruction* const> roots, const std::function<bool(const HloInstruction* node)>& visit, bool visit_operands) { auto result = HloFindAllImpl(roots, visit, visit_operands, true); if (result.empty()) { return std::nullopt; } return result[0]; } std::vector<const HloInstruction*> HloBfsFindAll( absl::Span<const HloInstruction* const> roots, const std::function<bool(const HloInstruction* node)>& visit, bool visit_operands) { std::vector<const HloInstruction*> result; return HloFindAllImpl(roots, visit, visit_operands); } std::vector<HloInstructionAdaptor> HloFindUseChain(HloInstructionAdaptor parent, 
HloInstructionAdaptor root) { absl::flat_hash_set<HloInstructionAdaptor> visited; std::vector<HloInstructionAdaptor> result; std::function<bool(HloInstructionAdaptor)> visit; visit = [&](HloInstructionAdaptor node) { if (node == root) return true; for (const auto& user : node.GetUsers()) { if (visited.insert(user).second && visit(user)) { result.push_back(user); return true; } } return false; }; if (visit(parent)) { result.push_back(parent); std::reverse(result.begin(), result.end()); } else { result.clear(); } return result; } } }
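A minimal usage sketch for the traversal helpers defined above, assuming an already-built HloModule and a pointer to a kLoop fusion instruction inside it; it mirrors what the unit test file that follows does. The function name InspectFusion and the printed output are illustrative assumptions, not part of the XLA sources.

// Sketch only: drives HloFusionAdaptor, HloBfsConsumersFirstTraversal and
// HloBfsFindIf the same way the tests below do.
#include <iostream>
#include <optional>

#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/hlo_traversal.h"

namespace xla::gpu {

void InspectFusion(const HloInstruction* fusion_instr) {
  // Wrap the fusion instruction; inner parameters and get-tuple-elements are
  // resolved to the producing instructions by the adaptor.
  auto fusion = HloFusionAdaptor::ForInstruction(fusion_instr);

  // Consumers-first BFS over the fused computation, starting at its roots.
  HloBfsConsumersFirstTraversal(fusion->GetRoots(), *fusion,
                                [&](HloInstructionAdaptor node) {
                                  std::cout << node.name() << "\n";
                                  return TraversalResult::kAdvance;
                                });

  // Find the first multiply reachable from the roots, if any.
  std::optional<HloInstructionAdaptor> mul =
      HloBfsFindIf(fusion->GetRoots(), *fusion, [](HloInstructionAdaptor node) {
        return node.opcode() == HloOpcode::kMultiply;
      });
  if (mul.has_value()) {
    std::cout << "found multiply: " << mul->name() << "\n";
  }
}

}  // namespace xla::gpu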
#include "xla/service/gpu/hlo_traversal.h" #include <optional> #include <string> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/algorithm/container.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/pattern_matcher.h" #include "xla/service/pattern_matcher_gmock.h" #include "xla/tests/hlo_test_base.h" namespace xla { namespace gpu { namespace { namespace m = ::xla::match; using ::testing::ElementsAre; using ::testing::IsEmpty; MATCHER_P(InstructionAdaptorName, name, "") { return arg.name() == name; } class HloTraversalTest : public HloTestBase {}; const char kTestModule[] = R"( HloModule test scalar_add_computation { scalar_lhs.0 = f32[] parameter(0) scalar_rhs.0 = f32[] parameter(1) ROOT add.0 = f32[] add(scalar_lhs.0, scalar_rhs.0) } fused_computation { p0.1 = f32[] parameter(0) p1.1 = f32[128] parameter(1) mul = f32[128] multiply(p1.1, p1.1) ROOT reduce.1 = f32[] reduce(mul, p0.1), dimensions={0}, to_apply=scalar_add_computation } fused_computation_1 { p0.2 = f32[] parameter(0) zero = f32[] constant(0.0) is_positive = pred[] compare(p0.2, zero), direction=GE not = pred[] not(is_positive) ROOT tuple = (pred[], pred[]) tuple(is_positive, not) } ENTRY entry { p0 = f32[] parameter(0) p1 = f32[128] parameter(1) sum = f32[128] add(p1, p1) log = f32[128] log(sum) negate = f32[128] negate(log) fusion = f32[] fusion(p0, negate), kind=kLoop, calls=fused_computation fusion2 = (pred[], pred[]) fusion(fusion), kind=kLoop, calls=fused_computation_1 gte = pred[] get-tuple-element(fusion2), index=0 ROOT select = f32[] select(gte, fusion, p0) })"; TEST_F(HloTraversalTest, AdaptorOperands) { auto module = ParseAndReturnVerifiedModule(kTestModule).value(); auto fusion_adaptor = HloFusionAdaptor::ForProducerConsumer( module->entry_computation()->GetInstructionWithName("fusion2"), module->entry_computation()->GetInstructionWithName("select")); HloInstructionAdaptor instr = fusion_adaptor->GetRoots()[0]; EXPECT_THAT(instr.GetOperands(), ElementsAre(InstructionAdaptorName("is_positive"), InstructionAdaptorName("fusion"), InstructionAdaptorName("p0"))); } TEST_F(HloTraversalTest, AdaptorUsers) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test fused_computation { p0 = f32[] parameter(0) neg = f32[] negate(p0) add = f32[] add(p0, neg) ROOT t = (f32[], f32[]) tuple(neg, add) } fused_computation_1 { p0.0 = f32[] parameter(0) mul = f32[] multiply(p0.0, p0.0) ROOT neg.1 = f32[] negate(mul) } ENTRY entry { p0 = f32[] parameter(0) fusion = (f32[], f32[]) fusion(p0), kind=kLoop, calls=fused_computation gte = f32[] get-tuple-element(fusion), index=0 add.1 = f32[] add(p0, gte) fusion2 = f32[] fusion(gte), kind=kLoop, calls=fused_computation_1 exp.1 = f32[] exponential(fusion2) ROOT res = (f32[], (f32[], f32[]), f32[], f32[]) tuple(add.1, fusion, fusion2, exp.1) } )") .value(); auto fusion_adaptor1 = HloFusionAdaptor::ForProducerConsumer( module->entry_computation()->GetInstructionWithName("fusion"), module->entry_computation()->GetInstructionWithName("fusion2")); HloInstructionAdaptor add{*module->GetComputationWithName("fused_computation") ->GetInstructionWithName("add"), fusion_adaptor1.get()}; EXPECT_THAT(add.GetUsers(), ElementsAre(InstructionAdaptorName("mul"), InstructionAdaptorName("res"))); auto fusion_adaptor2 = HloFusionAdaptor::ForInstruction( module->entry_computation()->GetInstructionWithName("fusion2")); HloInstructionAdaptor mul{ 
*module->GetComputationWithName("fused_computation_1") ->GetInstructionWithName("mul"), fusion_adaptor2.get()}; EXPECT_THAT(mul.GetUsers(), ElementsAre(InstructionAdaptorName("neg.1"))); HloInstructionAdaptor neg{ *module->GetComputationWithName("fused_computation_1") ->GetInstructionWithName("neg.1"), fusion_adaptor2.get()}; EXPECT_TRUE(neg.GetUsers().empty()); } TEST_F(HloTraversalTest, TraverseFusionConsumerFirst) { auto module = ParseAndReturnVerifiedModule(kTestModule).value(); std::vector<std::string> visited_nodes; auto fusion = HloFusionAdaptor::ForInstruction( module->entry_computation()->GetInstructionWithName("fusion")); HloBfsConsumersFirstTraversal(fusion->GetRoots(), *fusion, [&](HloInstructionAdaptor node) { visited_nodes.emplace_back(node.name()); return TraversalResult::kAdvance; }); EXPECT_THAT(visited_nodes, ElementsAre("reduce.1", "mul")); } TEST_F(HloTraversalTest, TraverseFusionConsumerFirstFromFusionRootAndInnerNode) { auto module = ParseAndReturnVerifiedModule(kTestModule).value(); std::vector<std::string> visited_nodes; auto fusion = HloFusionAdaptor::ForInstruction( module->entry_computation()->GetInstructionWithName("fusion")); auto root = fusion->GetRoots()[0]; HloBfsConsumersFirstTraversal({root, root.GetOperand(0)}, *fusion, [&](HloInstructionAdaptor node) { visited_nodes.emplace_back(node.name()); return TraversalResult::kAdvance; }); EXPECT_THAT(visited_nodes, ElementsAre("reduce.1", "mul")); } TEST_F(HloTraversalTest, TraverseFusionProducerFirst) { auto module = ParseAndReturnVerifiedModule(kTestModule).value(); std::vector<std::string> visited_nodes; auto fusion = HloFusionAdaptor::ForInstruction( module->entry_computation()->GetInstructionWithName("fusion")); auto root = fusion->GetRoots()[0]; HloBfsProducersFirstTraversal({root.GetOperand(0)}, *fusion, [&](HloInstructionAdaptor node) { visited_nodes.emplace_back(node.name()); return TraversalResult::kAdvance; }); EXPECT_THAT(visited_nodes, ElementsAre("mul", "reduce.1")); } TEST_F(HloTraversalTest, AbortTraversal) { auto module = ParseAndReturnVerifiedModule(kTestModule).value(); auto fusion = HloFusionAdaptor::ForInstruction( module->entry_computation()->GetInstructionWithName("fusion")); std::vector<std::string> visited_nodes; HloBfsConsumersFirstTraversal(fusion->GetRoots(), *fusion, [&](HloInstructionAdaptor node) { visited_nodes.emplace_back(node.name()); return node.opcode() == HloOpcode::kReduce ? 
TraversalResult::kAdvance : TraversalResult::kInterrupt; }); EXPECT_THAT(visited_nodes, ElementsAre("reduce.1", "mul")); } TEST_F(HloTraversalTest, FindArguments) { auto module = ParseAndReturnVerifiedModule(kTestModule).value(); auto fusion = HloFusionAdaptor::ForInstruction( module->entry_computation()->GetInstructionWithName("fusion")); std::vector<std::string> producers; absl::c_for_each(fusion->GetParameters(), [&](const HloInstruction* producer) { producers.emplace_back(producer->name()); }); EXPECT_THAT(producers, ElementsAre("p0", "negate")); } TEST_F(HloTraversalTest, FindArgumentsAfterFusion) { auto module = ParseAndReturnVerifiedModule(kTestModule).value(); auto fusion = HloFusionAdaptor::ForProducerConsumer( module->entry_computation()->GetInstructionWithName("negate"), module->entry_computation()->GetInstructionWithName("fusion")); std::vector<std::string> producers; absl::c_for_each(fusion->GetParameters(), [&](const HloInstruction* producer) { producers.emplace_back(producer->name()); }); EXPECT_THAT(producers, ElementsAre("p0", "log")); } TEST_F(HloTraversalTest, HloBfsFindIf_Found) { auto module = ParseAndReturnVerifiedModule(kTestModule).value(); auto fusion = HloFusionAdaptor::ForInstruction( module->entry_computation()->GetInstructionWithName("fusion")); auto result = HloBfsFindIf(fusion->GetRoots(), *fusion, [&](HloInstructionAdaptor node) { return node.opcode() == HloOpcode::kMultiply; }); ASSERT_NE(result, std::nullopt); ASSERT_EQ(result->name(), "mul"); } TEST_F(HloTraversalTest, HloBfsFindIf_NotFound) { auto module = ParseAndReturnVerifiedModule(kTestModule).value(); auto fusion = HloFusionAdaptor::ForInstruction( module->entry_computation()->GetInstructionWithName("fusion")); auto result = HloBfsFindIf(fusion->GetRoots(), *fusion, [&](HloInstructionAdaptor node) { return false; }); ASSERT_EQ(result, std::nullopt); } TEST_F(HloTraversalTest, HloAnyOf_Found) { auto module = ParseAndReturnVerifiedModule(kTestModule).value(); auto fusion = HloFusionAdaptor::ForInstruction( module->entry_computation()->GetInstructionWithName("fusion")); EXPECT_TRUE(HloAnyOf(*fusion, [&](HloInstructionAdaptor node) { return node.opcode() == HloOpcode::kMultiply; })); } TEST_F(HloTraversalTest, HloAnyOf_NotFound) { auto module = ParseAndReturnVerifiedModule(kTestModule).value(); auto fusion = HloFusionAdaptor::ForInstruction( module->entry_computation()->GetInstructionWithName("fusion")); EXPECT_FALSE( HloAnyOf(*fusion, [&](HloInstructionAdaptor node) { return false; })); } TEST_F(HloTraversalTest, FindAllMultiple) { const char kConverts[] = R"( HloModule test ENTRY entry { p0 = s8[128] parameter(0) p1 = pred[128] parameter(1) p1c = s8[128] convert(p1) p1c1 = f16[128] convert(p1c) p0c = f16[128] convert(p0) ROOT diff = f16[128] subtract(p0c, p1c1) })"; auto module = ParseAndReturnVerifiedModule(kConverts).value(); auto root = module->entry_computation()->GetInstructionWithName("diff"); std::vector<const HloInstruction*> converts = HloBfsFindAll({root}, [&](const HloInstruction* node) { return node->opcode() == HloOpcode::kConvert; }); auto get = [&](absl::string_view name) { return module->entry_computation()->GetInstructionWithName(name); }; EXPECT_THAT(converts, ElementsAre(get("p0c"), get("p1c1"), get("p1c"))); } TEST_F(HloTraversalTest, FindAllNotFound) { const char kConverts[] = R"( HloModule test ENTRY entry { p0 = s8[128] parameter(0) p1 = f16[128] parameter(1) p0c = f16[128] convert(p0) ROOT diff = f16[128] subtract(p0c, p1) })"; auto module = 
ParseAndReturnVerifiedModule(kConverts).value(); auto root = module->entry_computation()->GetInstructionWithName("diff"); std::vector<const HloInstruction*> converts = HloBfsFindAll({root}, [&](const HloInstruction* node) { return node->opcode() == HloOpcode::kAdd; }); EXPECT_THAT(converts, IsEmpty()); } const char kTwoFusions[] = R"( HloModule test scalar_add_computation { scalar_lhs.0 = f32[] parameter(0) scalar_rhs.0 = f32[] parameter(1) ROOT add.0 = f32[] add(scalar_lhs.0, scalar_rhs.0) } fused_computation_1 { p0.1 = f32[] parameter(0) p1.1 = f32[128] parameter(1) mul = f32[128] multiply(p1.1, p1.1) ROOT reduce.1 = f32[] reduce(mul, p0.1), dimensions={0}, to_apply=scalar_add_computation } fused_computation_2 { p0.2 = f32[] parameter(0) p1.2 = f32[128] parameter(1) ROOT reduce.2 = f32[] reduce(p1.2, p0.2), dimensions={0}, to_apply=scalar_add_computation } ENTRY entry { p0 = f32[] parameter(0) p1 = f32[128] parameter(1) sum = f32[128] add(p1, p1) negate = f32[128] negate(sum) fusion.1 = f32[] fusion(p0, negate), kind=kLoop, calls=fused_computation_1 fusion.2 = f32[] fusion(fusion.1, negate), kind=kLoop, calls=fused_computation_2 ROOT difference = f32[] subtract(fusion.2, p0) })"; TEST_F(HloTraversalTest, FuseFusionConsumer) { auto module = ParseAndReturnVerifiedModule(kTwoFusions).value(); auto producer = module->entry_computation()->GetInstructionWithName("negate"); auto consumer = module->entry_computation()->GetInstructionWithName("fusion.1"); auto fusion = HloFusionAdaptor::ForProducerConsumer(producer, consumer); HloInstructionAdaptor reduce_1( *module->GetComputationWithName("fused_computation_1") ->GetInstructionWithName("reduce.1"), fusion.get()); EXPECT_TRUE(reduce_1.GetUsers().empty()); std::vector<std::string> nodes; HloBfsConsumersFirstTraversal(fusion->GetRoots(), *fusion, [&](HloInstructionAdaptor node) { nodes.emplace_back(node.name()); return TraversalResult::kAdvance; }); EXPECT_THAT(nodes, ElementsAre("reduce.1", "mul", "negate")); } TEST_F(HloTraversalTest, FuseFusionProducer) { auto module = ParseAndReturnVerifiedModule(kTwoFusions).value(); auto producer = module->entry_computation()->GetInstructionWithName("fusion.2"); auto consumer = module->entry_computation()->GetInstructionWithName("difference"); auto fusion = HloFusionAdaptor::ForProducerConsumer(producer, consumer); HloInstructionAdaptor reduce_2( *module->GetComputationWithName("fused_computation_2") ->GetInstructionWithName("reduce.2"), fusion.get()); EXPECT_THAT(reduce_2.GetOperands(), ElementsAre(InstructionAdaptorName("negate"), InstructionAdaptorName("fusion.1"))); std::vector<std::string> nodes; HloBfsConsumersFirstTraversal(fusion->GetRoots(), *fusion, [&](HloInstructionAdaptor node) { nodes.emplace_back(node.name()); return TraversalResult::kAdvance; }); EXPECT_THAT(nodes, ElementsAre("difference", "reduce.2")); } TEST_F(HloTraversalTest, FuseFusionConsumerAndProducer) { auto module = ParseAndReturnVerifiedModule(kTwoFusions).value(); auto producer = module->entry_computation()->GetInstructionWithName("fusion.1"); auto consumer = module->entry_computation()->GetInstructionWithName("fusion.2"); auto fusion = HloFusionAdaptor::ForProducerConsumer(producer, consumer); std::vector<std::string> nodes; HloBfsConsumersFirstTraversal(fusion->GetRoots(), *fusion, [&](HloInstructionAdaptor node) { nodes.emplace_back(node.name()); return TraversalResult::kAdvance; }); std::vector<std::string> params; absl::c_for_each(fusion->GetParameters(), [&](const HloInstruction* param) { params.emplace_back(param->name()); 
}); EXPECT_THAT(nodes, ElementsAre("reduce.2", "reduce.1", "mul")); EXPECT_THAT(params, ElementsAre("negate", "p0")); } TEST_F(HloTraversalTest, FuseNonFusionConsumerAndProducer) { auto module = ParseAndReturnVerifiedModule(kTestModule).value(); auto producer = module->entry_computation()->GetInstructionWithName("log"); auto consumer = module->entry_computation()->GetInstructionWithName("negate"); auto fusion = HloFusionAdaptor::ForProducerConsumer(producer, consumer); std::vector<std::string> nodes; HloBfsConsumersFirstTraversal(fusion->GetRoots(), *fusion, [&](HloInstructionAdaptor node) { nodes.emplace_back(node.name()); return TraversalResult::kAdvance; }); EXPECT_THAT(nodes, ElementsAre("negate", "log")); } TEST_F(HloTraversalTest, SingleInstructionFusionOfFusion) { auto module = ParseAndReturnVerifiedModule(kTwoFusions).value(); auto fusion = HloFusionAdaptor::ForInstruction( module->entry_computation()->GetInstructionWithName("fusion.1")); std::vector<std::string> nodes; HloBfsConsumersFirstTraversal(fusion->GetRoots(), *fusion, [&](HloInstructionAdaptor node) { nodes.emplace_back(node.name()); return TraversalResult::kAdvance; }); EXPECT_THAT(nodes, ElementsAre("reduce.1", "mul")); } TEST_F(HloTraversalTest, SingleInstructionFusionOfInstruction) { auto module = ParseAndReturnVerifiedModule(kTwoFusions).value(); auto fusion = HloFusionAdaptor::ForInstruction( module->entry_computation()->GetInstructionWithName("negate")); std::vector<std::string> nodes; HloBfsConsumersFirstTraversal(fusion->GetRoots(), *fusion, [&](HloInstructionAdaptor node) { nodes.emplace_back(node.name()); return TraversalResult::kAdvance; }); EXPECT_THAT(nodes, ElementsAre("negate")); } TEST_F(HloTraversalTest, MultiOutputFusionDuplicateRoot) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test fused_computation { p0.1 = f32[128] parameter(0) p1.1 = f32[128] parameter(1) mul = f32[128] multiply(p0.1, p1.1) ROOT res = (f32[128], f32[128]) tuple(mul, mul) } ENTRY entry { p0 = f32[128] parameter(0) p1 = f32[128] parameter(1) ROOT fusion = (f32[128], f32[128]) fusion(p0, p1), kind=kLoop, calls=fused_computation })") .value(); auto fusion = HloFusionAdaptor::ForInstruction( module->entry_computation()->GetInstructionWithName("fusion")); EXPECT_THAT(fusion->GetRoots(), ElementsAre(InstructionAdaptorName("mul"), InstructionAdaptorName("mul"))); } TEST_F(HloTraversalTest, MakeInstructionsPostOrder_SingleInstruction) { auto module = ParseAndReturnVerifiedModule(kTwoFusions).value(); auto fusion = HloFusionAdaptor::ForInstruction( module->entry_computation()->GetInstructionWithName("negate")); auto nodes = fusion->MakeInstructionPostOrder(); EXPECT_THAT(nodes, ElementsAre(InstructionAdaptorName("negate"))); } TEST_F(HloTraversalTest, MakeInstructionsPostOrder_TwoFusions) { auto module = ParseAndReturnVerifiedModule(kTwoFusions).value(); auto fusion = HloFusionAdaptor::ForProducerConsumer( module->entry_computation()->GetInstructionWithName("fusion.1"), module->entry_computation()->GetInstructionWithName("fusion.2")); auto nodes = fusion->MakeInstructionPostOrder(); EXPECT_THAT(nodes, ElementsAre(InstructionAdaptorName("mul"), InstructionAdaptorName("reduce.1"), InstructionAdaptorName("reduce.2"))); } TEST_F(HloTraversalTest, MakeInstructionsPostOrder_TwoMultiOutputFusions) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test scalar_add_computation { scalar_lhs.0 = f32[] parameter(0) scalar_rhs.0 = f32[] parameter(1) ROOT add.0 = f32[] add(scalar_lhs.0, scalar_rhs.0) } fused_computation_1 { p0.1 = 
f32[] parameter(0) p1.1 = f32[128] parameter(1) mul = f32[128] multiply(p1.1, p1.1) reduce.1 = f32[] reduce(mul, p0.1), dimensions={0}, to_apply=scalar_add_computation ROOT t = (f32[128], f32[]) tuple(mul, reduce.1) } fused_computation_2 { p0.2 = f32[] parameter(0) p1.2 = f32[128] parameter(1) neg = f32[128] negate(p1.2) reduce.2 = f32[] reduce(neg, p0.2), dimensions={0}, to_apply=scalar_add_computation ROOT t2 = (f32[], f32[128]) tuple(reduce.2, neg) } ENTRY entry { p0 = f32[] parameter(0) p1 = f32[128] parameter(1) sum = f32[128] add(p1, p1) negate = f32[128] negate(sum) fusion.1 = (f32[128], f32[]) fusion(p0, negate), kind=kLoop, calls=fused_computation_1 gte1 = f32[128] get-tuple-element(fusion.1), index=0 gte2 = f32[] get-tuple-element(fusion.1), index=1 fusion.2 = (f32[], f32[128]) fusion(p0, gte1), kind=kLoop, calls=fused_computation_2 gte3 = f32[] get-tuple-element(fusion.2), index=0 gte4 = f32[128] get-tuple-element(fusion.2), index=1 difference = f32[] subtract(gte3, p0) ROOT res = (f32[], f32[128]) tuple(difference, gte4) })") .value(); auto fusion = HloFusionAdaptor::ForProducerConsumer( module->entry_computation()->GetInstructionWithName("fusion.1"), module->entry_computation()->GetInstructionWithName("fusion.2")); auto nodes = fusion->MakeInstructionPostOrder(); EXPECT_THAT(nodes, ElementsAre(InstructionAdaptorName("mul"), InstructionAdaptorName("reduce.1"), InstructionAdaptorName("neg"), InstructionAdaptorName("reduce.2"))); } const char kTwoMultiOutputFusions[] = R"( HloModule mof mof_producer { param0 = f32[10]{0} parameter(0) param1 = f32[10]{0} parameter(1) param2 = f32[10]{0} parameter(2) add = f32[10]{0} add(param0, param1) sub = f32[10]{0} subtract(param0, param1) ROOT res = (f32[10]{0}, f32[10]{0}, f32[10]{0}, f32[10]{0}, f32[10]{0}) tuple(param1, add, sub, param0, param2) } mof_consumer { param0.0 = f32[10]{0} parameter(0) param1.0 = f32[10]{0} parameter(1) param2.0 = f32[10]{0} parameter(2) mul = f32[10]{0} multiply(param0.0, param1.0) div = f32[10]{0} divide(param0.0, param1.0) ROOT res = (f32[10]{0}, f32[10]{0}, f32[10]{0}) tuple(mul, div, param2.0) } ENTRY main { p0 = f32[10]{0} parameter(0) p1 = f32[10]{0} parameter(1) p2 = f32[10]{0} parameter(2) producer = (f32[10]{0}, f32[10]{0}, f32[10]{0}, f32[10]{0}, f32[10]{0}) fusion(p0, p1, p2), kind=kLoop, calls=mof_producer gte0 = f32[10]{0} get-tuple-element(producer), index=0 gte1 = f32[10]{0} get-tuple-element(producer), index=1 gte2 = f32[10]{0} get-tuple-element(producer), index=2 gte3 = f32[10]{0} get-tuple-element(producer), index=3 gte4 = f32[10]{0} get-tuple-element(producer), index=4 consumer = (f32[10]{0}, f32[10]{0}, f32[10]{0}) fusion(gte1, gte2, gte3), kind=kLoop, calls=mof_consumer gte5 = f32[10]{0} get-tuple-element(consumer), index=0 gte6 = f32[10]{0} get-tuple-element(consumer), index=1 gte7 = f32[10]{0} get-tuple-element(consumer), index=2 ROOT res = tuple(gte0, gte1, gte3, gte4, gte5, gte6, gte7) })"; TEST_F(HloTraversalTest, GetParametersMultiOutputFusion) { auto module = ParseAndReturnVerifiedModule(kTwoMultiOutputFusions).value(); auto producer = module->entry_computation()->GetInstructionWithName("producer"); auto consumer = module->entry_computation()->GetInstructionWithName("consumer"); auto fusion_adaptor = HloFusionAdaptor::ForProducerConsumer(producer, consumer); auto p0 = module->entry_computation()->GetInstructionWithName("p0"); auto p1 = module->entry_computation()->GetInstructionWithName("p1"); EXPECT_THAT(fusion_adaptor->GetParameters(), ElementsAre(p0, p1)); 
consumer->MergeFusionInstructionIntoMultiOutput(producer); EXPECT_THAT(consumer->operands(), ElementsAre(p0, p1)); } TEST_F(HloTraversalTest, GetRootsMultiOutputFusion) { auto module = ParseAndReturnVerifiedModule(kTwoMultiOutputFusions).value(); auto consumer_fusion_instr = module->entry_computation()->GetInstructionWithName("consumer"); auto producer_fusion_instr = module->entry_computation()->GetInstructionWithName("producer"); auto fusion_adaptor = HloFusionAdaptor::ForProducerConsumer( producer_fusion_instr, consumer_fusion_instr); auto producer_computation = module->GetComputationWithName("mof_producer"); auto producer = HloFusionAdaptor::ForComputation(producer_computation); auto consumer_computation = module->GetComputationWithName("mof_consumer"); auto consumer = HloFusionAdaptor::ForComputation(consumer_computation); EXPECT_THAT(fusion_adaptor->GetRoots(), ElementsAre( HloInstructionAdaptor{ *consumer_computation->GetInstructionWithName("mul"), consumer.get()}, HloInstructionAdaptor{ *consumer_computation->GetInstructionWithName("div"), consumer.get()}, HloInstructionAdaptor{ *producer_computation->GetInstructionWithName("param0"), producer.get()}, HloInstructionAdaptor{ *producer_computation->GetInstructionWithName("add"), producer.get()})); consumer_fusion_instr->MergeFusionInstructionIntoMultiOutput( producer_fusion_instr); EXPECT_THAT(consumer_fusion_instr->fused_expression_root(), GmockMatch(m::Tuple( m::Multiply(m::Add(m::Parameter(0), m::Parameter(1)), m::Subtract(m::Parameter(0), m::Parameter(1))), m::Divide(m::Add(m::Parameter(0), m::Parameter(1)), m::Subtract(m::Parameter(0), m::Parameter(1))), m::Parameter(0), m::Add(m::Parameter(0), m::Parameter(1))))); } TEST_F(HloTraversalTest, HloFindUseChain) { auto module = ParseAndReturnVerifiedModule(R"( fusion { p0 = f32[] parameter(0) p1 = f32[] parameter(1) negate = f32[] negate(p0) log = f32[] log(p0) sum = f32[] add(p0, log) exp = f32[] exponential(p1) ROOT call = f32[] custom-call(negate, exp, sum), custom_call_target="it" } ENTRY main { p0 = f32[] parameter(0) p1 = f32[] parameter(1) ROOT fusion = f32[] fusion(p0, p1), kind=kLoop, calls=fusion } )") .value(); auto* fusion_computation = module->GetComputationWithName("fusion"); auto fusion = HloFusionAdaptor::ForComputation(fusion_computation); auto get = [&](absl::string_view name) { return HloInstructionAdaptor{ *fusion_computation->GetInstructionWithName(name), fusion.get()}; }; auto p0 = get("p0"); auto p1 = get("p1"); auto log = get("log"); auto sum = get("sum"); auto negate = get("negate"); auto exp = get("exp"); auto call = get("call"); EXPECT_THAT(HloFindUseChain(p0, p0), ElementsAre(p0)); EXPECT_THAT(HloFindUseChain(p0, p1), IsEmpty()); EXPECT_THAT(HloFindUseChain(p0, call), ElementsAre(p0, negate, call)); EXPECT_THAT(HloFindUseChain(p0, sum), ElementsAre(p0, log, sum)); EXPECT_THAT(HloFindUseChain(p1, exp), ElementsAre(p1, exp)); EXPECT_THAT(HloFindUseChain(negate, exp), IsEmpty()); EXPECT_THAT(HloFindUseChain(call, p0), IsEmpty()); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/hlo_traversal.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/hlo_traversal_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
a08ec787-9d7c-4b53-a2d3-06716be6e35f
cpp
tensorflow/tensorflow
cutlass_gemm_fusion
third_party/xla/xla/service/gpu/kernels/cutlass_gemm_fusion.cc
third_party/xla/xla/service/gpu/kernels/cutlass_gemm_fusion_test.cc
#include "xla/service/gpu/kernels/cutlass_gemm_fusion.h" #include <algorithm> #include <array> #include <cstddef> #include <cstdint> #include <optional> #include <string> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_set.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/types/span.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/kernels/custom_kernel.h" #include "xla/service/gpu/kernels/custom_kernel_fusion.h" #include "xla/service/gpu/kernels/custom_kernel_fusion_pattern.h" #include "xla/service/gpu/kernels/cutlass_gemm.h" #include "xla/service/gpu/kernels/cutlass_gemm_custom_kernel.h" #include "xla/service/pattern_matcher.h" #include "xla/shape.h" #include "xla/stream_executor/device_description.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla::gpu { namespace { struct RootWithWorkspace { HloInstruction* root; HloInstruction* workspace; }; static RootWithWorkspace MatchRootWithWorkspace(HloInstruction* root) { RootWithWorkspace result; if (Match(root, match::Tuple(match::Op(&result.root), match::CustomCall( &result.workspace, {CustomKernelFusionPattern::kWorkspace})))) { return result; } return {root, nullptr}; } struct GemmWithUpcast { explicit GemmWithUpcast(HloDotInstruction* dot) : dot(dot) {} HloInstruction* dot; HloInstruction* lhs_upcast = nullptr; HloInstruction* rhs_upcast = nullptr; }; struct GemmWithDynamicSlice { explicit GemmWithDynamicSlice(HloDynamicUpdateSliceInstruction* update_slice) : update_slice(update_slice) {} std::vector<HloInstruction*> Instrs() { if (bitcast == nullptr) { return {dot, update_slice}; } return {dot, bitcast, update_slice}; } HloInstruction* dot = nullptr; HloInstruction* bitcast = nullptr; HloInstruction* update_slice = nullptr; }; absl::Status MatchRowMajorGemm(HloDotInstruction* dot) { if (dot->operand(0)->shape().dimensions_size() != 2 || dot->operand(1)->shape().dimensions_size() != 2) { return absl::InternalError("operands must have rank 2"); } if (dot->shape().layout().minor_to_major().back() != 0) { return absl::InternalError("The dot result must have row major layout."); } auto& dot_dims = dot->dot_dimension_numbers(); if (dot_dims.lhs_contracting_dimensions().size() != 1) { return absl::InternalError("Lhs contracting dimensions must be of size 1."); } if (dot_dims.rhs_contracting_dimensions().size() != 1) { return absl::InternalError("Rhs contracting dimensions must be of size 1."); } if (dot->operand(0)->shape().layout().minor_to_major(0) != dot_dims.lhs_contracting_dimensions()[0]) { return absl::InternalError( "Lhs contracting dimension should be along the minor axis (elements " "that are stored contigous in memory)."); } if (dot->operand(1)->shape().layout().minor_to_major(1) != dot_dims.rhs_contracting_dimensions()[0]) { return absl::InternalError( "Rhs contracting dimension should be along the major axis (elements " "that are NOT stored contigous in memory)."); } return absl::OkStatus(); } } static absl::Status MatchSimpleGemm( HloDotInstruction* dot, absl::Span<const PrimitiveType> support_dtypes) { TF_RETURN_IF_ERROR(MatchRowMajorGemm(dot)); for (PrimitiveType dtype : support_dtypes) { if (dot->operand(0)->shape().element_type() == dtype && 
dot->operand(1)->shape().element_type() == dtype && dot->shape().element_type() == dtype) { return absl::OkStatus(); } } return absl::InternalError("unsupported operands type"); } static absl::StatusOr<GemmWithUpcast> MatchGemmWithUpcast( HloDotInstruction* dot) { TF_RETURN_IF_ERROR(MatchRowMajorGemm(dot)); GemmWithUpcast match(dot); if (Match(const_cast<HloInstruction*>(dot->operand(0)), match::Convert(&match.lhs_upcast, match::Op())) && Match(const_cast<HloInstruction*>(dot->operand(1)), match::Convert(&match.rhs_upcast, match::Op()))) { return match; } if (Match(const_cast<HloInstruction*>(dot->operand(0)), match::Convert(&match.lhs_upcast, match::Op()))) { return match; } if (Match(const_cast<HloInstruction*>(dot->operand(1)), match::Convert(&match.rhs_upcast, match::Op()))) { return match; } return absl::InternalError("unsupported gemm with upcasing"); } template <typename Pattern> auto OptionalBitcast(HloInstruction** optional_bitcast, Pattern pattern) { return match::AnyOf<HloInstruction>(match::Bitcast(optional_bitcast, pattern), std::move(pattern)); } static absl::StatusOr<GemmWithDynamicSlice> MatchGemmWithDynamicUpdateSlice( HloDynamicUpdateSliceInstruction* update_slice) { GemmWithDynamicSlice match(update_slice); if (!Match(const_cast<HloInstruction*>(update_slice->update()), OptionalBitcast(&match.bitcast, match::Dot(&match.dot, match::Op(), match::Op())))) { return absl::InternalError("failed to match update slice instr"); } TF_RETURN_IF_ERROR(MatchRowMajorGemm(Cast<HloDotInstruction>(match.dot))); return match; } static bool AreInstructionsOnTheSameStream( absl::Span<const HloInstruction* const> instructions) { absl::flat_hash_set<int64_t> stream_set; for (const HloInstruction* inst : instructions) { auto gpu_config = inst->backend_config<GpuBackendConfig>(); if (!gpu_config.ok()) { continue; } stream_set.insert(gpu_config->operation_queue_id()); if (stream_set.size() > 1) { return false; } } return true; }; std::optional<CustomKernelFusionPattern::Match> CutlassGemmPattern::TryMatch( const se::DeviceDescription& device, HloInstruction* instr) const { auto* dot = DynCast<HloDotInstruction>(instr); if (!dot) return std::nullopt; auto matched = MatchSimpleGemm(dot, {PrimitiveType::F32}); if (!matched.ok()) return std::nullopt; CustomFusionConfig config; config.set_name("cutlass_gemm"); return Match{config, {instr}}; } std::optional<CustomKernelFusionPattern::Match> CutlassGemmWithDynamicUpdateSlicePattern::TryMatch( const se::DeviceDescription& device, HloInstruction* instr) const { auto* update_slice = DynCast<HloDynamicUpdateSliceInstruction>(instr); if (!update_slice) return std::nullopt; auto matched = MatchGemmWithDynamicUpdateSlice(update_slice); if (!matched.ok() || !AreInstructionsOnTheSameStream(matched->Instrs())) return std::nullopt; CustomFusionConfig config; config.set_name("cutlass_gemm_with_dynamic_update_slice"); Match match(config, matched->Instrs()); match.AddReplacement(matched->dot, [=](HloFusionInstruction* fusion) { HloComputation* parent = fusion->parent(); auto* dus = Cast<HloDynamicUpdateSliceInstruction>(matched->update_slice); bool has_bitcast = matched->bitcast != nullptr; const Shape dus_shape = has_bitcast ? 
matched->bitcast->shape() : matched->dot->shape(); auto* slice = parent->AddInstruction(HloInstruction::CreateDynamicSlice( dus_shape, fusion, dus->index_operands(), dus_shape.dimensions())); return parent->AddInstruction( HloInstruction::CreateBitcast(matched->dot->shape(), slice)); }); return match; } namespace { bool IsSupportedKernel(PrimitiveType lhs, PrimitiveType rhs, PrimitiveType dot) { constexpr std::array<std::array<PrimitiveType, 3>, 4> kSupportedKernels = { {{BF16, BF16, F32}, {F32, BF16, F32}, {BF16, S8, F32}}}; return absl::c_linear_search(kSupportedKernels, std::array<PrimitiveType, 3>{lhs, rhs, dot}); } } std::optional<CustomKernelFusionPattern::Match> CutlassGemmWithUpcastPattern::TryMatch(const se::DeviceDescription& device, HloInstruction* instr) const { auto* dot = DynCast<HloDotInstruction>(instr); if (!dot) return std::nullopt; absl::StatusOr<GemmWithUpcast> matched = MatchGemmWithUpcast(dot); if (!matched.ok()) { VLOG(3) << "No match due to unsupported gemm with upcast: " << matched.status(); return std::nullopt; } CustomFusionConfig config; config.set_name("cutlass_gemm_with_upcast"); HloInstruction* lhs = matched->lhs_upcast; HloInstruction* rhs = matched->rhs_upcast; PrimitiveType dot_type = dot->shape().element_type(); PrimitiveType lhs_type = lhs != nullptr ? lhs->operand(0)->shape().element_type() : dot->operand(0)->shape().element_type(); PrimitiveType rhs_type = rhs != nullptr ? rhs->operand(0)->shape().element_type() : dot->operand(1)->shape().element_type(); if (!IsSupportedKernel(lhs_type, rhs_type, dot_type)) { VLOG(3) << "No match due to unsupported kernel input types: " << PrimitiveType_Name(lhs_type) << "x" << PrimitiveType_Name(rhs_type) << "To" << PrimitiveType_Name(dot_type); return std::nullopt; } if (lhs != nullptr && rhs == nullptr) { return Match{config, {matched->lhs_upcast, instr}}; } else if (lhs == nullptr && rhs != nullptr) { return Match{config, {matched->rhs_upcast, instr}}; } else { return Match{config, {matched->lhs_upcast, matched->rhs_upcast, instr}}; } } class CutlassGemmFusion : public CustomKernelFusion { public: absl::StatusOr<std::vector<CustomKernel>> LoadKernels( const se::DeviceDescription& device, const HloComputation* computation) const final { auto* dot = DynCast<HloDotInstruction>(computation->root_instruction()); if (dot == nullptr) { return absl::InternalError( "cutlass_gemm requires ROOT operation to be a dot"); } TF_RETURN_IF_ERROR(MatchSimpleGemm(dot, {PrimitiveType::F32})); PrimitiveType dot_type = dot->shape().element_type(); auto* lhs = Cast<HloParameterInstruction>(dot->operand(0)); auto* rhs = Cast<HloParameterInstruction>(dot->operand(1)); kernel::gemm_universal::ArgsIndices indices = { lhs->parameter_number(), rhs->parameter_number(), computation->num_parameters()}; const Shape& lhs_shape = lhs->shape(); const Shape& rhs_shape = rhs->shape(); size_t m = lhs_shape.dimensions(0); size_t k = lhs_shape.dimensions(1); size_t n = rhs_shape.dimensions(1); PrimitiveType lhs_type = lhs->shape().element_type(); PrimitiveType rhs_type = rhs->shape().element_type(); return GetCutlassGemmKernels("cutlass_gemm", dot_type, lhs_type, rhs_type, m, n, k, indices, {}, device); } }; class CutlassGemmWithUpcastFusion : public CustomKernelFusion { public: absl::StatusOr<std::vector<CustomKernel>> LoadKernels( const se::DeviceDescription& device, const HloComputation* computation) const final { auto* dot = DynCast<HloDotInstruction>(computation->root_instruction()); if (dot == nullptr) { return absl::InternalError( 
"cutlass_gemm_with_upcast requires ROOT operation to be a dot"); } TF_ASSIGN_OR_RETURN(GemmWithUpcast matched, MatchGemmWithUpcast(dot)); const HloParameterInstruction* lhs; const HloParameterInstruction* rhs; if (matched.lhs_upcast == nullptr && matched.rhs_upcast != nullptr) { lhs = Cast<HloParameterInstruction>(matched.dot->operand(0)); rhs = Cast<HloParameterInstruction>(matched.rhs_upcast->operand(0)); } else if (matched.lhs_upcast != nullptr && matched.rhs_upcast == nullptr) { lhs = Cast<HloParameterInstruction>(matched.lhs_upcast->operand(0)); rhs = Cast<HloParameterInstruction>(matched.dot->operand(1)); } else { lhs = Cast<HloParameterInstruction>(matched.lhs_upcast->operand(0)); rhs = Cast<HloParameterInstruction>(matched.rhs_upcast->operand(0)); } const Shape& lhs_shape = lhs->shape(); const Shape& rhs_shape = rhs->shape(); size_t m = lhs_shape.dimensions(0); size_t k = lhs_shape.dimensions(1); size_t n = rhs_shape.dimensions(1); PrimitiveType dot_type = dot->shape().element_type(); PrimitiveType lhs_type = lhs_shape.element_type(); PrimitiveType rhs_type = rhs_shape.element_type(); kernel::gemm_universal::ArgsIndices args_indices = { lhs->parameter_number(), rhs->parameter_number(), computation->num_parameters()}; return GetCutlassGemmKernels("cutlass_gemm_with_upcast", dot_type, lhs_type, rhs_type, m, n, k, args_indices, {}, device); } }; class CutlassGemmWithDynamicUpdateSliceFusion : public CustomKernelFusion { public: absl::StatusOr<std::vector<CustomKernel>> LoadKernels( const se::DeviceDescription& device, const HloComputation* computation) const final { auto [root, workspace] = MatchRootWithWorkspace(computation->root_instruction()); auto* dus = DynCast<HloDynamicUpdateSliceInstruction>(root); if (dus == nullptr) { return absl::InternalError( "cutlass_gemm_with_dynamic_update_slice requires ROOT operation to " "be a dynamic update slice"); } TF_ASSIGN_OR_RETURN(auto matched, MatchGemmWithDynamicUpdateSlice(dus)); TF_RETURN_IF_ERROR( MatchSimpleGemm(Cast<HloDotInstruction>(matched.dot), {PrimitiveType::F32, PrimitiveType::BF16})); auto dot_type = matched.dot->shape().element_type(); auto* lhs = Cast<HloParameterInstruction>(matched.dot->operand(0)); auto* rhs = Cast<HloParameterInstruction>(matched.dot->operand(1)); auto* out = Cast<HloParameterInstruction>(matched.update_slice->operand(0)); kernel::gemm_universal::ArgsIndices args_indices = { lhs->parameter_number(), rhs->parameter_number(), out->parameter_number(), workspace != nullptr}; auto* offset = Cast<HloParameterInstruction>(matched.update_slice->operand(2)); kernel::gemm_universal::DynamicSliceIndices slices; slices.out = offset->parameter_number(); const Shape& lhs_shape = lhs->shape(); const Shape& rhs_shape = rhs->shape(); size_t m = lhs_shape.dimensions(0); size_t k = lhs_shape.dimensions(1); size_t n = rhs_shape.dimensions(1); PrimitiveType lhs_type = lhs->shape().element_type(); PrimitiveType rhs_type = rhs->shape().element_type(); return GetCutlassGemmKernels("cutlass_gemm_with_dynamic_update_slice", dot_type, lhs_type, rhs_type, m, n, k, args_indices, slices, device); } }; } XLA_REGISTER_CUSTOM_FUSION_PATTERN(::xla::gpu::CutlassGemmWithUpcastPattern); XLA_REGISTER_CUSTOM_FUSION_PATTERN( ::xla::gpu::CutlassGemmWithDynamicUpdateSlicePattern); XLA_REGISTER_CUSTOM_FUSION("cutlass_gemm", ::xla::gpu::CutlassGemmFusion); XLA_REGISTER_CUSTOM_FUSION("cutlass_gemm_with_upcast", ::xla::gpu::CutlassGemmWithUpcastFusion); XLA_REGISTER_CUSTOM_FUSION("cutlass_gemm_with_dynamic_update_slice", 
::xla::gpu::CutlassGemmWithDynamicUpdateSliceFusion);
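A minimal sketch (not taken from the files above or below) of how the CUTLASS fusion patterns registered above are typically wired into the rewriter pass, mirroring the setup used by the tests that follow. The helper name RewriteGemmsToCutlass is an assumption, as is the comment on the rewriter's second argument, which the tests simply pass as 0.

#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/kernels/custom_kernel_fusion_pattern.h"
#include "xla/service/gpu/kernels/cutlass_gemm_fusion.h"
#include "xla/service/gpu/transforms/custom_kernel_fusion_rewriter.h"
#include "xla/stream_executor/device_description.h"

namespace xla::gpu {

// Returns true if any dot / dynamic-update-slice in the module was rewritten
// into a kCustom fusion backed by one of the registered CUTLASS kernels.
absl::StatusOr<bool> RewriteGemmsToCutlass(HloModule* module,
                                           const se::DeviceDescription& device) {
  CustomKernelFusionPatternRegistry patterns;
  patterns.Emplace<CutlassGemmPattern>();
  patterns.Emplace<CutlassGemmWithUpcastPattern>();
  patterns.Emplace<CutlassGemmWithDynamicUpdateSlicePattern>();
  // The tests below construct the pass the same way, passing 0 as the second
  // argument (assumed here to select the default kernel index).
  CustomKernelFusionRewriter pass(&device, 0, &patterns);
  return pass.Run(module);
}

}  // namespace xla::gpu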
#include "xla/service/gpu/kernels/cutlass_gemm_fusion.h" #include <cstdint> #include <memory> #include <optional> #include <string> #include <utility> #include <vector> #include <gtest/gtest.h> #include "absl/log/log.h" #include "absl/status/statusor.h" #include "xla/array.h" #include "xla/array2d.h" #include "xla/array3d.h" #include "xla/error_spec.h" #include "xla/literal_util.h" #include "xla/service/gpu/gpu_device_info_for_tests.h" #include "xla/service/gpu/kernels/custom_kernel_fusion_pattern.h" #include "xla/service/gpu/kernels/cutlass_gemm_custom_kernel.h" #include "xla/service/gpu/transforms/custom_kernel_fusion_rewriter.h" #include "xla/tests/hlo_test_base.h" #include "xla/tests/verified_hlo_module.h" #include "xla/types.h" #include "xla/xla_data.pb.h" #include "tsl/platform/test.h" namespace xla::gpu { class CutlassFusionTest : public HloTestBase { public: int GpuSharedMemorySize() { return backend() .default_stream_executor() ->GetDeviceDescription() .shared_memory_per_block_optin(); } int CutlassGemmKernelSharedMemorySize(PrimitiveType dot_type, PrimitiveType lhs_type, PrimitiveType rhs_type, int m, int n, int k) { return kernel::gemm_universal::GetCutlassGemmKernels( "cutlass_gemm", dot_type, lhs_type, rhs_type, m, n, k, {0, 1, 2}, {}, backend().default_stream_executor()->GetDeviceDescription()) ->at(0) .shared_memory_bytes(); }; }; TEST_F(CutlassFusionTest, RowMajorGemm) { const char* hlo = R"( HloModule test ENTRY %main (p0: f32[15,19], p1: f32[19,17]) -> f32[15,17] { %p0 = f32[15,19]{1,0} parameter(0) %p1 = f32[19,17]{1,0} parameter(1) ROOT %r = f32[15,17]{1,0} dot(%p0, %p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; const char* expected = R"( ; CHECK: %cutlass_gemm {{.*}} { ; CHECK: [[P0:%[^ ]+]] = f32[15,19]{1,0} parameter(0) ; CHECK: [[P1:%[^ ]+]] = f32[19,17]{1,0} parameter(1) ; CHECK: ROOT [[DOT:%[^ ]+]] = f32[15,17]{1,0} dot([[P0]], [[P1]]), ; CHECK: lhs_contracting_dims={1}, rhs_contracting_dims={0} ; CHECK: } ; CHECK: ENTRY %main {{.*}} { ; CHECK: ROOT [[FUSION:%[^ ]+]] = f32[15,17]{1,0} fusion ; CHECK: kind=kCustom, calls=%cutlass_gemm, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"cutlass_gemm","kernel_index":0} ; CHECK: } ; CHECK: } )"; CustomKernelFusionPatternRegistry patterns; patterns.Emplace<CutlassGemmPattern>(); auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); CustomKernelFusionRewriter pass(&device, 0, &patterns); RunAndFilecheckHloRewrite(hlo, std::move(pass), expected); } TEST_F(CutlassFusionTest, RowMajorGemmWithUpcast) { const char* hlo = R"( HloModule test ENTRY %main (p0: bf16[15,19], p1: f32[19,17]) -> f32[15,17] { %p0 = bf16[15,19]{1,0} parameter(0) %p1 = bf16[19,17]{1,0} parameter(1) %c1 = f32[19,17]{1,0} convert(%p1) ROOT %r = f32[15,17]{1,0} dot(%p0, %c1), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; const char* expected = R"( ; CHECK: %cutlass_gemm_with_upcast {{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = bf16[15,19]{1,0} parameter ; CHECK-DAG: [[P1:%[^ ]+]] = bf16[19,17]{1,0} parameter ; CHECK: [[C1:%[^ ]+]] = f32[19,17]{1,0} convert([[P1]]) ; CHECK: ROOT [[DOT:%[^ ]+]] = f32[15,17]{1,0} dot([[P0]], [[C1]]), ; CHECK: lhs_contracting_dims={1}, rhs_contracting_dims={0} ; CHECK: } ; CHECK: ENTRY %main {{.*}} { ; CHECK: ROOT [[FUSION:%[^ ]+]] = f32[15,17]{1,0} fusion ; CHECK: kind=kCustom, calls=%cutlass_gemm_with_upcast, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"cutlass_gemm_with_upcast","kernel_index":0} ; 
CHECK: } ; CHECK: } )"; CustomKernelFusionPatternRegistry patterns; patterns.Emplace<CutlassGemmWithUpcastPattern>(); auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); CustomKernelFusionRewriter pass(&device, 0, &patterns); RunAndFilecheckHloRewrite(hlo, std::move(pass), expected); } TEST_F(CutlassFusionTest, RowMajorGemmWithUpcastOfBothOperands) { const char* hlo = R"( HloModule test ENTRY %main (p0: bf16[15,19], p1: bf16[19,17]) -> f32[15,17] { %p0 = bf16[15,19]{1,0} parameter(0) %c1 = f32[15,19]{1,0} convert(%p0) %p1 = bf16[19,17]{1,0} parameter(1) %c2 = f32[19,17]{1,0} convert(%p1) ROOT %r = f32[15,17]{1,0} dot(%c1, %c2), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; const char* expected = R"( ; CHECK: %cutlass_gemm_with_upcast {{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = bf16[15,19]{1,0} parameter ; CHECK: [[C1:%[^ ]+]] = f32[15,19]{1,0} convert([[P0]]) ; CHECK-DAG: [[P1:%[^ ]+]] = bf16[19,17]{1,0} parameter ; CHECK: [[C2:%[^ ]+]] = f32[19,17]{1,0} convert([[P1]]) ; CHECK: ROOT [[DOT:%[^ ]+]] = f32[15,17]{1,0} dot([[C1]], [[C2]]), ; CHECK: lhs_contracting_dims={1}, rhs_contracting_dims={0} ; CHECK: } ; CHECK: ENTRY %main {{.*}} { ; CHECK: ROOT [[FUSION:%[^ ]+]] = f32[15,17]{1,0} fusion ; CHECK: kind=kCustom, calls=%cutlass_gemm_with_upcast, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"cutlass_gemm_with_upcast","kernel_index":0} ; CHECK: } ; CHECK: } )"; CustomKernelFusionPatternRegistry patterns; patterns.Emplace<CutlassGemmWithUpcastPattern>(); auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); CustomKernelFusionRewriter pass(&device, 0, &patterns); RunAndFilecheckHloRewrite(hlo, std::move(pass), expected); } TEST_F(CutlassFusionTest, DoNotPatternMatchNotImplementedKernelTypes) { const char* hlo = R"( HloModule test ENTRY %main (p0: bf16[15,19], p1: bf16[19,17]) -> f32[15,17] { %p0 = s8[15,19]{1,0} parameter(0) %c1 = f32[15,19]{1,0} convert(%p0) %p1 = s8[19,17]{1,0} parameter(1) %c2 = f32[19,17]{1,0} convert(%p1) ROOT %r = f32[15,17]{1,0} dot(%c1, %c2), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; CustomKernelFusionPatternRegistry patterns; patterns.Emplace<CutlassGemmWithUpcastPattern>(); absl::StatusOr<std::unique_ptr<VerifiedHloModule>> hlo_module = ParseAndReturnVerifiedModule(hlo); auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); CustomKernelFusionRewriter pass(&device, 0, &patterns); ASSERT_FALSE(pass.Run(hlo_module.value().get()).value()); } TEST_F(CutlassFusionTest, RowMajorGemmWithDynamicUpdateSlice) { const char* hlo = R"( HloModule test ENTRY %main (p0: f32[2,2,2], p1: f32[2,2], i: s32[]) -> f32[2,2,2] { %p0 = f32[2,2,2]{2,1,0} parameter(0) %p1 = f32[2,2]{1,0} parameter(1) %i = s32[] parameter(2) %dot = f32[2,2]{1,0} dot(%p1, %p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} %bc = f32[1,2,2]{2,1,0} bitcast(%dot) ROOT %r = f32[2,2,2]{2,1,0} dynamic-update-slice(%p0, %bc, %i, %i, %i) } )"; const char* expected = R"( ; CHECK: %cutlass_gemm_with_dynamic_update_slice {{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f32[2,2]{1,0} parameter ; CHECK-DAG: [[P1:%[^ ]+]] = f32[2,2,2]{2,1,0} parameter ; CHECK-DAG: [[P2:%[^ ]+]] = s32[] parameter ; CHECK-DAG: [[DOT:%[^ ]+]] = f32[2,2]{1,0} dot([[P0]], [[P0]]) ; CHECK-DAG: [[CAST:%[^ ]+]] = f32[1,2,2]{2,1,0} bitcast([[DOT]]) ; CHECK: ROOT [[DUS:%[^ ]+]] = f32[2,2,2]{2,1,0} dynamic-update-slice( ; CHECK: [[P1]], [[CAST]], [[P2]], [[P2]], [[P2]] ; CHECK: ) ; CHECK: } ; CHECK: ENTRY %main {{.*}} { ; CHECK: ROOT [[FUSION:%[^ ]+]] = f32[2,2,2]{2,1,0} fusion ; 
CHECK: kind=kCustom, calls=%cutlass_gemm_with_dynamic_update_slice, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{ ; CHECK: "name":"cutlass_gemm_with_dynamic_update_slice","kernel_index":0 ; CHECK: } ; CHECK: } ; CHECK: } )"; CustomKernelFusionPatternRegistry patterns; patterns.Emplace<CutlassGemmWithDynamicUpdateSlicePattern>(); auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); CustomKernelFusionRewriter pass(&device, 0, &patterns); RunAndFilecheckHloRewrite(hlo, std::move(pass), expected); } TEST_F(CutlassFusionTest, RowMajorGemmWithDynamicUpdateSliceMultipleUses) { const char* hlo = R"( HloModule test ENTRY %main { %p0 = f32[2,2,2]{2,1,0} parameter(0) %p1 = f32[2,2]{1,0} parameter(1) %i = s32[] parameter(2) %dot = f32[2,2]{1,0} dot(%p1, %p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} %add = f32[2,2]{1,0} add(%dot, %dot) %cast = f32[1,2,2]{2,1,0} bitcast(%dot) %dus = f32[2,2,2]{2,1,0} dynamic-update-slice(%p0, %cast, %i, %i, %i) ROOT %r = (f32[2,2]{1,0}, f32[2,2,2]{2,1,0}) tuple(%add, %dus) } )"; const char* expected = R"( ; CHECK: %cutlass_gemm_with_dynamic_update_slice {{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f32[2,2]{1,0} parameter ; CHECK-DAG: [[P1:%[^ ]+]] = f32[2,2,2]{2,1,0} parameter ; CHECK-DAG: [[P2:%[^ ]+]] = s32[] parameter ; CHECK-DAG: [[DOT:%[^ ]+]] = f32[2,2]{1,0} dot([[P0]], [[P0]]) ; CHECK-DAG: [[CAST:%[^ ]+]] = f32[1,2,2]{2,1,0} bitcast([[DOT]]) ; CHECK: ROOT [[DUS:%[^ ]+]] = f32[2,2,2]{2,1,0} dynamic-update-slice( ; CHECK: [[P1]], [[CAST]], [[P2]], [[P2]], [[P2]] ; CHECK: ) ; CHECK: } ; CHECK: ENTRY %main {{.*}} { ; CHECK: [[OFFSET:%[^ ]+]] = s32[] parameter(2) ; CHECK: [[FUSION:%[^ ]+]] = f32[2,2,2]{2,1,0} fusion ; CHECK: kind=kCustom, calls=%cutlass_gemm_with_dynamic_update_slice, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{ ; CHECK: "name":"cutlass_gemm_with_dynamic_update_slice","kernel_index":0 ; CHECK: } ; CHECK: } ; CHECK: [[SLICE:%[^ ]+]] = f32[1,2,2]{2,1,0} dynamic-slice( ; CHECK: [[FUSION]], [[OFFSET]], [[OFFSET]], [[OFFSET]]), ; CHECK: dynamic_slice_sizes={1,2,2} ; CHECK: [[CAST:%[^. ]+]] = f32[2,2]{1,0} bitcast([[SLICE]]) ; CHECK: [[ADD:%[^. 
]+]] = f32[2,2]{1,0} add([[CAST]], [[CAST]]) ; CHECK: } )"; CustomKernelFusionPatternRegistry patterns; patterns.Emplace<CutlassGemmWithDynamicUpdateSlicePattern>(); auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); CustomKernelFusionRewriter pass(&device, 0, &patterns); RunAndFilecheckHloRewrite(hlo, std::move(pass), expected); } TEST_F(CutlassFusionTest, RowMajorGemmWithDynamicUpdateSliceWithoutBitcast) { const char* hlo = R"( HloModule test ENTRY %main (p0: f32[4,2], p1: f32[2,2], i: s32[]) -> f32[4,2] { %p0 = f32[4,2]{1,0} parameter(0) %p1 = f32[2,2]{1,0} parameter(1) %i = s32[] parameter(2) %dot = f32[2,2]{1,0} dot(%p1, %p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT %r = f32[4,2]{1,0} dynamic-update-slice(%p0, %dot, %i, %i) } )"; const char* expected = R"( ; CHECK: %cutlass_gemm_with_dynamic_update_slice {{.*}} { ; CHECK-DAG: [[P1:%[^ ]+]] = f32[4,2]{1,0} parameter ; CHECK-DAG: [[P0:%[^ ]+]] = f32[2,2]{1,0} parameter ; CHECK-DAG: [[DOT:%[^ ]+]] = f32[2,2]{1,0} dot([[P0]], [[P0]]) ; CHECK-DAG: [[P2:%[^ ]+]] = s32[] parameter ; CHECK: ROOT [[DUS:%[^ ]+]] = f32[4,2]{1,0} dynamic-update-slice([[P1]], [[DOT]], [[P2]], [[P2]]) ; CHECK: } ; CHECK: ENTRY %main {{.*}} { ; CHECK: ROOT [[FUSION:%[^ ]+]] = f32[4,2]{1,0} fusion ; CHECK: kind=kCustom, calls=%cutlass_gemm_with_dynamic_update_slice, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{ ; CHECK: "name":"cutlass_gemm_with_dynamic_update_slice","kernel_index":0 ; CHECK: } ; CHECK: } ; CHECK: } )"; CustomKernelFusionPatternRegistry patterns; patterns.Emplace<CutlassGemmWithDynamicUpdateSlicePattern>(); auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); CustomKernelFusionRewriter pass(&device, 0, &patterns); RunAndFilecheckHloRewrite(hlo, std::move(pass), expected); } TEST_F(CutlassFusionTest, RowMajorGemmKernel) { ErrorSpec error_spec{1e-3, 1e-3}; const char* hlo_text_cublas = R"( HloModule cublas ENTRY e { arg0 = f32[100,784]{1,0} parameter(0) arg1 = f32[784,10]{1,0} parameter(1) gemm = (f32[100,10]{1,0}, s8[0]{0}) custom-call(arg0, arg1), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{"alpha_real":1,"beta":0,"dot_dimension_numbers":{"lhs_contracting_dimensions":[1],"rhs_contracting_dimensions":[0],"lhs_batch_dimensions":[],"rhs_batch_dimensions":[]},"alpha_imag":0,"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},"epilogue":"DEFAULT"}} ROOT get-tuple-element = f32[100,10]{1,0} get-tuple-element((f32[100,10]{1,0}, s8[0]{0}) gemm), index=0 })"; const char* hlo_text_custom_fusion = R"( HloModule cutlass cutlass_gemm { arg0 = f32[100,784]{1,0} parameter(0) arg1 = f32[784,10]{1,0} parameter(1) ROOT dot = f32[100,10]{1,0} dot(arg0, arg1), lhs_contracting_dims={1}, rhs_contracting_dims={0} } ENTRY e { arg0 = f32[100,784]{1,0} parameter(0) arg1 = f32[784,10]{1,0} parameter(1) ROOT _ = f32[100,10]{1,0} fusion(arg0, arg1), kind=kCustom, calls=cutlass_gemm, backend_config={"fusion_backend_config":{kind: "__custom_fusion", custom_fusion_config: {"name":"cutlass_gemm", "kernel_index":0}}} })"; EXPECT_TRUE(RunAndCompareTwoModules(hlo_text_cublas, hlo_text_custom_fusion, error_spec, false)); } TEST_F(CutlassFusionTest, GemmWithRightHandSideUpcastKernel) { ErrorSpec error_spec{1e-3, 1e-3}; const char* hlo_text_cublas = R"( HloModule cublas ENTRY e { p0 = f32[16,32]{1,0} parameter(0) p1 = bf16[32,8]{1,0} parameter(1) c1 = f32[32,8]{1,0} convert(p1) gemm = (f32[16,8]{1,0}, s8[0]{0}) custom-call(p0, c1), custom_call_target="__cublas$gemm", 
backend_config={"gemm_backend_config":{"alpha_real":1,"beta":0,"dot_dimension_numbers":{"lhs_contracting_dimensions":[1],"rhs_contracting_dimensions":[0],"lhs_batch_dimensions":[],"rhs_batch_dimensions":[]},"alpha_imag":0,"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},"epilogue":"DEFAULT"}} ROOT get-tuple-element = f32[16,8]{1,0} get-tuple-element(gemm), index=0 })"; const char* hlo_text_custom_fusion = R"( HloModule cutlass cutlass_gemm_with_upcast { p0 = f32[16,32]{1,0} parameter(0) p1 = bf16[32,8]{1,0} parameter(1) c1 = f32[32,8]{1,0} convert(p1) ROOT dot = f32[16,8]{1,0} dot(p0, c1), lhs_contracting_dims={1}, rhs_contracting_dims={0} } ENTRY e { p0 = f32[16,32]{1,0} parameter(0) p1 = bf16[32,8]{1,0} parameter(1) ROOT _ = f32[16,8]{1,0} fusion(p0, p1), kind=kCustom, calls=cutlass_gemm_with_upcast, backend_config={"fusion_backend_config":{kind: "__custom_fusion", custom_fusion_config: {"name":"cutlass_gemm_with_upcast", "kernel_index":0}}} })"; EXPECT_TRUE(RunAndCompareTwoModules(hlo_text_cublas, hlo_text_custom_fusion, error_spec, false)); } TEST_F(CutlassFusionTest, GemmWithLeftHandAndRightHandSideUpcastKernel) { ErrorSpec error_spec{1e-3, 1e-3}; const char* hlo_text_cublas = R"( HloModule cublas ENTRY e { p0 = bf16[16,32]{1,0} parameter(0) c0 = f32[16,32]{1,0} convert(p0) p1 = s8[32,8]{1,0} parameter(1) c1 = f32[32,8]{1,0} convert(p1) gemm = (f32[16,8]{1,0}, s8[0]{0}) custom-call(c0, c1), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{"alpha_real":1,"beta":0,"dot_dimension_numbers":{"lhs_contracting_dimensions":[1],"rhs_contracting_dimensions":[0],"lhs_batch_dimensions":[],"rhs_batch_dimensions":[]},"alpha_imag":0,"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},"epilogue":"DEFAULT"}} ROOT get-tuple-element = f32[16,8]{1,0} get-tuple-element(gemm), index=0 })"; const char* hlo_text_custom_fusion = R"( HloModule cutlass cutlass_gemm_with_upcast { p0 = bf16[16,32]{1,0} parameter(0) c0 = f32[16,32]{1,0} convert(p0) p1 = s8[32,8]{1,0} parameter(1) c1 = f32[32,8]{1,0} convert(p1) ROOT dot = f32[16,8]{1,0} dot(c0, c1), lhs_contracting_dims={1}, rhs_contracting_dims={0} } ENTRY e { p0 = bf16[16,32]{1,0} parameter(0) p1 = s8[32,8]{1,0} parameter(1) ROOT _ = f32[16,8]{1,0} fusion(p0, p1), kind=kCustom, calls=cutlass_gemm_with_upcast, backend_config={"fusion_backend_config":{kind: "__custom_fusion", custom_fusion_config: {"name":"cutlass_gemm_with_upcast", "kernel_index":0}}} })"; EXPECT_TRUE(RunAndCompareTwoModules(hlo_text_cublas, hlo_text_custom_fusion, error_spec, false)); } TEST_F(CutlassFusionTest, RowMajorGemmWithDynamicUpdateSliceKernel) { if (GpuSharedMemorySize() < CutlassGemmKernelSharedMemorySize(BF16, BF16, BF16, 8, 8, 8)) { GTEST_SKIP_("The GPU does not have sufficient shared memory"); } ErrorSpec error_spec{1e-3, 1e-3}; const char* hlo_text_cublas = R"( HloModule cublas ENTRY e { p0 = bf16[2,8,8]{2,1,0} parameter(0) p1 = bf16[8,8]{1,0} parameter(1) p2 = s32[] parameter(2) p3 = s32[] parameter(3) gemm.tuple = (bf16[8,8]{1,0}, s8[0]{0}) custom-call(p1, p1), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{"alpha_real":1,"beta":0,"dot_dimension_numbers":{"lhs_contracting_dimensions":[1],"rhs_contracting_dimensions":[0],"lhs_batch_dimensions":[],"rhs_batch_dimensions":[]},"alpha_imag":0,"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},"epilogue":"DEFAULT"}} gemm = bf16[8,8]{1,0} get-tuple-element(gemm.tuple), index=0 cast = bf16[1,8,8]{2,1,0} bitcast(gemm) ROOT r = bf16[2,8,8]{2,1,0} 
dynamic-update-slice(p0, cast, p2, p3, p3) })"; const char* hlo_text_custom_fusion = R"( HloModule cutlass cutlass_gemm { p0.1 = bf16[8,8]{1,0} parameter(0) p1.1 = bf16[2,8,8]{2,1,0} parameter(1) p2 = s32[] parameter(2) p3 = s32[] parameter(3) dot.1 = bf16[8,8]{1,0} dot(p0.1, p0.1), lhs_contracting_dims={1}, rhs_contracting_dims={0} bc.1 = bf16[1,8,8]{2,1,0} bitcast(dot.1) r.1 = bf16[2,8,8]{2,1,0} dynamic-update-slice(p1.1, bc.1, p2, p3, p3) workspace = u8[1024]{0} custom-call(), custom_call_target="__custom_kernel_fusion$workspace", api_version=API_VERSION_TYPED_FFI ROOT tuple = (bf16[2,8,8]{2,1,0}, u8[1024]{0}) tuple(r.1, workspace) } ENTRY e { p0 = bf16[2,8,8]{2,1,0} parameter(0) p1 = bf16[8,8]{1,0} parameter(1) p2 = s32[] parameter(2) p3 = s32[] parameter(3) r.0 = (bf16[2,8,8]{2,1,0}, u8[1024]{0}) fusion(p1, p0, p2, p3), kind=kCustom, calls=%cutlass_gemm, backend_config={"fusion_backend_config":{"kind":"__custom_fusion","custom_fusion_config":{"name":"cutlass_gemm_with_dynamic_update_slice", "kernel_index":0}}} ROOT %get-tuple-element = bf16[2,8,8]{2,1,0} get-tuple-element(r.0), index=0 })"; Array3D<bfloat16> p0_arr(2, 8, 8); Array2D<bfloat16> p1_arr(8, 8); p1_arr.Each([](int64_t i, int64_t j, bfloat16* out) { *out = bfloat16{1.0f * i * j}; }); Array<int32_t> p2_arr({}, 1); Array<int32_t> p3_arr({}, 0); auto p0 = LiteralUtil::CreateFromArray(p0_arr); auto p1 = LiteralUtil::CreateFromArray(p1_arr); auto p2 = LiteralUtil::CreateFromArray(p2_arr); auto p3 = LiteralUtil::CreateFromArray(p3_arr); EXPECT_TRUE(RunAndCompareTwoModules(hlo_text_cublas, hlo_text_custom_fusion, {&p0, &p1, &p2, &p3}, error_spec, false)); } TEST_F(CutlassFusionTest, RowMajorGemmWithDynamicUpdateSliceKernelWithoutBitcast) { if (GpuSharedMemorySize() < CutlassGemmKernelSharedMemorySize(BF16, BF16, BF16, 8, 8, 8)) { GTEST_SKIP_("The GPU does not have sufficient shared memory"); } ErrorSpec error_spec{1e-3, 1e-3}; const char* hlo_text_cublas = R"( HloModule cublas ENTRY e { p0 = bf16[16,8]{1,0} parameter(0) p1 = bf16[8,8]{1,0} parameter(1) p2 = s32[] parameter(2) p3 = s32[] parameter(3) gemm.tuple = (bf16[8,8]{1,0}, s8[0]{0}) custom-call(p1, p1), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{"alpha_real":1,"beta":0,"dot_dimension_numbers":{"lhs_contracting_dimensions":[1],"rhs_contracting_dimensions":[0],"lhs_batch_dimensions":[],"rhs_batch_dimensions":[]},"alpha_imag":0,"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},"epilogue":"DEFAULT"}} gemm = bf16[8,8]{1,0} get-tuple-element(gemm.tuple), index=0 ROOT r = bf16[16,8]{1,0} dynamic-update-slice(p0, gemm, p2, p3) } )"; const char* hlo_text_custom_fusion = R"( HloModule cutlass cutlass_gemm { p0.1 = bf16[8,8]{1,0} parameter(0) p1.1 = bf16[16,8]{1,0} parameter(1) p2 = s32[] parameter(2) p3 = s32[] parameter(3) dot.1 = bf16[8,8]{1,0} dot(p0.1, p0.1), lhs_contracting_dims={1}, rhs_contracting_dims={0} r.1 = bf16[16,8]{1,0} dynamic-update-slice(p1.1, dot.1, p2, p3) workspace = u8[1024]{0} custom-call(), custom_call_target="__custom_kernel_fusion$workspace", api_version=API_VERSION_TYPED_FFI ROOT tuple = (bf16[16,8]{1,0}, u8[1024]{0}) tuple(r.1, workspace) } ENTRY e { p0 = bf16[16,8]{1,0} parameter(0) p1 = bf16[8,8]{1,0} parameter(1) p2 = s32[] parameter(2) p3 = s32[] parameter(3) r.0 = (bf16[16,8]{1,0}, u8[1024]{0}) fusion(p1, p0, p2, p3), kind=kCustom, calls=%cutlass_gemm, backend_config={"fusion_backend_config":{"kind":"__custom_fusion","custom_fusion_config":{"name":"cutlass_gemm_with_dynamic_update_slice", "kernel_index":0}}} 
ROOT %get-tuple-element = bf16[16,8]{1,0} get-tuple-element(r.0), index=0 })"; Array2D<bfloat16> p0_arr(16, 8); Array2D<bfloat16> p1_arr(8, 8); p1_arr.Each([](int64_t i, int64_t j, bfloat16* out) { *out = bfloat16{1.0f * i * j}; }); Array<int32_t> p2_arr({}, 0); Array<int32_t> p3_arr({}, 1); auto p0 = LiteralUtil::CreateFromArray(p0_arr); auto p1 = LiteralUtil::CreateFromArray(p1_arr); auto p2 = LiteralUtil::CreateFromArray(p2_arr); auto p3 = LiteralUtil::CreateFromArray(p3_arr); EXPECT_TRUE(RunAndCompareTwoModules(hlo_text_cublas, hlo_text_custom_fusion, {&p0, &p1, &p2, &p3}, error_spec, false)); } TEST_F(CutlassFusionTest, GemmWithUpcastShouldBeFused) { const char* hlo = R"( ENTRY e { p0 = f32[16,32]{1,0} parameter(0) p1 = bf16[32,8]{1,0} parameter(1) c1 = f32[32,8]{1,0} convert(p1) ROOT dot = f32[16,8]{1,0} dot(p0, c1), lhs_contracting_dims={1}, rhs_contracting_dims={0} })"; std::string expected = "CHECK: cutlass_gemm"; CustomKernelFusionPatternRegistry patterns; patterns.Emplace<CutlassGemmWithUpcastPattern>(); auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); CustomKernelFusionRewriter pass(&device, 0, &patterns); RunAndFilecheckHloRewrite(hlo, std::move(pass), expected); EXPECT_TRUE(RunAndCompare(hlo, ErrorSpec{1e-3, 1e-3})); } TEST_F(CutlassFusionTest, GemmWithUpcastWithALhsColumnMajorOperandShouldNotBeFused) { const char* hlo = R"( ENTRY e { p0 = f32[16,32]{0,1} parameter(0) p1 = bf16[32,8]{1,0} parameter(1) c1 = f32[32,8]{1,0} convert(p1) ROOT dot = f32[16,8]{1,0} dot(p0, c1), lhs_contracting_dims={1}, rhs_contracting_dims={0} })"; CustomKernelFusionPatternRegistry patterns; patterns.Emplace<CutlassGemmWithUpcastPattern>(); auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); CustomKernelFusionRewriter pass(&device, 0, &patterns); RunAndFilecheckHloRewrite(hlo, std::move(pass), std::nullopt); } TEST_F(CutlassFusionTest, GemmWithUpcastWithARhsColumnMajorOperandShouldNotBeFused) { const char* hlo = R"( ENTRY e { p0 = f32[16,32]{1,0} parameter(0) p1 = bf16[32,8]{0,1} parameter(1) c1 = f32[32,8]{0,1} convert(p1) ROOT dot = f32[16,8]{1,0} dot(p0, c1), lhs_contracting_dims={1}, rhs_contracting_dims={0} })"; CustomKernelFusionPatternRegistry patterns; patterns.Emplace<CutlassGemmWithUpcastPattern>(); auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); CustomKernelFusionRewriter pass(&device, 0, &patterns); RunAndFilecheckHloRewrite(hlo, std::move(pass), std::nullopt); } TEST_F(CutlassFusionTest, GemmWithUpcastWithAColumnMajorDotResultShouldNotBeFused) { const char* hlo = R"( ENTRY e { p0 = f32[16,32]{1,0} parameter(0) p1 = bf16[32,8]{1,0} parameter(1) c1 = f32[32,8]{1,0} convert(p1) ROOT dot = f32[16,8]{0,1} dot(p0, c1), lhs_contracting_dims={1}, rhs_contracting_dims={0} })"; CustomKernelFusionPatternRegistry patterns; patterns.Emplace<CutlassGemmWithUpcastPattern>(); auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); CustomKernelFusionRewriter pass(&device, 0, &patterns); RunAndFilecheckHloRewrite(hlo, std::move(pass), std::nullopt); } TEST_F(CutlassFusionTest, GemmWithUpcastLhsContractingDimensionShouldBeOnTheMinorAxis) { const char* hlo = R"( ENTRY e { p0 = f32[32,16]{1,0} parameter(0) p1 = bf16[32,8]{1,0} parameter(1) c1 = f32[32,8]{1,0} convert(p1) ROOT dot = f32[16,8]{1,0} dot(p0, c1), lhs_contracting_dims={0}, rhs_contracting_dims={0} })"; CustomKernelFusionPatternRegistry patterns; patterns.Emplace<CutlassGemmWithUpcastPattern>(); auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); CustomKernelFusionRewriter pass(&device, 0, &patterns); 
RunAndFilecheckHloRewrite(hlo, std::move(pass), std::nullopt); } TEST_F(CutlassFusionTest, GemmWithUpcastRhsContractingDimensionShouldBeOnTheMajorAxis) { const char* hlo = R"( ENTRY e { p0 = f32[16,32]{1,0} parameter(0) p1 = bf16[8,32]{1,0} parameter(1) c1 = f32[8,32]{1,0} convert(p1) ROOT dot = f32[16,8]{1,0} dot(p0, c1), lhs_contracting_dims={1}, rhs_contracting_dims={1} })"; CustomKernelFusionPatternRegistry patterns; patterns.Emplace<CutlassGemmWithUpcastPattern>(); auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); CustomKernelFusionRewriter pass(&device, 0, &patterns); RunAndFilecheckHloRewrite(hlo, std::move(pass), std::nullopt); } TEST_F(CutlassFusionTest, GemmWithUpcastWithBatchDimensionShouldNotBeFused) { const char* hlo = R"( ENTRY e { p0 = f32[4,16,32]{2,1,0} parameter(0) p1 = bf16[4,32,8]{2,1,0} parameter(1) c1 = f32[4,32,8]{2,1,0} convert(p1) ROOT dot = f32[4,16,8]{2,1,0} dot(p0, c1), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={1} })"; CustomKernelFusionPatternRegistry patterns; patterns.Emplace<CutlassGemmWithUpcastPattern>(); auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); CustomKernelFusionRewriter pass(&device, 0, &patterns); RunAndFilecheckHloRewrite(hlo, std::move(pass), std::nullopt); } TEST_F(CutlassFusionTest, GemmWithUpcastAndColumnMajorOperandsShouldBeFused) { const char* hlo = R"( ENTRY e { p0 = f32[32,16]{0,1} parameter(0) p1 = bf16[8,32]{0,1} parameter(1) c1 = f32[8,32]{0,1} convert(p1) ROOT dot = f32[16,8]{1,0} dot(p0, c1), lhs_contracting_dims={0}, rhs_contracting_dims={1} })"; CustomKernelFusionPatternRegistry patterns; patterns.Emplace<CutlassGemmWithUpcastPattern>(); auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); CustomKernelFusionRewriter pass(&device, 0, &patterns); std::string expected = "CHECK: cutlass_gemm"; RunAndFilecheckHloRewrite(hlo, std::move(pass), expected); EXPECT_TRUE(RunAndCompare(hlo, ErrorSpec{1e-3, 1e-3})); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/kernels/cutlass_gemm_fusion.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/kernels/cutlass_gemm_fusion_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
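A note on the layout checks exercised above: the CutlassGemmWithUpcastPattern tests accept row-major operands with contracting dims {1}/{0}, reject a column-major LHS, RHS, or dot result, reject batch dimensions, and still accept column-major operands once the contracting dims move to {0}/{1}. The standalone sketch below is not XLA code; the struct and function names are invented for illustration, and it only distills the conditions those tests observe into one predicate: the LHS contracts over its minor-most dimension, the RHS over its major-most dimension, and the result is row-major.

// Illustrative-only sketch (not part of XLA): one predicate summarizing the
// fusion-eligibility behavior shown by the CutlassGemmWithUpcastPattern tests.
#include <cstdint>
#include <iostream>

struct Operand2D {
  int64_t contracting_dim;  // which logical dimension is contracted (0 or 1)
  int64_t minor_dim;        // minor-most dimension of the layout (0 or 1)
};

bool LooksFusible(const Operand2D& lhs, const Operand2D& rhs,
                  int64_t result_minor_dim, bool has_batch_dims) {
  if (has_batch_dims) return false;  // batched dots are rejected in the tests
  const bool lhs_ok = lhs.contracting_dim == lhs.minor_dim;  // minor-most axis
  const bool rhs_ok = rhs.contracting_dim != rhs.minor_dim;  // major-most axis
  const bool result_row_major = result_minor_dim == 1;
  return lhs_ok && rhs_ok && result_row_major;
}

int main() {
  // f32[16,32]{1,0} x f32[32,8]{1,0} -> f32[16,8]{1,0}: fused in the tests.
  std::cout << LooksFusible({1, 1}, {0, 1}, 1, false);  // prints 1
  // f32[16,32]{0,1} LHS (column-major, contracting dim 1): rejected.
  std::cout << LooksFusible({1, 0}, {0, 1}, 1, false);  // prints 0
  // f32[32,16]{0,1} x bf16[8,32]{0,1} with dims {0}/{1}: fused.
  std::cout << LooksFusible({0, 0}, {1, 0}, 1, false) << "\n";  // prints 1
  return 0;
}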
b5b5cb72-a89f-46c4-99ab-2d6652fe6b51
cpp
tensorflow/tensorflow
cutlass_gemm_custom_kernel
third_party/xla/xla/service/gpu/kernels/cutlass_gemm_custom_kernel.cc
third_party/xla/xla/service/gpu/kernels/cutlass_gemm_custom_kernel_test.cc
#include "xla/service/gpu/kernels/cutlass_gemm_custom_kernel.h" #include <cstddef> #include <cstdint> #include <memory> #include <optional> #include <string> #include <tuple> #include <utility> #include <vector> #include "absl/container/flat_hash_map.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "third_party/gpus/cuda/include/cuda.h" #include "xla/service/gpu/kernels/custom_kernel.h" #include "xla/service/gpu/kernels/cutlass_gemm.h" #include "xla/stream_executor/device_description.h" #include "xla/stream_executor/kernel.h" #include "xla/stream_executor/kernel_spec.h" #include "xla/stream_executor/launch_dim.h" #include "xla/xla_data.pb.h" namespace xla::gpu::kernel::gemm_universal { static constexpr auto Default = Arch::kDefault; static constexpr auto Sm80 = Arch::kSm80; static constexpr auto Sm90 = Arch::kSm90; extern template struct Adaptor<F32xF32ToF32<Default>>; extern template struct DeviceKernel<F32xF32ToF32<Default>>; extern template struct Adaptor<Bf16xBf16ToBf16<Default>>; extern template struct DeviceKernel<Bf16xBf16ToBf16<Default>>; extern template struct Adaptor<Bf16xBf16ToBf16<Sm80>>; extern template struct DeviceKernel<Bf16xBf16ToBf16<Sm80>>; extern template struct Adaptor<Bf16xBf16ToBf16<Sm90>>; extern template struct DeviceKernel<Bf16xBf16ToBf16<Sm90>>; using KernelArgsPacking = se::MultiKernelLoaderSpec::KernelArgsPacking; template <typename Dim> static Dim As(Dim3 dim3) { return Dim(dim3.x, dim3.y, dim3.z); } template <typename Dim> static std::optional<Dim> As(std::optional<Dim3> dim3) { if (dim3.has_value()) return Dim(dim3->x, dim3->y, dim3->z); return std::nullopt; } static int32_t* SlicePtr(const se::KernelArgsDeviceMemoryArray* args, int64_t index) { const void* opaque = args->device_memory_ptr(index); return static_cast<int32_t*>(const_cast<void*>(opaque)); } template <typename Tag> KernelArgsPacking ArgsPacking(GemmMode mode, int32_t batch_count, int32_t m, int32_t n, int32_t k, const ArgsIndices& indices, const DynamicSliceIndices& slices, int32_t device_sms, Adaptor<Tag> adaptor) { using Packed = absl::StatusOr<std::unique_ptr<se::KernelArgsPackedArrayBase>>; struct Params { #if defined(_MSC_VER) alignas(64) std::byte storage[1024]; #else alignas(128) std::byte storage[1024]; #endif }; return [=](const se::Kernel& kernel, const se::KernelArgs& args) -> Packed { auto* mem_args = se::Cast<se::KernelArgsDeviceMemoryArray>(&args); Arguments arguments = {mode, batch_count, m, n, k}; arguments.lhs = const_cast<void*>(mem_args->device_memory_ptr(indices.lhs)); arguments.rhs = const_cast<void*>(mem_args->device_memory_ptr(indices.rhs)); arguments.out = const_cast<void*>(mem_args->device_memory_ptr(indices.out)); if (indices.has_workspace) { size_t num_mem_args = mem_args->device_memory_args().size(); arguments.workspace = const_cast<void*>(mem_args->device_memory_ptr(num_mem_args - 1)); } else { arguments.workspace = nullptr; } if (slices.out.has_value()) { arguments.slices.out = SlicePtr(mem_args, *slices.out); } if (!adaptor.CanImplement(arguments)) { return absl::InternalError(absl::StrCat( "CUTLASS kernel can not implement gemm for a given problem size", ": m=", m, ", n=", n, ", k=", k)); } auto threads = As<se::ThreadDim>(adaptor.ThreadDim()); auto shmem_bytes = adaptor.SharedMemoryBytes(); static int32_t sm_occupancy = kernel.GetMaxOccupiedBlocksPerCore(threads, shmem_bytes).value_or(1); if (sm_occupancy == 0) { LOG_FIRST_N(WARNING, 1) << "CUTLASS gemm kernel reported 0 occupancy: threads_per_block=" << 
(threads.x * threads.y * threads.z) << ", dynamic_shared_memory_bytes=" << shmem_bytes; } Params params; adaptor.Initialize(&params, arguments, device_sms, sm_occupancy); return se::PackKernelArgs<Params, DynamicSliceArguments>( args.number_of_shared_bytes(), params, arguments.slices); }; } template <typename Tag> static CustomKernel Load(std::string name, GemmMode mode, int32_t batch_count, int32_t m, int32_t n, int32_t k, const ArgsIndices& indices, const DynamicSliceIndices& slices, const se::DeviceDescription& device, Adaptor<Tag> adaptor = {}, DeviceKernel<Tag> kernel = {}) { auto cluster_dim = As<se::ClusterDim>(adaptor.ClusterDim()); auto block_dim = As<se::BlockDim>(adaptor.BlockDim(m, n, k)); auto thread_dim = As<se::ThreadDim>(adaptor.ThreadDim()); auto shared_memory_bytes = adaptor.SharedMemoryBytes(); auto packing = ArgsPacking<Tag>(mode, batch_count, m, n, k, indices, slices, device.core_count(), adaptor); se::MultiKernelLoaderSpec spec(2, std::move(packing)); spec.AddInProcessSymbol(kernel.symbol(), name); if (cluster_dim.has_value()) { return CustomKernel(std::move(name), std::move(spec), block_dim, thread_dim, *cluster_dim, shared_memory_bytes); } else { return CustomKernel(std::move(name), std::move(spec), block_dim, thread_dim, shared_memory_bytes); } } absl::StatusOr<std::vector<CustomKernel>> GetCutlassGemmKernels( std::string name, PrimitiveType dot_type, PrimitiveType lhs_type, PrimitiveType rhs_type, int32_t m, int32_t n, int32_t k, const ArgsIndices& indices, const DynamicSliceIndices& slices, const se::DeviceDescription& device) { absl::flat_hash_map<std::tuple<PrimitiveType, PrimitiveType, PrimitiveType>, std::vector<CustomKernel>> kernels = { {{BF16, BF16, BF16}, {{Load<Bf16xBf16ToBf16<Default>>(name, GemmMode::kGemm, 1, m, n, k, indices, slices, device)}}}, {{BF16, BF16, F32}, {{Load<Bf16xBf16ToF32<Default>>(name, GemmMode::kGemm, 1, m, n, k, indices, slices, device)}}}, {{F32, BF16, F32}, {{Load<F32xBf16ToF32<Default>>(name, GemmMode::kGemm, 1, m, n, k, indices, slices, device)}, {Load<F32xBf16ToF32<Default>>(name, GemmMode::kGemmSplitKParallel, 16, m, n, k, indices, slices, device)}}}, {{BF16, S8, F32}, {{Load<Bf16xS8ToF32<Default>>(name, GemmMode::kGemm, 1, m, n, k, indices, slices, device)}, {Load<Bf16xS8ToF32<Default>>(name, GemmMode::kGemmSplitKParallel, 16, m, n, k, indices, slices, device)}}}, {{F32, F32, F32}, {{Load<F32xF32ToF32<Default>>(name, GemmMode::kGemm, 1, m, n, k, indices, slices, device)}}}}; auto loaded_kernels = kernels.find({lhs_type, rhs_type, dot_type}); if (loaded_kernels != kernels.end()) { return loaded_kernels->second; } else { std::string kernel_name = PrimitiveType_Name(lhs_type) + "x" + PrimitiveType_Name(rhs_type) + "To" + PrimitiveType_Name(dot_type); return absl::InvalidArgumentError(absl::StrCat( "Unsupported CUTLASS gemm data type for kernel: ", kernel_name)); } } absl::StatusOr<CustomKernel> LoadCutlassGemmKernel( std::string name, const std::string& library_path, PrimitiveType dtype, int32_t m, int32_t n, int32_t k, const ArgsIndices& indices, const DynamicSliceIndices& slices, const se::DeviceDescription& device) { auto adaptor = Adaptor<DlOpenedKernel>::Load(library_path); if (!adaptor.has_value()) { return absl::InternalError( absl::StrCat("Failed to load CUTLASS adaptor from a shared library: ", library_path)); } auto kernel = DeviceKernel<DlOpenedKernel>::Load(library_path); if (!kernel.has_value()) { return absl::InternalError(absl::StrCat( "Failed to load CUTLASS kernel from a shared library: ", library_path)); } 
return Load<DlOpenedKernel>(std::move(name), GemmMode::kGemm, 1, m, n, k, indices, slices, device, *adaptor, *kernel); } }
#include "xla/service/gpu/kernels/cutlass_gemm_custom_kernel.h" #include <cstdint> #include <cstring> #include <string> #include <vector> #include "xla/stream_executor/device_memory.h" #include "xla/stream_executor/kernel.h" #include "xla/stream_executor/platform.h" #include "xla/stream_executor/platform_manager.h" #include "xla/stream_executor/stream.h" #include "xla/stream_executor/stream_executor.h" #include "xla/tsl/lib/core/status_test_util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/path.h" #include "tsl/platform/statusor.h" #include "tsl/platform/test.h" namespace xla::gpu::kernel::gemm_universal { TEST(CutlassGemmKernelTest, SimpleGemm) { se::Platform* platform = se::PlatformManager::PlatformWithName("CUDA").value(); se::StreamExecutor* executor = platform->ExecutorForDevice(0).value(); auto stream = executor->CreateStream().value(); TF_ASSERT_OK_AND_ASSIGN( auto custom_kernels, GetCutlassGemmKernels("cutlass_gemm", PrimitiveType::F32, PrimitiveType::F32, PrimitiveType::F32, 4, 4, 4, {0, 1, 2}, {}, executor->GetDeviceDescription())); auto custom_kernel = custom_kernels[0]; TF_ASSERT_OK_AND_ASSIGN(auto gemm, executor->LoadKernel(custom_kernel.kernel_spec())); int64_t length = 4 * 4; int64_t byte_length = sizeof(float) * length; se::DeviceMemory<float> a = executor->AllocateArray<float>(length, 0); se::DeviceMemory<float> b = executor->AllocateArray<float>(length, 0); se::DeviceMemory<float> c = executor->AllocateArray<float>(length, 0); float value = 2.0; uint32_t pattern; std::memcpy(&pattern, &value, sizeof(pattern)); TF_ASSERT_OK(stream->Memset32(&a, pattern, byte_length)); TF_ASSERT_OK(stream->Memset32(&b, pattern, byte_length)); TF_ASSERT_OK(stream->MemZero(&c, byte_length)); se::KernelArgsDeviceMemoryArray arr( std::vector<se::DeviceMemoryBase>({a, b, c}), custom_kernel.shared_memory_bytes()); TF_ASSERT_OK(stream->Launch(custom_kernel.thread_dims(), custom_kernel.block_dims(), *gemm, arr)); std::vector<float> dst(length, -1.0f); TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length)); std::vector<float> expected(length, 16.0); ASSERT_EQ(dst, expected); } TEST(CutlassGemmKernelTest, LoadFromSharedLibrary) { std::string kernel_lib_path = tsl::io::JoinPath(tsl::testing::XlaSrcRoot(), "service", "gpu", "kernels", "cutlass_gemm_kernel_f32xf32_to_f32.so"); se::Platform* platform = se::PlatformManager::PlatformWithName("CUDA").value(); se::StreamExecutor* executor = platform->ExecutorForDevice(0).value(); auto stream = executor->CreateStream().value(); auto custom_kernel = LoadCutlassGemmKernel( "cutlass_gemm", kernel_lib_path, PrimitiveType::F32, 4, 4, 4, {0, 1, 2}, {}, executor->GetDeviceDescription()); TF_ASSERT_OK_AND_ASSIGN(auto gemm, executor->LoadKernel(custom_kernel->kernel_spec())); int64_t length = 4 * 4; int64_t byte_length = sizeof(float) * length; se::DeviceMemory<float> a = executor->AllocateArray<float>(length, 0); se::DeviceMemory<float> b = executor->AllocateArray<float>(length, 0); se::DeviceMemory<float> c = executor->AllocateArray<float>(length, 0); float value = 2.0; uint32_t pattern; std::memcpy(&pattern, &value, sizeof(pattern)); TF_ASSERT_OK(stream->Memset32(&a, pattern, byte_length)); TF_ASSERT_OK(stream->Memset32(&b, pattern, byte_length)); TF_ASSERT_OK(stream->MemZero(&c, byte_length)); se::KernelArgsDeviceMemoryArray arr( std::vector<se::DeviceMemoryBase>({a, b, c}), custom_kernel->shared_memory_bytes()); TF_ASSERT_OK(stream->Launch(custom_kernel->thread_dims(), custom_kernel->block_dims(), *gemm, arr)); std::vector<float> dst(length, -1.0f); 
TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length)); std::vector<float> expected(length, 16.0); ASSERT_EQ(dst, expected); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/kernels/cutlass_gemm_custom_kernel.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/kernels/cutlass_gemm_custom_kernel_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
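The SimpleGemm and LoadFromSharedLibrary tests above fill both 4x4 f32 operands with 2.0f and expect every element of the result to equal 16.0f. The minimal host-side reference below (plain C++, no StreamExecutor, purely illustrative) reproduces that expectation: each output element is a sum over k=4 of 2.0f * 2.0f.

// Illustrative-only CPU reference for the expected value in the tests above.
#include <array>
#include <cassert>

int main() {
  constexpr int kN = 4;
  std::array<float, kN * kN> a{}, b{}, c{};  // row-major 4x4 matrices
  a.fill(2.0f);
  b.fill(2.0f);
  for (int i = 0; i < kN; ++i) {
    for (int j = 0; j < kN; ++j) {
      for (int k = 0; k < kN; ++k) {
        c[i * kN + j] += a[i * kN + k] * b[k * kN + j];
      }
    }
  }
  // Matches std::vector<float> expected(length, 16.0) in the tests.
  for (float v : c) assert(v == 16.0f);
  return 0;
}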
122e2de0-23d3-441b-a4e9-2765e88d32d1
cpp
tensorflow/tensorflow
topk_kernel
third_party/xla/xla/service/gpu/kernels/topk_kernel.cc
third_party/xla/xla/service/gpu/kernels/topk_kernel_test.cc
#include "xla/service/gpu/kernels/topk_kernel.h" #include <algorithm> #include <cstddef> #include <cstdint> #include "absl/numeric/bits.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "xla/primitive_util.h" #include "xla/service/gpu/kernels/topk_kernel_common.h" #include "xla/stream_executor/kernel.h" #include "xla/stream_executor/launch_dim.h" #include "xla/stream_executor/stream.h" #include "xla/stream_executor/typed_kernel_factory.h" #include "xla/types.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" #include "tsl/platform/statusor.h" namespace xla::gpu { namespace { size_t NumThreads(size_t n, size_t k, size_t batch_size) { size_t simultaneous_threads_per_block = 512 * (16 / k); size_t threads_per_block = std::min(simultaneous_threads_per_block, kTopKMaxThreadsPerBlock); size_t min_slice = absl::bit_floor(n / absl::bit_ceil(k)); return std::min(threads_per_block, min_slice); } template <typename T> absl::StatusOr<void*> GetKernel(int n, int k) { if (k <= 1) return GetTopKKernelForK<T, 1>(n); if (k <= 2) return GetTopKKernelForK<T, 2>(n); if (k <= 4) return GetTopKKernelForK<T, 4>(n); if (k <= 8) return GetTopKKernelForK<T, 8>(n); if (k <= 16) return GetTopKKernelForK<T, 16>(n); return absl::UnimplementedError(absl::StrCat("Unsupported K: ", k)); } template <typename T> absl::Status TypedTopK(se::Stream* stream, se::DeviceMemoryBase data, size_t num_elements, se::DeviceMemoryBase top_elements, se::DeviceMemoryBase top_indices, size_t k, size_t batch_size) { constexpr size_t max_kv_size = sizeof(uint64_t); int shmem_size = absl::bit_ceil(k) * max_kv_size * GetTopKWaveFrontSize<T>(); int num_threads = NumThreads(num_elements, k, batch_size); if (num_threads == 0) { return absl::FailedPreconditionError( "Invalid kernel parameters. This is likely a bug in the " "TopkSpecializer."); } se::StreamExecutor* executor = stream->parent(); se::DeviceMemory<T> data_typed(data); se::DeviceMemory<T> top_elements_typed(top_elements); se::DeviceMemory<uint32_t> top_indices_typed(top_indices); TF_ASSIGN_OR_RETURN(void* kernel_symbol, GetKernel<T>(num_elements, k)); TF_ASSIGN_OR_RETURN( auto kernel, (se::TypedKernelFactory<se::DeviceMemory<T>, size_t, se::DeviceMemory<T>, se::DeviceMemory<uint32_t>, size_t>::Create(executor, "topk", kernel_symbol))); TF_RETURN_IF_ERROR(stream->ThenLaunch( se::ThreadDim(num_threads, 1, 1), se::BlockDim(batch_size, 1, 1), shmem_size, kernel, data_typed, num_elements, top_elements_typed, top_indices_typed, k)); return absl::OkStatus(); } } absl::Status RunTopk(se::Stream* stream, PrimitiveType dtype, se::DeviceMemoryBase data, size_t num_elements, se::DeviceMemoryBase top_elements, se::DeviceMemoryBase top_indices, size_t k, size_t batch_size) { VLOG(2) << "TopK: " << primitive_util::LowercasePrimitiveTypeName(dtype) << ", n: " << num_elements << ", k: " << k << ", bs: " << batch_size; switch (dtype) { case PrimitiveType::F32: return TypedTopK<float>(stream, data, num_elements, top_elements, top_indices, k, batch_size); case PrimitiveType::BF16: return TypedTopK<bfloat16>(stream, data, num_elements, top_elements, top_indices, k, batch_size); default: return absl::UnimplementedError("GpuTopK not implemented for this dtype"); } } }
#include "xla/service/gpu/kernels/topk_kernel.h" #include <stddef.h> #include <stdint.h> #include <algorithm> #include <functional> #include <tuple> #include <vector> #include "absl/log/check.h" #include "absl/random/random.h" #include "absl/strings/substitute.h" #include "absl/time/time.h" #include "xla/stream_executor/device_memory_handle.h" #include "xla/stream_executor/gpu/gpu_init.h" #include "xla/stream_executor/gpu/gpu_types.h" #include "xla/stream_executor/platform.h" #include "xla/stream_executor/platform_manager.h" #include "xla/stream_executor/stream.h" #include "xla/types.h" #include "xla/xla_data.pb.h" #include "tsl/platform/statusor.h" #include "tsl/platform/test.h" #include "tsl/platform/test_benchmark.h" namespace xla::gpu { namespace { using se::gpu::GpuStreamHandle; using ::testing::Combine; using ::testing::Values; template <typename T> std::vector<T> RandomVecRange(int num_elements, T start, T end) { std::vector<T> local; local.reserve(num_elements); thread_local absl::BitGen gen; for (int i = 0; i < num_elements; ++i) { local.push_back(absl::Uniform<T>(gen, start, end)); } return local; } template <typename T> std::vector<T> RandomVec(int num_elements) { return RandomVecRange(num_elements, static_cast<T>(0), static_cast<T>(num_elements)); } template <typename T> std::vector<T> RandomVecNegative(int num_elements) { return RandomVecRange(num_elements, -static_cast<T>(num_elements), static_cast<T>(0)); } PrimitiveType Get(float) { return PrimitiveType::F32; } se::StreamExecutor* GetGpuExecutor() { auto* platform = se::PlatformManager::PlatformWithName(se::GpuPlatformName()).value(); return platform->ExecutorForDevice(0).value(); } using TopkTest = ::testing::TestWithParam<std::tuple<int, int, int, int>>; TEST_P(TopkTest, TopKFloat) { using T = float; auto* executor = GetGpuExecutor(); auto stream = executor->CreateStream().value(); const auto [n_kb, k, batch_size, offset] = GetParam(); const size_t n = n_kb * 1024 + offset; stream_executor::DeviceMemoryHandle input_buffer( executor, executor->AllocateArray<T>(n * batch_size)); stream_executor::DeviceMemoryHandle output_values( executor, executor->AllocateArray<T>(k * batch_size)); stream_executor::DeviceMemoryHandle output_indices( executor, executor->AllocateArray<uint32_t>(k * batch_size)); ASSERT_TRUE(!(input_buffer.memory().is_null() || output_values.memory().is_null() || output_indices.memory().is_null())); auto source = RandomVec<T>(n * batch_size); CHECK_OK(stream->Memcpy(input_buffer.memory_ptr(), source.data(), n * batch_size * sizeof(T))); ASSERT_TRUE(RunTopk(stream.get(), Get(T()), input_buffer.memory(), n, output_values.memory(), output_indices.memory(), k, batch_size) .ok()); std::vector<T> got(k); ASSERT_TRUE(stream->BlockHostUntilDone().ok()); for (int i = 0; i < batch_size; i++) { CHECK_OK(stream->Memcpy( got.data(), se::DeviceMemory<T>(output_values.memory()).GetSlice(k * i, k), k * sizeof(T))); std::vector<T> slice(source.data() + n * i, source.data() + n * (i + 1)); std::sort(slice.begin(), slice.end(), std::greater<T>()); slice.resize(k); EXPECT_THAT(got, ::testing::ElementsAreArray(slice)) << " k=" << k << ", batch_size=" << batch_size << " i=" << i; } } TEST_P(TopkTest, TopKPackedNegative) { using T = float; auto* executor = GetGpuExecutor(); auto stream = executor->CreateStream().value(); const auto [n_kb, k, batch_size, offset] = GetParam(); const size_t n = n_kb * 1024 + offset; stream_executor::DeviceMemoryHandle input_buffer( executor, executor->AllocateArray<T>(n * batch_size)); 
stream_executor::DeviceMemoryHandle output_values( executor, executor->AllocateArray<T>(k * batch_size)); stream_executor::DeviceMemoryHandle output_indices( executor, executor->AllocateArray<uint32_t>(k * batch_size)); ASSERT_TRUE(!(input_buffer.memory().is_null() || output_values.memory().is_null() || output_indices.memory().is_null())); auto source = RandomVecNegative<T>(n * batch_size); CHECK_OK(stream->Memcpy(input_buffer.memory_ptr(), source.data(), n * batch_size * sizeof(T))); ASSERT_TRUE(RunTopk(stream.get(), Get(T()), input_buffer.memory(), n, output_values.memory(), output_indices.memory(), k, batch_size) .ok()); std::vector<T> got(k); ASSERT_TRUE(stream->BlockHostUntilDone().ok()); for (int i = 0; i < batch_size; i++) { CHECK_OK(stream->Memcpy( got.data(), se::DeviceMemory<T>(output_values.memory()).GetSlice(k * i, k), k * sizeof(T))); std::vector<T> slice(source.data() + n * i, source.data() + n * (i + 1)); std::sort(slice.begin(), slice.end(), std::greater<T>()); slice.resize(k); EXPECT_THAT(got, ::testing::ElementsAreArray(slice)) << " k=" << k << ", batch_size=" << batch_size << " i=" << i; } } INSTANTIATE_TEST_SUITE_P(TopkTests, TopkTest, Combine( Values(1, 8, 12, 64, 128), Values(1, 2, 8, 16, 7, 12), Values(1, 16, 64, 128), Values(0, 7, 4)), [](const auto& info) { return absl::Substitute( "n$0KiB_k$1_batch_size$2_offset$3", std::get<0>(info.param), std::get<1>(info.param), std::get<2>(info.param), std::get<3>(info.param)); }); template <size_t K> void BM_SmallTopk(benchmark::State& state) { using T = float; size_t k = K; size_t batch_size = state.range(0); size_t n = state.range(1) * 1024; state.SetLabel( absl::Substitute("n=$0Ki k=$1 batch_size=$2", n / 1024, k, batch_size)); auto* executor = GetGpuExecutor(); auto stream = executor->CreateStream().value(); stream_executor::DeviceMemoryHandle input_buffer( executor, executor->AllocateArray<T>(n * batch_size)); stream_executor::DeviceMemoryHandle output_values( executor, executor->AllocateArray<T>(k * batch_size)); stream_executor::DeviceMemoryHandle output_indices( executor, executor->AllocateArray<uint32_t>(k * batch_size)); if (input_buffer.memory().is_null() || output_values.memory().is_null() || output_indices.memory().is_null()) { state.SkipWithError("Unable to allocate GPU memory: aborting benchmark"); return; } auto source = RandomVec<T>(n); for (size_t i = 0; i < batch_size; i++) { auto slice = se::DeviceMemory<T>(input_buffer.memory()).GetSlice(i * n, n); CHECK_OK(stream->Memcpy(&slice, source.data(), n * sizeof(T))); } for (auto _ : state) { CHECK_OK(RunTopk(stream.get(), Get(T()), input_buffer.memory(), n, output_values.memory(), output_indices.memory(), k, batch_size)); TF_ASSERT_OK_AND_ASSIGN(auto timer, stream->CreateEventBasedTimer(true)); CHECK_OK(RunTopk(stream.get(), Get(T()), input_buffer.memory(), n, output_values.memory(), output_indices.memory(), k, batch_size)); auto timer_duration = timer->GetElapsedDuration(); CHECK_OK(timer_duration.status()); state.SetIterationTime(absl::ToDoubleSeconds(timer_duration.value())); } size_t items_processed = batch_size * n * state.iterations(); state.SetItemsProcessed(items_processed); state.SetBytesProcessed(items_processed * sizeof(T)); } BENCHMARK(BM_SmallTopk<1>)->RangePair(1, 1024, 16, 1024)->UseManualTime(); BENCHMARK(BM_SmallTopk<2>)->RangePair(1, 1024, 16, 1024)->UseManualTime(); BENCHMARK(BM_SmallTopk<4>)->RangePair(1, 1024, 16, 1024)->UseManualTime(); BENCHMARK(BM_SmallTopk<8>)->RangePair(1, 1024, 16, 1024)->UseManualTime(); 
BENCHMARK(BM_SmallTopk<16>)->RangePair(1, 1024, 16, 1024)->UseManualTime(); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/kernels/topk_kernel.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/kernels/topk_kernel_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
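The launch configuration in topk_kernel.cc above comes from NumThreads, which requests 512 * (16 / k) threads, caps them at kTopKMaxThreadsPerBlock, and then limits the count to the largest power of two not exceeding n / bit_ceil(k); batch_size is accepted but unused there. The C++20 sketch below restates only that arithmetic in standalone form; kTopKMaxThreadsPerBlock is defined in topk_kernel_common.h and is not shown in this record, so the 1024 cap here is an assumption made purely for illustration.

// Illustrative-only restatement of the NumThreads heuristic, not XLA code.
#include <algorithm>
#include <bit>
#include <cstddef>
#include <iostream>

constexpr std::size_t kMaxThreadsPerBlock = 1024;  // assumed stand-in value

std::size_t NumThreadsSketch(std::size_t n, std::size_t k) {
  // GetKernel only supports k <= 16, so 16 / k is at least 1 here.
  std::size_t simultaneous_threads_per_block = 512 * (16 / k);
  std::size_t threads_per_block =
      std::min(simultaneous_threads_per_block, kMaxThreadsPerBlock);
  // Largest power of two <= n / bit_ceil(k), so each thread keeps seeing
  // enough input elements for its candidates.
  std::size_t min_slice = std::bit_floor(n / std::bit_ceil(k));
  return std::min(threads_per_block, min_slice);
}

int main() {
  std::cout << NumThreadsSketch(8 * 1024, 8) << "\n";  // 1024
  std::cout << NumThreadsSketch(1024, 16) << "\n";     // 64, limited by the slice bound
  return 0;
}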
f785b429-b481-4e0e-9822-be3f05ffcec6
cpp
tensorflow/tensorflow
topk_custom_kernel
third_party/xla/xla/service/gpu/kernels/topk_custom_kernel.cc
third_party/xla/xla/service/gpu/kernels/topk_custom_kernel_test.cc
#include "xla/service/gpu/kernels/topk_custom_kernel.h" #include <algorithm> #include <cstddef> #include <cstdint> #include <memory> #include <string> #include <utility> #include "absl/numeric/bits.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "xla/service/gpu/kernels/custom_kernel.h" #include "xla/stream_executor/device_memory.h" #include "xla/stream_executor/kernel.h" #include "xla/stream_executor/kernel_spec.h" #include "xla/stream_executor/launch_dim.h" #include "xla/types.h" #include "xla/xla_data.pb.h" #include "tsl/platform/statusor.h" #if defined(GOOGLE_CUDA) || defined(TENSORFLOW_USE_ROCM) #include "xla/service/gpu/kernels/topk_kernel_common.h" #endif namespace xla::gpu::kernel::topk { #if defined(GOOGLE_CUDA) || defined(TENSORFLOW_USE_ROCM) namespace { using KernelArgsPacking = se::MultiKernelLoaderSpec::KernelArgsPacking; size_t EstimateOptimalNumThreads(size_t n, size_t k, size_t batch_size) { constexpr size_t kEstimatedThreadsPerBlock = 512; constexpr size_t kMaxKValue = 16; size_t simultaneous_threads_per_block = kEstimatedThreadsPerBlock * (kMaxKValue / k); size_t threads_per_block = std::min(simultaneous_threads_per_block, kTopKMaxThreadsPerBlock); size_t min_slice = absl::bit_floor(n / absl::bit_ceil(k)); return std::min(threads_per_block, min_slice); } template <typename T> absl::StatusOr<void*> GetKernel(int n, int k) { if (k <= 1) return GetTopKKernelForK<T, 1>(n); if (k <= 2) return GetTopKKernelForK<T, 2>(n); if (k <= 4) return GetTopKKernelForK<T, 4>(n); if (k <= 8) return GetTopKKernelForK<T, 8>(n); if (k <= 16) return GetTopKKernelForK<T, 16>(n); return absl::UnimplementedError(absl::StrCat("Unsupported K: ", k)); } template <typename T> KernelArgsPacking CreateTopKArgsPacking(size_t num_elements, size_t k) { using Packed = absl::StatusOr<std::unique_ptr<se::KernelArgsPackedArrayBase>>; return [=](const se::Kernel& kernel, const se::KernelArgs& args) -> Packed { auto* mem_args = se::Cast<se::KernelArgsDeviceMemoryArray>(&args); se::DeviceMemory<T> data(mem_args->device_memory_args()[0]); se::DeviceMemory<T> top_elements(mem_args->device_memory_args()[1]); se::DeviceMemory<uint32_t> top_indices(mem_args->device_memory_args()[2]); return se::PackKernelArgs(args.number_of_shared_bytes(), data, num_elements, top_elements, top_indices, k); }; } template <typename T> absl::StatusOr<CustomKernel> GetTypedTopK(std::string name, size_t num_elements, size_t k, size_t batch_size) { constexpr size_t kMaxKVSize = sizeof(uint64_t); int shmem_size = absl::bit_ceil(k) * kMaxKVSize * GetTopKWaveFrontSize<T>(); int num_threads = EstimateOptimalNumThreads(num_elements, k, batch_size); if (num_threads == 0) { return absl::FailedPreconditionError( "Invalid kernel parameters. 
This is likely a bug in the " "TopkSpecializer."); } auto packing = CreateTopKArgsPacking<T>(num_elements, k); se::MultiKernelLoaderSpec spec(5, std::move(packing)); TF_ASSIGN_OR_RETURN(void* kernel_symbol, GetKernel<T>(num_elements, k)); spec.AddInProcessSymbol(kernel_symbol, name); return CustomKernel(std::move(name), std::move(spec), se::BlockDim(batch_size, 1, 1), se::ThreadDim(num_threads, 1, 1), shmem_size); } } absl::StatusOr<CustomKernel> GetTopKKernel(std::string name, PrimitiveType dtype, size_t num_elements, size_t k, size_t batch_size) { switch (dtype) { case PrimitiveType::F32: return GetTypedTopK<float>(std::move(name), num_elements, k, batch_size); case PrimitiveType::BF16: return GetTypedTopK<bfloat16>(std::move(name), num_elements, k, batch_size); default: return absl::InvalidArgumentError( absl::StrCat("Unsupported GpuTopK data type: ", dtype)); } } #else absl::StatusOr<CustomKernel> GetTopKKernel(std::string name, PrimitiveType dtype, size_t num_elements, size_t k, size_t batch_size) { return absl::InternalError("XLA compiled without CUDA support"); } #endif }
#include "xla/service/gpu/kernels/topk_custom_kernel.h" #include <algorithm> #include <cstdint> #include <cstring> #include <functional> #include <tuple> #include <vector> #include <gtest/gtest.h> #include "absl/random/random.h" #include "absl/strings/ascii.h" #include "absl/strings/substitute.h" #include "xla/service/platform_util.h" #include "xla/stream_executor/kernel.h" #include "xla/stream_executor/platform.h" #include "xla/stream_executor/platform_manager.h" #include "xla/stream_executor/stream.h" #include "xla/stream_executor/stream_executor.h" #include "xla/tsl/lib/core/status_test_util.h" #include "xla/types.h" #include "xla/xla_data.pb.h" #include "tsl/platform/statusor.h" #include "tsl/platform/test.h" namespace xla::gpu::kernel::topk { using ::testing::Combine; using ::testing::Values; template <typename T> std::vector<T> RandomVecRange(int num_elements, T start, T end) { std::vector<T> local; local.reserve(num_elements); thread_local absl::BitGen gen; for (int i = 0; i < num_elements; ++i) { local.push_back(absl::Uniform<T>(gen, start, end)); } return local; } template <typename T> std::vector<T> RandomVec(int num_elements) { return RandomVecRange(num_elements, static_cast<T>(0), static_cast<T>(num_elements)); } template <typename T> std::vector<T> RandomVecNegative(int num_elements) { return RandomVecRange(num_elements, -static_cast<T>(num_elements), static_cast<T>(0)); } PrimitiveType Get(float) { return PrimitiveType::F32; } PrimitiveType Get(bfloat16) { return PrimitiveType::BF16; } using TopKKernelTest = ::testing::TestWithParam<std::tuple<int, int, int, int>>; TEST_P(TopKKernelTest, TopKFloat) { using T = float; auto name = absl::AsciiStrToUpper(PlatformUtil::CanonicalPlatformName("gpu").value()); se::Platform* platform = se::PlatformManager::PlatformWithName(name).value(); se::StreamExecutor* executor = platform->ExecutorForDevice(0).value(); auto stream = executor->CreateStream().value(); const auto [n_kb, k, batch_size, offset] = GetParam(); const size_t n = n_kb * 1024 + offset; se::DeviceMemory<T> input_buffer = executor->AllocateArray<T>(n * batch_size, 0); se::DeviceMemory<T> output_values = executor->AllocateArray<T>(k * batch_size, 0); se::DeviceMemory<uint32_t> output_indices = executor->AllocateArray<uint32_t>(k * batch_size, 0); auto source = RandomVec<T>(n * batch_size); TF_ASSERT_OK( stream->Memcpy(&input_buffer, source.data(), n * batch_size * sizeof(T))); TF_ASSERT_OK(stream->MemZero(&output_values, k * batch_size * sizeof(T))); TF_ASSERT_OK( stream->MemZero(&output_indices, k * batch_size * sizeof(uint32_t))); auto custom_kernel = GetTopKKernel("topk", PrimitiveType::F32, n, k, batch_size); TF_ASSERT_OK_AND_ASSIGN(auto kernel, executor->LoadKernel(custom_kernel->kernel_spec())); se::KernelArgsDeviceMemoryArray arr( std::vector<se::DeviceMemoryBase>( {input_buffer, output_values, output_indices}), custom_kernel->shared_memory_bytes()); TF_ASSERT_OK(stream->Launch(custom_kernel->thread_dims(), custom_kernel->block_dims(), *kernel, arr)); std::vector<T> got(k); ASSERT_TRUE(stream->BlockHostUntilDone().ok()); for (int i = 0; i < batch_size; i++) { TF_ASSERT_OK(stream->Memcpy(got.data(), output_values.GetSlice(k * i, k), k * sizeof(T))); std::vector<T> slice(source.data() + n * i, source.data() + n * (i + 1)); std::sort(slice.begin(), slice.end(), std::greater<T>()); slice.resize(k); EXPECT_THAT(got, ::testing::ElementsAreArray(slice)) << " k=" << k << ", batch_size=" << batch_size << " i=" << i; } } TEST_P(TopKKernelTest, TopKPackedNegative) { using T = 
float; auto name = absl::AsciiStrToUpper(PlatformUtil::CanonicalPlatformName("gpu").value()); se::Platform* platform = se::PlatformManager::PlatformWithName(name).value(); se::StreamExecutor* executor = platform->ExecutorForDevice(0).value(); auto stream = executor->CreateStream().value(); const auto [n_kb, k, batch_size, offset] = GetParam(); const size_t n = n_kb * 1024 + offset; se::DeviceMemory<T> input_buffer = executor->AllocateArray<T>(n * batch_size, 0); se::DeviceMemory<T> output_values = executor->AllocateArray<T>(k * batch_size, 0); se::DeviceMemory<uint32_t> output_indices = executor->AllocateArray<uint32_t>(k * batch_size, 0); auto source = RandomVecNegative<T>(n * batch_size); TF_ASSERT_OK( stream->Memcpy(&input_buffer, source.data(), n * batch_size * sizeof(T))); TF_ASSERT_OK(stream->MemZero(&output_values, k * batch_size * sizeof(T))); TF_ASSERT_OK( stream->MemZero(&output_indices, k * batch_size * sizeof(uint32_t))); auto custom_kernel = GetTopKKernel("topk", PrimitiveType::F32, n, k, batch_size); TF_ASSERT_OK_AND_ASSIGN(auto kernel, executor->LoadKernel(custom_kernel->kernel_spec())); se::KernelArgsDeviceMemoryArray arr( std::vector<se::DeviceMemoryBase>( {input_buffer, output_values, output_indices}), custom_kernel->shared_memory_bytes()); TF_ASSERT_OK(stream->Launch(custom_kernel->thread_dims(), custom_kernel->block_dims(), *kernel, arr)); std::vector<T> got(k); ASSERT_TRUE(stream->BlockHostUntilDone().ok()); for (int i = 0; i < batch_size; i++) { TF_ASSERT_OK(stream->Memcpy(got.data(), output_values.GetSlice(k * i, k), k * sizeof(T))); std::vector<T> slice(source.data() + n * i, source.data() + n * (i + 1)); std::sort(slice.begin(), slice.end(), std::greater<T>()); slice.resize(k); EXPECT_THAT(got, ::testing::ElementsAreArray(slice)) << " k=" << k << ", batch_size=" << batch_size << " i=" << i; } } INSTANTIATE_TEST_SUITE_P(TopKTests, TopKKernelTest, Combine( Values(1, 8, 12, 64, 128), Values(1, 2, 8, 16, 7, 12), Values(1, 16, 64, 128), Values(0, 7, 4)), [](const auto& info) { return absl::Substitute( "n$0KiB_k$1_batch_size$2_offset$3", std::get<0>(info.param), std::get<1>(info.param), std::get<2>(info.param), std::get<3>(info.param)); }); }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/kernels/topk_custom_kernel.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/kernels/topk_custom_kernel_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
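The TopK tests above verify the kernel per batch row by sorting a copy of that row in descending order and truncating it to k; only the values output is compared, the indices output is not checked by these tests. The snippet below simply extracts that host-side reference into a standalone function (illustrative only, not part of XLA) as a compact statement of what the custom kernel is expected to return for the values output.

// Illustrative-only host reference mirroring the verification loop above.
#include <algorithm>
#include <cstddef>
#include <functional>
#include <vector>

std::vector<float> ReferenceTopK(const std::vector<float>& row, std::size_t k) {
  std::vector<float> sorted = row;
  std::sort(sorted.begin(), sorted.end(), std::greater<float>());
  sorted.resize(std::min<std::size_t>(k, sorted.size()));
  return sorted;
}

int main() {
  const std::vector<float> row = {3.f, 7.f, 1.f, 9.f, 4.f};
  const std::vector<float> top2 = ReferenceTopK(row, 2);
  return (top2 == std::vector<float>{9.f, 7.f}) ? 0 : 1;  // 0 on success
}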
c95c32cb-784d-47f8-b823-f59375314092
cpp
tensorflow/tensorflow
cudnn
third_party/xla/xla/service/gpu/fusions/cudnn.cc
third_party/xla/xla/service/gpu/fusions/cudnn_test.cc
#include "xla/service/gpu/fusions/cudnn.h" #include "absl/log/log.h" #include "absl/status/statusor.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/service/gpu/fusions/fusion_emitter.h" #include "xla/service/gpu/ir_emitter_context.h" #include "xla/service/gpu/kernel_arguments.h" #include "xla/service/gpu/kernel_reuse_cache.h" #include "xla/service/gpu/runtime/thunk.h" #include "tsl/platform/statusor.h" #if GOOGLE_CUDA #include "xla/service/gpu/runtime/cudnn_thunk.h" #endif namespace xla { namespace gpu { absl::StatusOr<FusionEmissionResult> CuDnnFusion::Emit( IrEmitterContext& ir_emitter_context, const HloFusionInstruction& fusion) const { #if GOOGLE_CUDA VLOG(3) << fusion.ToString(); TF_ASSIGN_OR_RETURN( auto kernel_arguments, KernelArguments::Create(ir_emitter_context.buffer_assignment(), &fusion)); FusionEmissionResult result; result.thunks.emplace_back(std::make_unique<CuDnnThunk>( GetComputationFingerprint(fusion.fused_instructions_computation(), {}), Thunk::ThunkInfo::WithProfileAnnotation(&fusion), kernel_arguments.args())); return result; #else return absl::UnimplementedError("cuDNN support requires CUDA"); #endif } } }
#include <array> #include <memory> #include <string> #include <tuple> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_replace.h" #include "absl/strings/string_view.h" #include "absl/strings/substitute.h" #include "xla/comparison_util.h" #include "xla/debug_options_flags.h" #include "xla/error_spec.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/primitive_util.h" #include "xla/service/dump.h" #include "xla/service/executable.h" #include "xla/service/gpu/ir_emission_utils.h" #include "xla/service/gpu/stream_executor_util.h" #include "xla/service/gpu/tests/gpu_codegen_test.h" #include "xla/service/gpu/transforms/cudnn_fusion_compiler.h" #include "xla/service/pattern_matcher.h" #include "xla/service/pattern_matcher_gmock.h" #include "xla/stream_executor/dnn.h" #include "xla/stream_executor/stream_executor.h" #include "xla/stream_executor/stream_executor_memory_allocator.h" #include "xla/tests/filecheck.h" #include "xla/tests/verified_hlo_module.h" #include "xla/tsl/lib/core/status_test_util.h" #include "xla/xla.pb.h" #include "xla/xla_data.pb.h" #include "tsl/platform/env.h" #include "tsl/platform/errors.h" #include "tsl/platform/path.h" #include "tsl/platform/statusor.h" #include "tsl/platform/test.h" namespace xla { namespace gpu { namespace { class CuDnnFusionTest : public GpuCodegenTest { public: DebugOptions GetDebugOptionsForTest() override { DebugOptions debug_options = GpuCodegenTest::GetDebugOptionsForTest(); debug_options.set_xla_gpu_autotune_level(0); debug_options.set_xla_gpu_cudnn_gemm_fusion_level(1); return debug_options; } bool IsAtLeastHopperWithCuDnn9() { se::StreamExecutor* executor = backend().default_stream_executor(); return executor->GetDeviceDescription() .cuda_compute_capability() .IsAtLeastHopper() && GetDnnVersionInfoOrDefault(executor).major_version() >= 9; } bool IsAtLeastCuDnn91() { se::StreamExecutor* executor = backend().default_stream_executor(); const se::dnn::VersionInfo version = GetDnnVersionInfoOrDefault(executor); return (version.major_version() == 9 && version.minor_version() >= 1) || version.major_version() > 9; } protected: void SetUp() override { if (!IsAtLeastHopperWithCuDnn9()) { GTEST_SKIP() << "cuDNN GEMM fusion is not enabled before Hopper / cuDNN 9."; } } }; class CuDnnFusionFileCheckTest : public CuDnnFusionTest { public: CuDnnFusionFileCheckTest() { if (!tsl::io::GetTestUndeclaredOutputsDir(&output_directory_)) { output_directory_ = tsl::testing::TmpDir(); } } DebugOptions GetDebugOptionsForTest() override { DebugOptions options = CuDnnFusionTest::GetDebugOptionsForTest(); options.set_xla_dump_to(output_directory_); return options; } absl::StatusOr<bool> RunCuDnnFileCheck(absl::string_view hlo, absl::string_view pattern) { TF_ASSIGN_OR_RETURN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(hlo)); const std::string root_name( module->entry_computation()->root_instruction()->name()); BinaryMap dnn_compiled_graphs; CuDnnFusionCompiler cudnn_compiler(*backend().default_stream_executor(), dnn_compiled_graphs); cudnn_compiler.Run(module.get()).IgnoreError(); std::string dump; TF_RETURN_IF_ERROR(tsl::ReadFileToString( tsl::Env::Default(), tsl::io::JoinPath( output_directory_, FilenameFor(*module, "", absl::StrCat("cudnn_fusion_", root_name, ".json"))), &dump)); return RunFileCheck(dump, pattern); } private: std::string output_directory_; }; TEST_F(CuDnnFusionFileCheckTest, F32DotGraphIsConvertedCorrectly) { 
EXPECT_TRUE(*RunCuDnnFileCheck(R"( fd0 { p0 = f32[64,64] parameter(0) p1 = f32[64,64] parameter(1) ROOT d = f32[64,64] dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} } ENTRY e { p0 = f32[64,64] parameter(0) p1 = f32[64,64] parameter(1) ROOT d0 = f32[64,64] fusion(p0, p1), kind=kCustom, calls=fd0, backend_config={"fusion_backend_config":{"kind":"__cudnn$fusion","cudnn_fusion_config":{"plan_id":"0"}}} })", R"( CHECK: "nodes": [ CHECK: "inputs": { CHECK: "A": "p0", CHECK: "B": "p1" CHECK: }, CHECK: "outputs": { CHECK: "C": "d" CHECK: }, CHECK: "tag": "MATMUL" CHECK: } CHECK: ], CHECK: "tensors": { CHECK: "d": { CHECK: "data_type": "FLOAT", CHECK: "dim": [{{[[:space:]]*1,[[:space:]]*64,[[:space:]]*64[[:space:]]*}}], CHECK: "stride": [{{[[:space:]]*1,[[:space:]]*64,[[:space:]]*1[[:space:]]*}}], CHECK: "uid": 3, CHECK: "uid_assigned": true CHECK: }, CHECK: "p0": { CHECK: "data_type": "FLOAT", CHECK: "dim": [{{[[:space:]]*1,[[:space:]]*64,[[:space:]]*64[[:space:]]*}}], CHECK: "stride": [{{[[:space:]]*1,[[:space:]]*64,[[:space:]]*1[[:space:]]*}}], CHECK: "uid": 1, CHECK: "uid_assigned": true CHECK: }, CHECK: "p1": { CHECK: "data_type": "FLOAT", CHECK: "dim": [{{[[:space:]]*1,[[:space:]]*64,[[:space:]]*64[[:space:]]*}}], CHECK: "stride": [{{[[:space:]]*1,[[:space:]]*64,[[:space:]]*1[[:space:]]*}}], CHECK: "uid": 2, CHECK: "uid_assigned": true CHECK: } )")); } using CuDnnFusionExecutionTest = CuDnnFusionTest; namespace m = ::xla::match; TEST_F(CuDnnFusionExecutionTest, WorkspaceAllocationWorks) { if (!IsAtLeastCuDnn91()) { GTEST_SKIP() << "This test case requests a workspace only with cuDNN 9.1+."; } const std::string kHloText = R"( fusion1 { p0 = f32[32,96] parameter(0) p1 = f32[96,64] parameter(1) ROOT r = f32[32,64] dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} } ENTRY e { p0 = f32[32,96] parameter(0) p1 = f32[96,64] parameter(1) ROOT _ = f32[32,64] fusion(p0, p1), kind=kCustom, calls=fusion1, backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}} })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(kHloText)); BinaryMap dnn_compiled_graphs; CuDnnFusionCompiler cudnn_compiler(*backend().default_stream_executor(), dnn_compiled_graphs); TF_ASSERT_OK_AND_ASSIGN(bool changed, cudnn_compiler.Run(module.get())); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::GetTupleElement(m::Fusion()))); EXPECT_THAT(module->entry_computation() ->root_instruction() ->operand(0) ->fused_instructions_computation() ->root_instruction(), GmockMatch(m::Tuple(m::Dot(), m::CustomCall()))); EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3, 1e-3})); } TEST_F(CuDnnFusionExecutionTest, CuDnnFusionCompilerDoesNotFailOnDependentFusions) { if (!IsAtLeastCuDnn91()) { GTEST_SKIP() << "This test case requests a workspace only with cuDNN 9.1+."; } TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( c1 { p0 = f32[32,96] parameter(0) p1 = f32[96,64] parameter(1) ROOT r = f32[32,64] dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} } c2 { p0 = f32[32,96] parameter(0) p1 = f32[32,64] parameter(1) ROOT r = f32[96,64] dot(p0, p1), lhs_contracting_dims={0}, rhs_contracting_dims={0} } ENTRY e { p0 = f32[32,96] parameter(0) p1 = f32[96,64] parameter(1) f0 = f32[32,64] fusion(p0, p1), kind=kCustom, calls=c1, backend_config={"fusion_backend_config": {kind: "__cudnn$fusion","cudnn_fusion_config":{"plan_id":"0"}}} f1 = f32[96,64] fusion(p0, 
f0), kind=kCustom, calls=c2, backend_config={"fusion_backend_config": {kind: "__cudnn$fusion","cudnn_fusion_config":{"plan_id":"0"}}} ROOT r = tuple(f0, f1) })")); BinaryMap dnn_compiled_graphs; CuDnnFusionCompiler cudnn_compiler(*backend().default_stream_executor(), dnn_compiled_graphs); TF_ASSERT_OK_AND_ASSIGN(bool changed, cudnn_compiler.Run(module.get())); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion()), m::GetTupleElement(m::Fusion())))); } TEST_F(CuDnnFusionExecutionTest, NoTritonConfigIsAssignedAtZeroAutotuningLevel) { EXPECT_EQ(GetDebugOptionsForTest().xla_gpu_autotune_level(), 0); MatchOptimizedHlo(R"( fusion1 { p0 = f32[32,96] parameter(0) p1 = f32[96,64] parameter(1) ROOT r = f32[32,64] dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} } ENTRY e { p0 = f32[32,96] parameter(0) p1 = f32[96,64] parameter(1) ROOT _ = f32[32,64] fusion(p0, p1), kind=kCustom, calls=fusion1, backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}} })", R"( CHECK-NOT: triton_gemm_config )"); } TEST_F(CuDnnFusionExecutionTest, DotF32ExecutesCorrectly) { EXPECT_TRUE(RunAndCompare(R"( fusion1 { p0 = f32[32,96] parameter(0) p1 = f32[96,64] parameter(1) ROOT r = f32[32,64] dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} } ENTRY e { p0 = f32[32,96] parameter(0) p1 = f32[96,64] parameter(1) ROOT _ = f32[32,64] fusion(p0, p1), kind=kCustom, calls=fusion1, backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}} })", ErrorSpec{1e-3, 1e-3})); } TEST_F(CuDnnFusionFileCheckTest, VectorTensorMultiplicationWorksCorrectly) { const std::string kHloText = R"( f { p0 = bf16[64,1] parameter(0) p1 = s8[64,128] parameter(1) p1c = bf16[64,128] convert(p1) ROOT out = bf16[1,128] dot(p0, p1c), lhs_contracting_dims={0}, rhs_contracting_dims={0} } ENTRY e { p0 = bf16[64,1] parameter(0) p1 = s8[64,128] parameter(1) ROOT r = bf16[1,128] fusion(p0, p1), kind=kCustom, calls=f, backend_config={"fusion_backend_config":{"kind":"__cudnn$fusion"}} })"; EXPECT_TRUE(*RunCuDnnFileCheck(kHloText, R"( CHECK: "tensors" CHECK: "out" CHECK: "dim": [{{[[:space:]]*}}1,{{[[:space:]]*}}1,{{[[:space:]]*}}128{{[[:space:]]*}}] CHECK: "stride": [{{[[:space:]]*}}1,{{[[:space:]]*}}128,{{[[:space:]]*}}1{{[[:space:]]*}}] CHECK: "p0" CHECK: "dim": [{{[[:space:]]*}}1,{{[[:space:]]*}}1,{{[[:space:]]*}}64{{[[:space:]]*}}] CHECK: "stride": [{{[[:space:]]*}}1,{{[[:space:]]*}}64,{{[[:space:]]*}}1{{[[:space:]]*}}] CHECK: "p1" CHECK: "dim": [{{[[:space:]]*}}1,{{[[:space:]]*}}64,{{[[:space:]]*}}128{{[[:space:]]*}}] CHECK: "stride": [{{[[:space:]]*}}1,{{[[:space:]]*}}128,{{[[:space:]]*}}1{{[[:space:]]*}}] )")); EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3, 1e-3})); } TEST_F(CuDnnFusionFileCheckTest, TensorVectorMultiplicationWorksCorrectly) { const std::string kHloText = R"( f { p0 = bf16[64,256] parameter(0) p1 = s8[64,1] parameter(1) p1c = bf16[64,1] convert(p1) ROOT out = bf16[256,1] dot(p0, p1c), lhs_contracting_dims={0}, rhs_contracting_dims={0} } ENTRY e { p0 = bf16[64,256] parameter(0) p1 = s8[64,1] parameter(1) ROOT r = bf16[256,1] fusion(p0, p1), kind=kCustom, calls=f, backend_config={"fusion_backend_config":{"kind":"__cudnn$fusion"}} })"; EXPECT_TRUE(*RunCuDnnFileCheck(kHloText, R"( CHECK: "tensors" CHECK: "out" CHECK: "dim": [{{[[:space:]]*}}1,{{[[:space:]]*}}256,{{[[:space:]]*}}1{{[[:space:]]*}}] CHECK: "stride": [{{[[:space:]]*}}1,{{[[:space:]]*}}1,{{[[:space:]]*}}256{{[[:space:]]*}}] CHECK: "p0" CHECK: "dim": 
[{{[[:space:]]*}}1,{{[[:space:]]*}}256,{{[[:space:]]*}}64{{[[:space:]]*}}] CHECK: "stride": [{{[[:space:]]*}}1,{{[[:space:]]*}}1,{{[[:space:]]*}}256{{[[:space:]]*}}] CHECK: "p1" CHECK: "dim": [{{[[:space:]]*}}1,{{[[:space:]]*}}64,{{[[:space:]]*}}1{{[[:space:]]*}}] CHECK: "stride": [{{[[:space:]]*}}1,{{[[:space:]]*}}1,{{[[:space:]]*}}64{{[[:space:]]*}}] )")); EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3, 1e-3})); } TEST_F(CuDnnFusionExecutionTest, DotBF16WithCopyExecutesCorrectly) { EXPECT_TRUE(RunAndCompare(R"( fusion1 { p0 = bf16[96,512,64]{1,2,0} parameter(0) cp = bf16[96,512,64]{2,1,0} copy(p0) p1 = bf16[96,64,512]{2,1,0} parameter(1) ROOT d = bf16[96,512,512]{2,1,0} dot(cp, p1), lhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_batch_dims={0}, rhs_contracting_dims={1} } ENTRY e { p0 = bf16[96,512,64]{1,2,0} parameter(0) p1 = bf16[96,64,512]{2,1,0} parameter(1) ROOT r = bf16[96,512,512]{2,1,0} fusion(p0, p1), kind=kCustom, calls=fusion1, backend_config={"fusion_backend_config": {kind :"__cudnn$fusion"}} })", ErrorSpec{1e-2, 1e-3})); } TEST_F(CuDnnFusionExecutionTest, DotBF16BF16F32ExecutesCorrectly) { EXPECT_TRUE(RunAndCompare(R"( fusion1 { p0 = bf16[16,32,128] parameter(0) p1 = bf16[16,128,64] parameter(1) ROOT r = f32[16,32,64] dot(p0, p1), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={1} } ENTRY e { p0 = bf16[16,32,128] parameter(0) p1 = bf16[16,128,64] parameter(1) ROOT _ = f32[16,32,64] fusion(p0, p1), kind=kCustom, calls=fusion1, backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}} })", ErrorSpec{1e-6, 1e-6})); } TEST_F(CuDnnFusionExecutionTest, DotF32WithOutputSubtractionExecutesCorrectly) { EXPECT_TRUE(RunAndCompare(R"( fusion1 { p0 = f32[9,32,96] parameter(0) p1 = f32[9,96,64] parameter(1) d = f32[9,32,64] dot(p0, p1), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={1} p2 = f32[9,32,64] parameter(2) ROOT s = f32[9,32,64] subtract(p2, d) } ENTRY e { p0 = f32[9,32,96] parameter(0) p1 = f32[9,96,64] parameter(1) p2 = f32[9,32,64] parameter(2) ROOT _ = f32[9,32,64] fusion(p0, p1, p2), kind=kCustom, calls=fusion1, backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}} })", ErrorSpec{1e-3, 1e-3})); } TEST_F(CuDnnFusionExecutionTest, DotWithNonDefaultLayoutsExecutesCorrectly) { EXPECT_TRUE(RunAndCompare(R"( fusion1 { p0 = bf16[32,32]{0,1} parameter(0) p1 = bf16[32,32]{1,0} parameter(1) ROOT r = bf16[32,32]{0,1} dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={1} } ENTRY e { p0 = bf16[32,32]{0,1} parameter(0) p1 = bf16[32,32]{1,0} parameter(1) ROOT _ = bf16[32,32]{0,1} fusion(p0, p1), kind=kCustom, calls=fusion1, backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}} })", ErrorSpec{1e-4, 1e-4})); } TEST_F(CuDnnFusionExecutionTest, RHSFusionExecutesCorrectly) { EXPECT_TRUE(RunAndCompare(R"( fusion1 { p0 = bf16[5,32,96] parameter(0) p1 = s8[5,96,16] parameter(1) p1c = bf16[5,96,16] convert(p1) ROOT r = bf16[5,32,16] dot(p0, p1c), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={1} } ENTRY e { p0 = bf16[5,32,96] parameter(0) p1 = s8[5,96,16] parameter(1) ROOT _ = bf16[5,32,16] fusion(p0, p1), kind=kCustom, calls=fusion1, backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}} })", ErrorSpec{1e-3, 1e-3})); } TEST_F(CuDnnFusionExecutionTest, SkipNonDefaultPrecision) { EXPECT_FALSE(Run(R"( t { p0 = f32[27,23] parameter(0) p0c = s8[27,23] convert(p0) p0cc = f32[27,23] convert(p0c) p1 = f32[23,21] 
parameter(1) ROOT r = f32[27,21] dot(p0cc, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0}, operand_precision={HIGH, HIGH} } ENTRY e { p0 = f32[27,23] parameter(0) p1 = f32[23,21] parameter(1) ROOT r = f32[27,21] fusion(p0, p1), kind=kCustom, calls=t, backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}} })")); } TEST_F(CuDnnFusionExecutionTest, DotF16NegateNonDefaultDimensionsExecutesCorrectly) { EXPECT_TRUE(RunAndCompare(R"( fusion1 { p0 = f16[16,32,96] parameter(0) p0n = f16[16,32,96] negate(p0) p1 = f16[16,64,96] parameter(1) ROOT r = f16[16,32,64] dot(p0n, p1), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={2} } ENTRY e { p0 = f16[16,32,96] parameter(0) p1 = f16[16,64,96] parameter(1) ROOT _ = f16[16,32,64] fusion(p0, p1), kind=kCustom, calls=fusion1, backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}} })", ErrorSpec{1e-3, 1e-3})); } TEST_F(CuDnnFusionExecutionTest, DotS8BF16ExecutesCorrectly) { EXPECT_TRUE(RunAndCompare(R"( fusion1 { p0 = s8[5,32,96] parameter(0) p0c = bf16[5,32,96] convert(p0) p1 = bf16[5,96,16] parameter(1) ROOT r = bf16[5,32,16] dot(p0c, p1), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={1} } ENTRY e { p0 = s8[5,32,96] parameter(0) p1 = bf16[5,96,16] parameter(1) ROOT _ = bf16[5,32,16] fusion(p0, p1), kind=kCustom, calls=fusion1, backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}} })", ErrorSpec{1e-5, 1e-5})); } TEST_F(CuDnnFusionExecutionTest, IntegerMathExecutesCorrectly) { if (!IsAtLeastCuDnn91()) { GTEST_SKIP() << "Integer math requires cuDNN 9.1+."; } const std::string kHloText = R"( fusion1 { p0 = s8[16,16] parameter(0) p1 = s8[16,16] parameter(1) d = s32[16,16] dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} p2 = s32[16,16] parameter(2) ROOT a = s32[16,16] add(d, p2) } ENTRY e { p0 = s8[16,16] parameter(0) p1 = s8[16,16] parameter(1) p2 = s32[16,16] parameter(2) ROOT r = s32[16,16] fusion(p0, p1, p2), kind=kCustom, calls=fusion1, backend_config={"fusion_backend_config": {"kind":"__cudnn$fusion"}} })"; EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{0, 0})); } class CuDnnFusionCommandBufferTest : public CuDnnFusionTest { public: DebugOptions GetDebugOptionsForTest() override { DebugOptions debug_options = CuDnnFusionTest::GetDebugOptionsForTest(); debug_options.set_xla_gpu_graph_min_graph_size(1); return debug_options; } }; TEST_F(CuDnnFusionCommandBufferTest, CommandBuffersAreSupported) { const std::string kHloText = R"( fd0 { p0 = f32[64,64]{1,0} parameter(0) p1 = f32[64,64]{1,0} parameter(1) ROOT d = f32[64,64]{1,0} dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} } fd1 { p0 = f32[64,64]{1,0} parameter(0) p1 = f32[64,64]{1,0} parameter(1) ROOT d = f32[64,64]{1,0} dot(p0, p1), lhs_contracting_dims={0}, rhs_contracting_dims={1} } ENTRY e { p0 = f32[64,64]{1,0} parameter(0) p1 = f32[64,64]{1,0} parameter(1) d0 = f32[64,64]{1,0} fusion(p0, p1), kind=kCustom, calls=fd0, backend_config={"fusion_backend_config":{"kind":"__cudnn$fusion","cudnn_fusion_config":{"plan_id":"0"}}} a = f32[64,64]{1,0} add(d0, d0) ROOT d1 = f32[64,64]{1,0} fusion(a, d0), kind=kCustom, calls=fd1, backend_config={"fusion_backend_config":{"kind":"__cudnn$fusion","cudnn_fusion_config":{"plan_id":"0"}}} })"; se::StreamExecutorMemoryAllocator allocator( backend().default_stream_executor()); TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<Executable> executable, backend().compiler()->RunBackend( GetOptimizedModule(kHloText).value(), 
backend().default_stream_executor(), &allocator)); absl::StatusOr<bool> filecheck_result = RunFileCheck(executable->module().ToString(), R"( ; CHECK: ENTRY ; CHECK-NEXT: parameter ; CHECK-NEXT: parameter ; CHECK: command_buffer ; CHECK-NOT: fusion )"); TF_ASSERT_OK(filecheck_result.status()); EXPECT_TRUE(filecheck_result.value()); EXPECT_TRUE(RunAndCompare(kHloText, ErrorSpec{1e-3, 1e-3})); } class CuDnnFusionLevel2Test : public CuDnnFusionExecutionTest { public: DebugOptions GetDebugOptionsForTest() override { DebugOptions debug_options = CuDnnFusionExecutionTest::GetDebugOptionsForTest(); debug_options.set_xla_gpu_cudnn_gemm_fusion_level(2); return debug_options; } }; TEST_F(CuDnnFusionLevel2Test, BroadcastToDim2ExecutesCorrectly) { EXPECT_TRUE(RunAndCompare(R"( fusion1 { p0 = f16[16,32,128] parameter(0) p1 = f16[16,128,64] parameter(1) p2 = f16[16,32] parameter(2) p2b = f16[16,32,128] broadcast(p2), dimensions={0,1} a = f16[16,32,128] add(p0, p2b) ROOT r = f16[16,32,64] dot(a, p1), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={1} } ENTRY e { p0 = f16[16,32,128] parameter(0) p1 = f16[16,128,64] parameter(1) p2 = f16[16,32] parameter(2) ROOT _ = f16[16,32,64] fusion(p0, p1, p2), kind=kCustom, calls=fusion1, backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}} })", ErrorSpec{1e-3, 1e-3})); } TEST_F(CuDnnFusionLevel2Test, BroadcastToDim1ExecutesCorrectly) { EXPECT_TRUE(RunAndCompare(R"( fusion1 { p0 = f16[16,32,128] parameter(0) p1 = f16[16,128,64] parameter(1) p2 = f16[16,128] parameter(2) p2b = f16[16,32,128] broadcast(p2), dimensions={0,2} a = f16[16,32,128] add(p0, p2b) ROOT r = f16[16,32,64] dot(a, p1), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={1} } ENTRY e { p0 = f16[16,32,128] parameter(0) p1 = f16[16,128,64] parameter(1) p2 = f16[16,128] parameter(2) ROOT _ = f16[16,32,64] fusion(p0, p1, p2), kind=kCustom, calls=fusion1, backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}} })", ErrorSpec{1e-3, 1e-3})); } TEST_F(CuDnnFusionLevel2Test, BroadcastToDim0ExecutesCorrectly) { EXPECT_TRUE(RunAndCompare(R"( fusion1 { p0 = bf16[32,128] parameter(0) p0b = bf16[5,32,128] broadcast(p0), dimensions={1,2} p1 = bf16[5,128,64] parameter(1) ROOT r = f32[5,32,64] dot(p0b, p1), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={1} } ENTRY e { p0 = bf16[32,128] parameter(0) p1 = bf16[5,128,64] parameter(1) ROOT _ = f32[5,32,64] fusion(p0, p1), kind=kCustom, calls=fusion1, backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}} })", ErrorSpec{1e-3, 1e-3})); } TEST_F(CuDnnFusionLevel2Test, BroadcastTo2DimsExecutesCorrectly) { EXPECT_TRUE(RunAndCompare(R"( fusion1 { p0 = f16[16,32,128] parameter(0) p1 = f16[16,128,64] parameter(1) p2 = f16[128] parameter(2) p2b = f16[16,32,128] broadcast(p2), dimensions={2} a = f16[16,32,128] add(p0, p2b) ROOT r = f16[16,32,64] dot(a, p1), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={1} } ENTRY e { p0 = f16[16,32,128] parameter(0) p1 = f16[16,128,64] parameter(1) p2 = f16[128] parameter(2) ROOT _ = f16[16,32,64] fusion(p0, p1, p2), kind=kCustom, calls=fusion1, backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}} })", ErrorSpec{1e-3, 1e-3})); } TEST_F(CuDnnFusionLevel2Test, BroadcastTo3DimsExecutesCorrectly) { EXPECT_TRUE(RunAndCompare(R"( fusion1 { p0 = f16[16,32,128] parameter(0) p1 = f16[16,128,64] parameter(1) p2 = f16[] parameter(2) p2b = 
f16[16,32,128] broadcast(p2), dimensions={} a = f16[16,32,128] add(p0, p2b) ROOT r = f16[16,32,64] dot(a, p1), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={1} } ENTRY e { p0 = f16[16,32,128] parameter(0) p1 = f16[16,128,64] parameter(1) p2 = f16[] parameter(2) ROOT _ = f16[16,32,64] fusion(p0, p1, p2), kind=kCustom, calls=fusion1, backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}} })", ErrorSpec{1e-3, 1e-3})); } TEST_F(CuDnnFusionLevel2Test, ConstantExecutesCorrectly) { if (!IsAtLeastCuDnn91()) { GTEST_SKIP() << "Fused scalar constants require cuDNN 9.1+."; } EXPECT_TRUE(RunAndCompare(R"( fusion1 { x = bf16[16,32] parameter(0) y = bf16[32,16] parameter(1) x_const = bf16[] constant(-1) y_const = s32[] constant(-2) x_const_bcast = bf16[16,32] broadcast(x_const), dimensions={} y_const_bcast = s32[32,16] broadcast(y_const), dimensions={} y_const_convert = bf16[32,16] convert(y_const_bcast) x_add = bf16[16,32] minimum(x, x_const_bcast) y_add = bf16[32,16] minimum(y, y_const_convert) dot_a = f32[16,16] dot(x_add, y_add), lhs_contracting_dims={1}, rhs_contracting_dims={0} c = f32[] constant(0) c_bcast = f32[16,16] broadcast(c), dimensions={} ROOT out = f32[16,16] maximum(dot_a, c_bcast) } ENTRY e { p0 = bf16[16,32] parameter(0) p1 = bf16[32,16] parameter(1) ROOT _ = f32[16,16] fusion(p0, p1), kind=kCustom, calls=fusion1, backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}} })", ErrorSpec{1e-3, 1e-3})); } TEST_F(CuDnnFusionLevel2Test, ClampExecutesCorrectly) { if (!IsAtLeastCuDnn91()) { GTEST_SKIP() << "Clamp test requires cuDNN 9.1+."; } EXPECT_TRUE(RunAndCompare(R"( fusion1 { x = bf16[16,32] parameter(0) y = bf16[32,16] parameter(1) x_const_lower = bf16[] constant(3e-3) x_const_upper = bf16[] constant(1e-1) y_const_lower = bf16[] constant(3e-3) y_const_upper = bf16[] constant(1e-1) x_const_bcast_lower = bf16[16,32] broadcast(x_const_lower), dimensions={} x_const_bcast_upper = bf16[16,32] broadcast(x_const_upper), dimensions={} y_const_bcast_lower = bf16[32,16] broadcast(y_const_lower), dimensions={} y_const_bcast_upper = bf16[32,16] broadcast(y_const_upper), dimensions={} x_clamp = bf16[16,32] clamp(x_const_bcast_lower, x, x_const_bcast_upper) y_clamp = bf16[32,16] clamp(y_const_bcast_lower, y, y_const_bcast_upper) ROOT dot_a = f32[16,16] dot(x_clamp, y_clamp), lhs_contracting_dims={1}, rhs_contracting_dims={0} } ENTRY e { p0 = bf16[16,32] parameter(0) p1 = bf16[32,16] parameter(1) ROOT _ = f32[16,16] fusion(p0, p1), kind=kCustom, calls=fusion1, backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}} })", ErrorSpec{1e-3, 1e-3})); } TEST_F(CuDnnFusionLevel2Test, DotF8ExecutesCorrectly) { EXPECT_TRUE(RunAndCompare(R"( fusion1 { x = f8e4m3fn[16,32] parameter(0) y = f8e4m3fn[32,16] parameter(1) dot = f32[16,16] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} x_scale = f32[] parameter(2) y_scale = f32[] parameter(3) combined_scale = f32[] multiply(x_scale, y_scale) scale_bcast = f32[16,16] broadcast(combined_scale), dimensions={} ROOT out = f32[16,16] multiply(dot, scale_bcast) } ENTRY e { p0 = f8e4m3fn[16,32] parameter(0) p1 = f8e4m3fn[32,16] parameter(1) x_scale = f32[] parameter(2) y_scale = f32[] parameter(3) ROOT _ = f32[16,16] fusion(p0, p1, x_scale, y_scale), kind=kCustom, calls=fusion1, backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}} })", ErrorSpec{1e-3, 1e-3})); } TEST_F(CuDnnFusionLevel2Test, SlicingExecutesCorrectly) { EXPECT_TRUE(RunAndCompare(R"( fusion1 { p0 = 
f16[11,23,64] parameter(0) s0 = f16[8,16,64] slice(p0), slice={[1:9], [3:19], [0:64]} p1 = f16[8,64,32] parameter(1) ROOT r = f16[8,16,32] dot(s0, p1), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={1} } ENTRY e { p0 = f16[11,23,64] parameter(0) p1 = f16[8,64,32] parameter(1) ROOT _ = f16[8,16,32] fusion(p0, p1), kind=kCustom, calls=fusion1, backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}} })", ErrorSpec{1e-3, 1e-3})); } class CuDnnFusionLevel3Test : public CuDnnFusionExecutionTest { public: DebugOptions GetDebugOptionsForTest() override { DebugOptions debug_options = CuDnnFusionExecutionTest::GetDebugOptionsForTest(); debug_options.set_xla_gpu_cudnn_gemm_fusion_level(3); return debug_options; } }; TEST_F(CuDnnFusionLevel3Test, DotWithSplitNonContractingInputExecutesCorrectly) { EXPECT_TRUE(RunAndCompare(R"( fusion1 { p0 = s8[4,3,16,400]{2,1,3,0} parameter(0) cp0 = s8[4,3,16,400]{3,2,1,0} copy(p0) bc0 = s8[192,400]{1,0} bitcast(cp0) cvt0 = bf16[192,400]{1,0} convert(bc0) p1 = bf16[1,128,400]{2,1,0} parameter(1) bc1 = bf16[128,400]{1,0} reshape(p1) ROOT d = bf16[192,128]{1,0} dot(cvt0, bc1), lhs_contracting_dims={1}, rhs_contracting_dims={1} } ENTRY r { p0 = s8[4,3,16,400]{2,1,3,0} parameter(0) p1 = bf16[1,128,400]{2,1,0} parameter(1) ROOT r = bf16[192,128]{1,0} fusion(p0, p1), kind=kCustom, calls=fusion1, backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}} })", ErrorSpec{1, 1e-3})); } TEST_F(CuDnnFusionLevel3Test, DotWithSplitNonContractingInOutExecutesCorrectly) { EXPECT_TRUE(RunAndCompare(R"( fusion1 { p0 = s8[4,3,16,400]{2,1,3,0} parameter(0) cp0 = s8[4,3,16,400]{3,2,1,0} copy(p0) bc0 = s8[192,400]{1,0} bitcast(cp0) cvt0 = bf16[192,400]{1,0} convert(bc0) p1 = bf16[1,128,400]{2,1,0} parameter(1) bc1 = bf16[128,400]{1,0} reshape(p1) d = bf16[192,128]{1,0} dot(cvt0, bc1), lhs_contracting_dims={1}, rhs_contracting_dims={1} bc = bf16[4,3,16,128]{3,2,1,0} bitcast(d) ROOT cp = bf16[4,3,16,128]{2,1,3,0} copy(bc) } ENTRY r { p0 = s8[4,3,16,400]{2,1,3,0} parameter(0) p1 = bf16[1,128,400]{2,1,0} parameter(1) ROOT r = bf16[4,3,16,128]{2,1,3,0} fusion(p0, p1), kind=kCustom, calls=fusion1, backend_config={"fusion_backend_config": {kind: "__cudnn$fusion"}} })", ErrorSpec{1, 1e-3})); } class ElementwiseTest : public CuDnnFusionExecutionTest, public ::testing::WithParamInterface< std::tuple<PrimitiveType, HloOpcode, float>> {}; std::string ElementwiseTestParamsToString( const ::testing::TestParamInfo<std::tuple<PrimitiveType, HloOpcode, float>>& data) { PrimitiveType data_type; HloOpcode opcode; float tolerance; std::tie(data_type, opcode, tolerance) = data.param; return absl::StrCat( primitive_util::LowercasePrimitiveTypeName(data_type), "_", absl::StrReplaceAll(HloOpcodeString(opcode), {{"-", "_"}})); } using UnaryElementwiseTest = ElementwiseTest; TEST_P(UnaryElementwiseTest, ElementwiseFusionExecutesCorrectly) { PrimitiveType data_type; HloOpcode opcode; float tolerance; std::tie(data_type, opcode, tolerance) = GetParam(); const std::string kHloTemplate = R"( fusion_computation { p0 = f32[32,32] parameter(0) p1 = $0[32,32] parameter(1) f1.1 = $0[32,32] $1(p1) c.1 = f32[32,32] convert(f1.1) ROOT _ = f32[32,32] dot(p0, c.1), lhs_contracting_dims={1}, rhs_contracting_dims={0} } ENTRY e { p1 = $0[32,32] parameter(1) p0 = f32[32,32] parameter(0) ROOT r = f32[32,32] fusion(p0, p1), kind=kCustom, calls=fusion_computation, backend_config={"fusion_backend_config":{"kind":"__cudnn$$fusion"}} })"; const std::string hlo_test = 
absl::Substitute( kHloTemplate, primitive_util::LowercasePrimitiveTypeName(data_type), HloOpcodeString(opcode)); EXPECT_TRUE(RunAndCompare(hlo_test, ErrorSpec{tolerance, tolerance})); } INSTANTIATE_TEST_SUITE_P( ElementwiseTestSuiteF32, UnaryElementwiseTest, ::testing::Combine(::testing::Values(F32), ::testing::ValuesIn( {HloOpcode::kAbs, HloOpcode::kCeil, HloOpcode::kCos, HloOpcode::kExp, HloOpcode::kFloor, HloOpcode::kLog, HloOpcode::kNegate, HloOpcode::kRsqrt, HloOpcode::kSin, HloOpcode::kSqrt, HloOpcode::kTan, HloOpcode::kTanh}), ::testing::Values(1e-3)), ElementwiseTestParamsToString); using BinaryElementwiseTest = ElementwiseTest; TEST_P(BinaryElementwiseTest, ElementwiseFusionExecutesCorrectly) { PrimitiveType data_type; HloOpcode opcode; float tolerance; std::tie(data_type, opcode, tolerance) = GetParam(); const std::string kHloTemplate = R"( fusion_computation { p0 = f32[32,32] parameter(0) p1 = $0[32,32] parameter(1) p2 = $0[32,32] parameter(2) f1.1 = $0[32,32] $1(p1, p2) c.1 = f32[32,32] convert(f1.1) ROOT _ = f32[32,32] dot(p0, c.1), lhs_contracting_dims={1}, rhs_contracting_dims={0} } ENTRY e { p0 = f32[32,32] parameter(0) p1 = $0[32,32] parameter(1) p2 = $0[32,32] parameter(2) ROOT r = f32[32,32] fusion(p0, p1, p2), kind=kCustom, calls=fusion_computation, backend_config={"fusion_backend_config":{"kind":"__cudnn$$fusion"}} })"; const std::string hlo_test = absl::Substitute( kHloTemplate, primitive_util::LowercasePrimitiveTypeName(data_type), HloOpcodeString(opcode)); EXPECT_TRUE(RunAndCompare(hlo_test, ErrorSpec{tolerance, tolerance})); } INSTANTIATE_TEST_SUITE_P( ElementwiseTestSuiteF32, BinaryElementwiseTest, ::testing::Combine( ::testing::Values(F32), ::testing::ValuesIn({HloOpcode::kAdd, HloOpcode::kDivide, HloOpcode::kMaximum, HloOpcode::kMinimum, HloOpcode::kMultiply, HloOpcode::kPower, HloOpcode::kSubtract}), ::testing::Values(3e-3)), ElementwiseTestParamsToString); class CompareTest : public CuDnnFusionExecutionTest, public ::testing::WithParamInterface< std::tuple<PrimitiveType, Comparison::Direction>> {}; std::string CompareTestParamsToString( const ::testing::TestParamInfo< std::tuple<PrimitiveType, Comparison::Direction>>& data) { PrimitiveType data_type; Comparison::Direction direction; std::tie(data_type, direction) = data.param; return absl::StrCat(primitive_util::LowercasePrimitiveTypeName(data_type), "_", ComparisonDirectionToString(direction)); } TEST_P(CompareTest, FusedComparisonExecutesCorrectly) { PrimitiveType data_type; Comparison::Direction direction; std::tie(data_type, direction) = GetParam(); const std::string kHloTemplate = R"( fusion_computation { p0 = f32[32,32] parameter(0) p1 = $0[32,32] parameter(1) p2 = $0[32,32] parameter(2) f1.1 = pred[32,32] compare(p1, p2), direction=$1 c.1 = f32[32,32] convert(f1.1) ROOT _ = f32[32,32] dot(p0, c.1), lhs_contracting_dims={1}, rhs_contracting_dims={0} } ENTRY e { p0 = f32[32,32] parameter(0) p1 = $0[32,32] parameter(1) p2 = $0[32,32] parameter(2) ROOT r = f32[32,32] fusion(p0, p1, p2), kind=kCustom, calls=fusion_computation, backend_config={"fusion_backend_config":{"kind":"__cudnn$$fusion"}} })"; const std::string hlo_test = absl::Substitute( kHloTemplate, primitive_util::LowercasePrimitiveTypeName(data_type), ComparisonDirectionToString(direction)); EXPECT_TRUE(RunAndCompare(hlo_test, ErrorSpec{1e-3, 1e-3})); } using cd = Comparison::Direction; INSTANTIATE_TEST_SUITE_P( CompareTestSuite, CompareTest, ::testing::Combine(::testing::Values(PRED, S8, S32, F16, F32), ::testing::Values(cd::kEq, cd::kNe, 
cd::kGe, cd::kGt, cd::kLe, cd::kLt)), CompareTestParamsToString); class SelectTest : public CuDnnFusionExecutionTest, public ::testing::WithParamInterface<PrimitiveType> {}; TEST_P(SelectTest, SelectFusionExecutesCorrectly) { if (!IsAtLeastCuDnn91()) { GTEST_SKIP() << "Select operation requires cuDNN 9.1+."; } const std::string kHloTemplate = R"( fusion_computation { p0 = f32[32,32] parameter(0) p1 = $0[32,32] parameter(1) p2 = $0[32,32] parameter(2) p3 = pred[32,32] parameter(3) s = $0[32,32] select(p3, p1, p2) c = f32[32,32] convert(s) ROOT r = f32[32,32] dot(p0, c), lhs_contracting_dims={1}, rhs_contracting_dims={0} } ENTRY e { p0 = f32[32,32] parameter(0) p1 = $0[32,32] parameter(1) p2 = $0[32,32] parameter(2) p3 = pred[32,32] parameter(3) ROOT r = f32[32,32] fusion(p0, p1, p2, p3), kind=kCustom, calls=fusion_computation, backend_config={"fusion_backend_config":{"kind":"__cudnn$$fusion"}} })"; const std::string hlo_test = absl::Substitute( kHloTemplate, primitive_util::LowercasePrimitiveTypeName(GetParam())); EXPECT_TRUE(RunAndCompare(hlo_test, ErrorSpec{1e-4, 1e-4})); } constexpr std::array<PrimitiveType, 3> kSupportedDataTypes{F16, F32, BF16}; INSTANTIATE_TEST_SUITE_P(SelectTestSuite, SelectTest, ::testing::ValuesIn(kSupportedDataTypes)); class CuDnnFusionRewriteTest : public CuDnnFusionTest { public: DebugOptions GetDebugOptionsForTest() override { DebugOptions debug_options = CuDnnFusionTest::GetDebugOptionsForTest(); debug_options.set_xla_gpu_autotune_level( GetDebugOptionsFromFlags().xla_gpu_autotune_level()); debug_options.set_xla_gpu_cudnn_gemm_fusion_level(1); debug_options.set_xla_gpu_cublas_fallback(false); return debug_options; } }; TEST_F(CuDnnFusionRewriteTest, DoNotExecuteGemmFusionWithCuDnnWhenNotSupported) { MatchOptimizedHlo(R"( ENTRY e { p0 = f16[20,40,61] parameter(0) p0n = f16[20,40,61] negate(p0) p1 = f16[20,80,61] parameter(1) ROOT r = f16[20,40,80] dot(p0n, p1), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={2} })", R"( ; CHECK: ENTRY ; CHECK-NEXT: parameter ; CHECK-NEXT: parameter ; CHECK-NEXT: ROOT ; CHECK-SAME: fusion ; CHECK-NOT: cudnn )"); } TEST_F(CuDnnFusionRewriteTest, AutotuningPicksCuDnnForS8BF16OnHopper) { MatchOptimizedHlo(R"( e { p0 = bf16[720,720,720] parameter(0) p1 = s8[720,720,720] parameter(1) c = bf16[720,720,720] convert(p1) ROOT d = bf16[720,720,720] dot(p0, c), lhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_batch_dims={0}, rhs_contracting_dims={1} })", R"( ; CHECK: __cudnn$fusion )"); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/cudnn.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/cudnn_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
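The parameterized suites in the test file above (UnaryElementwiseTest, BinaryElementwiseTest, CompareTest) rely on GoogleTest value parameterization via ::testing::Combine. Below is a minimal standalone sketch of that mechanism using only GoogleTest itself; the suite, fixture, and parameter names are illustrative and not taken from XLA.

#include <gtest/gtest.h>
#include <string>
#include <tuple>

// Each (width, op) pair from the Combine() cross product becomes one test
// instance, mirroring how the elementwise/compare suites above fan out over
// (PrimitiveType, HloOpcode) or (PrimitiveType, Comparison::Direction).
class ComboTest
    : public ::testing::TestWithParam<std::tuple<int, std::string>> {};

TEST_P(ComboTest, ParamIsVisible) {
  auto [width, op] = GetParam();
  EXPECT_GT(width, 0);
  EXPECT_FALSE(op.empty());
}

INSTANTIATE_TEST_SUITE_P(
    ComboSuite, ComboTest,
    ::testing::Combine(::testing::Values(16, 32),
                       ::testing::Values(std::string("add"),
                                         std::string("multiply"))));

// Explicit main, in case the binary is not linked against gtest_main.
int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}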
9e553563-1ec2-459c-b1c0-e361301415c2
cpp
tensorflow/tensorflow
copy
third_party/xla/xla/service/gpu/fusions/copy.cc
third_party/xla/xla/tests/copy_test.cc
#include "xla/service/gpu/fusions/copy.h" #include <memory> #include <vector> #include "absl/status/status.h" #include "absl/status/statusor.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/service/buffer_assignment.h" #include "xla/service/gpu/fusions/fusion_emitter.h" #include "xla/service/gpu/hlo_traversal.h" #include "xla/service/gpu/ir_emitter_context.h" #include "xla/service/gpu/runtime/copy_thunk.h" #include "xla/service/gpu/runtime/thunk.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { absl::StatusOr<FusionEmissionResult> MemcpyFusion::Emit( IrEmitterContext& ir_emitter_context, const HloFusionInstruction& fusion) const { std::vector<BufferAllocation::Slice> src_buffers; for (const HloInstructionAdaptor& root_adaptor : analysis_.fusion_roots()) { const HloInstruction* root = &root_adaptor.instruction(); const HloInstruction* src_instr = fusion.operand(root->operand(0)->parameter_number()); TF_ASSIGN_OR_RETURN(BufferAllocation::Slice slice, buffer_assignment_->GetUniqueSlice(src_instr, {})); src_buffers.push_back(slice); } std::vector<BufferAllocation::Slice> dst_buffers; TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus( fusion.shape(), [&](const Shape& subshape, const ShapeIndex& index) { if (!subshape.IsArray()) { return absl::OkStatus(); } TF_ASSIGN_OR_RETURN(BufferAllocation::Slice slice, buffer_assignment_->GetUniqueSlice(&fusion, index)); dst_buffers.push_back(slice); return absl::OkStatus(); })); FusionEmissionResult result; for (int i = 0; i < src_buffers.size(); ++i) { if (src_buffers[i] != dst_buffers[i]) { result.thunks.emplace_back(std::make_unique<DeviceToDeviceCopyThunk>( Thunk::ThunkInfo::WithProfileAnnotation(&fusion), src_buffers[i], dst_buffers[i], src_buffers[i].size())); } } return result; } } }
#include <cstddef> #include <cstdint> #include <memory> #include <utility> #include <vector> #include <gtest/gtest.h> #include "absl/types/span.h" #include "xla/array3d.h" #include "xla/array4d.h" #include "xla/hlo/builder/xla_builder.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/layout.h" #include "xla/layout_util.h" #include "xla/literal.h" #include "xla/literal_util.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/tests/client_library_test_base.h" #include "xla/tests/hlo_test_base.h" #include "xla/tests/literal_test_util.h" #include "xla/tests/test_macros.h" #include "xla/xla_data.pb.h" #include "tsl/platform/test.h" namespace xla { namespace { class CopyOpTest : public HloTestBase { protected: void TestCopyOp(const Literal& literal) { auto builder = HloComputation::Builder(TestName()); auto constant = builder.AddInstruction(HloInstruction::CreateConstant(literal.Clone())); builder.AddInstruction(HloInstruction::CreateUnary( constant->shape(), HloOpcode::kCopy, constant)); auto computation = builder.Build(); auto module = CreateNewVerifiedModule(); module->AddEntryComputation(std::move(computation)); Literal result = ExecuteAndTransfer(std::move(module), {}); EXPECT_TRUE(LiteralTestUtil::Equal(literal, result)); } void TestDynamicCopyOp(const Literal& literal, const Shape& bounded_shape) { Literal dynamic_literal = literal.ToBoundedDynamic(bounded_shape); auto builder = HloComputation::Builder(TestName()); auto parameter = builder.AddInstruction( HloInstruction::CreateParameter(0, dynamic_literal.shape(), "param")); builder.AddInstruction(HloInstruction::CreateUnary( parameter->shape(), HloOpcode::kCopy, parameter)); auto computation = builder.Build(); auto module = CreateNewVerifiedModule(); module->AddEntryComputation(std::move(computation)); std::vector<Literal*> args = {&dynamic_literal}; Literal result = ExecuteAndTransfer(std::move(module), args); Literal dynamic_result = result.ToBoundedDynamic(bounded_shape); EXPECT_TRUE(LiteralTestUtil::Equal(dynamic_literal, dynamic_result)); } void TestCopyConstantLayout021(size_t n1, size_t n2, size_t n3); void TestCopyConstantLayoutR4(size_t n1, size_t n2, size_t n3, size_t n4, absl::Span<const int64_t> permutation); }; XLA_TEST_F(CopyOpTest, CopyR0Bool) { TestCopyOp(LiteralUtil::CreateR0<bool>(true)); } XLA_TEST_F(CopyOpTest, CopyR1S0U32) { TestCopyOp(LiteralUtil::CreateR1<uint32_t>({})); } XLA_TEST_F(CopyOpTest, CopyR1S3U32) { TestCopyOp(LiteralUtil::CreateR1<uint32_t>({1, 2, 3})); } XLA_TEST_F(CopyOpTest, CopyDynamicR1S1310720U32Dynamic0) { if (backend().platform()->Name() == "Host") { GTEST_SKIP(); } Shape bounded_shape = ShapeUtil::MakeShape(PrimitiveType::F32, {1310720}, {true}); TestDynamicCopyOp(LiteralUtil::CreateRandomLiteral<PrimitiveType::F32>( ShapeUtil::MakeShape(PrimitiveType::F32, {0}), 0, 1) .value(), bounded_shape); } XLA_TEST_F(CopyOpTest, CopyDynamicR1S1310720U32Dynamic106632) { if (backend().platform()->Name() == "Host") { GTEST_SKIP(); } Shape bounded_shape = ShapeUtil::MakeShape(PrimitiveType::F32, {1310720}, {true}); TestDynamicCopyOp( LiteralUtil::CreateRandomLiteral<PrimitiveType::F32>( ShapeUtil::MakeShape(PrimitiveType::F32, {106632}), 0, 1) .value(), bounded_shape); } XLA_TEST_F(CopyOpTest, CopyDynamicR1S1310720U32Dynamic1310720) { if (backend().platform()->Name() == "Host") { GTEST_SKIP(); } Shape bounded_shape = ShapeUtil::MakeShape(PrimitiveType::F32, {1310720}, {true}); TestDynamicCopyOp( 
LiteralUtil::CreateRandomLiteral<PrimitiveType::F32>( ShapeUtil::MakeShape(PrimitiveType::F32, {1310720}), 0, 1) .value(), bounded_shape); } XLA_TEST_F(CopyOpTest, CopyDynamicR1S512U32Dynamic64) { if (backend().platform()->Name() == "Host") { GTEST_SKIP(); } Shape bounded_shape = ShapeUtil::MakeShape(PrimitiveType::F32, {512}, {true}); TestDynamicCopyOp(LiteralUtil::CreateRandomLiteral<PrimitiveType::F32>( ShapeUtil::MakeShape(PrimitiveType::F32, {64}), 0, 1) .value(), bounded_shape); } XLA_TEST_F(CopyOpTest, CopyR3F32_2x2x3) { TestCopyOp(LiteralUtil::CreateR3({{{1.0f, 2.0f, 3.0f}, {4.0f, 5.0f, 6.0f}}, {{1.1f, 2.1f, 3.1f}, {6.1f, 3.5f, 2.8f}}})); } XLA_TEST_F(CopyOpTest, CopyR4S32_2x2x3x2) { TestCopyOp(LiteralUtil::CreateR4( {{{{1, -2}, {-4, 5}, {6, 7}}, {{8, 9}, {10, 11}, {12, 13}}}, {{{10, 3}, {7, -2}, {3, 6}}, {{2, 5}, {-11, 5}, {-2, -5}}}})); } XLA_TEST_F(CopyOpTest, CopyR4S32_0x2x3x2) { TestCopyOp(LiteralUtil::CreateR4FromArray4D(Array4D<int32_t>(0, 2, 3, 2))); } XLA_TEST_F(CopyOpTest, CopyParameterScalar) { auto builder = HloComputation::Builder(TestName()); auto literal = LiteralUtil::CreateR0<float>(42.0); Shape shape = literal.shape(); auto param0 = builder.AddInstruction( HloInstruction::CreateParameter(0, shape, "param0")); builder.AddInstruction( HloInstruction::CreateUnary(shape, HloOpcode::kCopy, param0)); auto computation = builder.Build(); auto module = CreateNewVerifiedModule(); module->AddEntryComputation(std::move(computation)); Literal result = ExecuteAndTransfer(std::move(module), {&literal}); LiteralTestUtil::ExpectR0Near<float>(42.0f, result, error_spec_); } XLA_TEST_F(CopyOpTest, CopyConstantR2Twice) { auto builder = HloComputation::Builder(TestName()); auto literal = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}); auto constant = builder.AddInstruction( HloInstruction::CreateConstant(std::move(literal))); auto copy = builder.AddInstruction(HloInstruction::CreateUnary( constant->shape(), HloOpcode::kCopy, constant)); builder.AddInstruction( HloInstruction::CreateUnary(copy->shape(), HloOpcode::kCopy, copy)); auto computation = builder.Build(); auto module = CreateNewVerifiedModule(); module->AddEntryComputation(std::move(computation)); Literal result = ExecuteAndTransfer(std::move(module), {}); LiteralTestUtil::ExpectR2Near<float>({{1.0, 2.0}, {3.0, 4.0}}, result, error_spec_); } XLA_TEST_F(CopyOpTest, CopyConstantR2DifferentLayouts) { HloComputation::Builder builder(TestName()); Literal literal = LiteralUtil::CreateR2<float>({{1.0, 2.0}, {3.0, 4.0}}); Layout* literal_layout = literal.mutable_shape_do_not_use()->mutable_layout(); ASSERT_EQ(2, literal_layout->minor_to_major_size()); *literal_layout->mutable_minor_to_major() = { literal_layout->minor_to_major(1), literal_layout->minor_to_major(0)}; HloInstruction* constant = builder.AddInstruction( HloInstruction::CreateConstant(std::move(literal))); builder.AddInstruction(HloInstruction::CreateUnary( constant->shape(), HloOpcode::kCopy, constant)); std::unique_ptr<HloComputation> computation = builder.Build(); auto module = CreateNewVerifiedModule(); module->AddEntryComputation(std::move(computation)); Literal result = ExecuteAndTransfer(std::move(module), {}); LiteralTestUtil::ExpectR2Near<float>({{1.0, 3.0}, {2.0, 4.0}}, result, error_spec_); } void CopyOpTest::TestCopyConstantLayout021(size_t n1, size_t n2, size_t n3) { Array3D<int32_t> a(n1, n2, n3); for (size_t i = 0; i < n1; ++i) { for (size_t j = 0; j < n2; ++j) { for (size_t k = 0; k < n3; ++k) { a(i, j, k) = i * n3 * n2 + j * n3 + k; } } } 
HloComputation::Builder builder(TestName()); Literal literal = LiteralUtil::CreateR3FromArray3D(a); HloInstruction* constant = builder.AddInstruction( HloInstruction::CreateConstant(std::move(literal))); builder.AddInstruction(HloInstruction::CreateUnary( constant->shape(), HloOpcode::kCopy, constant)); std::unique_ptr<HloComputation> computation = builder.Build(); auto module = CreateNewVerifiedModule(); module->AddEntryComputation(std::move(computation)); ForceResultLayout(module.get(), LayoutUtil::MakeLayout({1, 2, 0})); Literal result = ExecuteAndTransfer(std::move(module), {}); LiteralTestUtil::ExpectR3EqualArray3D(a, result); } void CopyOpTest::TestCopyConstantLayoutR4( size_t n1, size_t n2, size_t n3, size_t n4, absl::Span<const int64_t> permutation) { Array4D<int32_t> a(n1, n2, n3, n4); for (size_t i = 0; i < n1; ++i) { for (size_t j = 0; j < n2; ++j) { for (size_t k = 0; k < n3; ++k) { for (size_t l = 0; l < n4; ++l) { a(i, j, k, l) = i * n4 * n3 * n2 + j * n4 * n3 + k * n4 + l; } } } } HloComputation::Builder builder(TestName()); Literal literal = LiteralUtil::CreateR4FromArray4D(a); HloInstruction* constant = builder.AddInstruction( HloInstruction::CreateConstant(std::move(literal))); builder.AddInstruction(HloInstruction::CreateUnary( constant->shape(), HloOpcode::kCopy, constant)); std::unique_ptr<HloComputation> computation = builder.Build(); auto module = CreateNewVerifiedModule(); module->AddEntryComputation(std::move(computation)); ForceResultLayout(module.get(), LayoutUtil::MakeLayout(permutation)); Literal result = ExecuteAndTransfer(std::move(module), {}); LiteralTestUtil::ExpectR4EqualArray4D(a, result); } XLA_TEST_F(CopyOpTest, CopyConstantR3Layout021_SingleIncompleteTilePerLayer) { TestCopyConstantLayout021(2, 2, 3); } XLA_TEST_F(CopyOpTest, CopyConstantR3Layout021_SingleCompleteTilePerLayer) { TestCopyConstantLayout021(2, 32, 32); } XLA_TEST_F(CopyOpTest, CopyConstantR3Layout021_MultipleTilesPerLayer) { TestCopyConstantLayout021(2, 70, 35); } XLA_TEST_F(CopyOpTest, CopyConstantR4Layout0231_MultipleTilesPerLayer) { TestCopyConstantLayoutR4(2, 70, 7, 5, {0, 2, 3, 1}); } XLA_TEST_F(CopyOpTest, CopyConstantR4Layout0312_MultipleTilesPerLayer) { TestCopyConstantLayoutR4(2, 14, 5, 35, {0, 3, 1, 2}); } using CopyOpClientTest = ClientLibraryTestBase; XLA_TEST_F(CopyOpClientTest, Copy0x0) { Shape in_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {0, 0}, {0, 1}); Shape out_shape = ShapeUtil::MakeShapeWithDenseLayout(F32, {0, 0}, {1, 0}); auto empty = Literal::CreateFromShape(in_shape); XlaBuilder builder(TestName()); Parameter(&builder, 0, in_shape, "input"); auto input_data = client_->TransferToServer(empty).value(); auto actual = ExecuteAndTransfer(&builder, {input_data.get()}, &out_shape).value(); EXPECT_TRUE(LiteralTestUtil::Equal(empty, actual)); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/copy.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/tests/copy_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
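A minimal standalone sketch of the copy-elision decision seen in MemcpyFusion::Emit above, assuming a hypothetical Slice struct in place of XLA's BufferAllocation::Slice: a device-to-device copy is needed only for source/destination pairs whose buffer slices differ.

#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical stand-in for a buffer slice: equal offset and size means the
// source already aliases the destination, so no copy thunk would be emitted.
struct Slice {
  int64_t offset;
  int64_t size;
  bool operator==(const Slice& other) const {
    return offset == other.offset && size == other.size;
  }
};

// Count how many (src, dst) pairs actually require a device-to-device copy.
int CountCopiesNeeded(const std::vector<Slice>& src,
                      const std::vector<Slice>& dst) {
  int copies = 0;
  for (size_t i = 0; i < src.size() && i < dst.size(); ++i) {
    if (!(src[i] == dst[i])) ++copies;
  }
  return copies;
}

int main() {
  // First pair aliases, second does not: only one copy is required.
  std::cout << CountCopiesNeeded({{0, 64}, {64, 64}}, {{0, 64}, {128, 64}})
            << "\n";  // prints 1
}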
800feb57-d016-4a87-a837-c3d1bbe0754c
cpp
tensorflow/tensorflow
reduction_base
third_party/xla/xla/service/gpu/fusions/reduction_base.cc
third_party/xla/xla/service/gpu/fusions/reduction_base_test.cc
#include "xla/service/gpu/fusions/reduction_base.h" #include <algorithm> #include <cstddef> #include <cstdint> #include <optional> #include <variant> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/container/inlined_vector.h" #include "absl/container/node_hash_map.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/types/span.h" #include "llvm/ADT/STLExtras.h" #include "mlir/IR/AffineExpr.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/utils/hlo_query.h" #include "xla/primitive_util.h" #include "xla/service/gpu/fusions/fusion_emitter.h" #include "xla/service/gpu/gpu_fusible.h" #include "xla/service/gpu/hlo_fusion_analysis.h" #include "xla/service/gpu/hlo_traversal.h" #include "xla/service/gpu/ir_emission_utils.h" #include "xla/service/gpu/model/indexing_map.h" #include "xla/service/gpu/reduction_utils.h" #include "xla/shape.h" #include "xla/stream_executor/device_description.h" #include "xla/union_find.h" #include "xla/util.h" #include "xla/xla_data.pb.h" namespace xla { namespace gpu { int RowReductionGetRowsPerWarp(int reduced_dimension_size) { if (WarpSize() % reduced_dimension_size != 0 || reduced_dimension_size >= WarpSize()) { return 1; } return WarpSize() / reduced_dimension_size; } int GetVectorSize(const HloFusionAnalysis& analysis, const ReductionDimensions& reduction_dimensions, int num_threads, Vector3 reduction_tiling) { int64_t minor_dim = reduction_dimensions.dimensions.back(); if (minor_dim % 2 != 0) { return 1; } if (num_threads * 2 > minor_dim) { return 1; } if (MayPreventVectorization(analysis.fusion())) { return 1; } if (reduction_dimensions.is_row_reduction) { constexpr int kRowMinorReduced = ReductionDimensions::kRowMinorReducedDimension; const auto* cuda_cc = std::get_if<se::CudaComputeCapability>( &analysis.device_info().gpu_compute_capability()); if (cuda_cc == nullptr) return 1; if (cuda_cc->IsAtLeast(se::CudaComputeCapability::VOLTA)) return 2; if (cuda_cc->IsAtLeast(se::CudaComputeCapability::PASCAL_)) { return analysis.input_output_info().smallest_input_dtype_bits <= 32 && reduction_dimensions.dimensions[kRowMinorReduced] % (reduction_tiling[kRowMinorReduced] * num_threads) == 0 ? 2 : 1; } return 1; } return 1; } int GetVectorSizeForMlir(const HloFusionAnalysis& analysis, int64_t minor_dim, int num_threads) { if (minor_dim % 2 != 0) { return 1; } if (num_threads * 2 > minor_dim) { return 1; } for (HloInstructionAdaptor hero : analysis.fusion_heroes()) { for (HloInstructionAdaptor operand : hero.GetOperands()) { if (primitive_util::IsComplexType(operand.shape().element_type())) { return 1; } } } if (analysis.input_output_info().smallest_input_dtype_bits >= 64) { return 1; } if (analysis.input_output_info().smallest_input_dtype_bits >= 32) { return 2; } if (num_threads * 4 > minor_dim) { return 2; } return minor_dim % 4 == 0 ? 
4 : 2; } ReductionGroups GroupDisjointReductions(const HloFusionAnalysis& analysis, bool for_mlir) { const int num_fusion_outputs = analysis.fusion_root_count(); CHECK_NE(0, num_fusion_outputs); if (num_fusion_outputs == 1) { return {{{&analysis.fusion_root(0).instruction()}}, {0}, {true}}; } absl::node_hash_map<HloInstructionAdaptor, UnionFind<HloInstructionAdaptor>> disjoint_sets; UnionFind<HloInstructionAdaptor>* first_non_reduction_root = nullptr; absl::node_hash_map<HloInstructionAdaptor, absl::flat_hash_set<HloInstructionAdaptor>> reachable_outputs; absl::flat_hash_set<HloInstructionAdaptor> roots_with_reduction; absl::flat_hash_map<const HloInstruction*, int> root_indices; const auto& roots = analysis.fusion().GetRoots(); ReductionGroups result; result.group_id_per_root.resize(roots.size()); result.is_reduction_root.reserve(roots.size()); for (auto [root, hero] : llvm::zip(roots, analysis.fusion_heroes())) { int index = root_indices.size(); root_indices[&root.instruction()] = index; auto [it, inserted] = disjoint_sets.try_emplace(root, root); CHECK(inserted) << "Duplicate root " << root.ToString(); reachable_outputs[root].insert(root); result.is_reduction_root.push_back( IsRealReductionHero(root.instruction(), hero.instruction())); if (result.is_reduction_root.back()) { roots_with_reduction.insert(root); } else if (first_non_reduction_root != nullptr) { first_non_reduction_root->Merge(&it->second); } else { first_non_reduction_root = &it->second; } } absl::flat_hash_set<HloInstructionAdaptor> instructions; for (const HloInstruction* operand : analysis.fusion().GetParameters()) { instructions.insert(HloInstructionAdaptor{*operand, &analysis.fusion()}); } auto visit = [&](absl::Span<const HloInstructionAdaptor> roots) { HloBfsConsumersFirstTraversal( roots, analysis.fusion(), [&](HloInstructionAdaptor consumer) { auto& consumer_reachable = reachable_outputs[consumer]; for (auto producer : consumer.GetOperands()) { reachable_outputs[producer].insert(consumer_reachable.begin(), consumer_reachable.end()); } instructions.insert(consumer); return TraversalResult::kAdvance; }); }; if (for_mlir) { for (auto root : roots) { visit({root}); } } else { visit(roots); } for (auto instr : instructions) { const auto& reachable = reachable_outputs[instr]; std::vector<HloInstructionAdaptor> reached_output_ids; bool added_to_reduce = false; for (auto output : roots) { bool has_real_hero = roots_with_reduction.contains(output); if (has_real_hero && (hlo_query::IsBroadcastedConstantOrScalar(instr.instruction()))) { if (added_to_reduce) { VLOG(3) << "Skip broadcasted constant or scalar " << instr.ToString(); continue; } } if (reachable.contains(output)) { VLOG(3) << "Reaching " << output.ToString() << " from " << instr.ToString(); reached_output_ids.push_back(output); if (has_real_hero) { added_to_reduce = true; } } } auto& first_reached_output = disjoint_sets.at(reached_output_ids.front()); for (size_t j = 1; j < reached_output_ids.size(); ++j) { first_reached_output.Merge(&disjoint_sets.at(reached_output_ids[j])); } } ConstHloInstructionMap<std::vector<const HloInstruction*>> group_map; for (auto root : roots) { group_map[&disjoint_sets.at(root).Get().instruction()].push_back( &root.instruction()); } result.grouped_roots.reserve(group_map.size()); absl::c_for_each(group_map, [&](auto& it) { for (auto* root : it.second) { result.group_id_per_root[root_indices[root]] = result.grouped_roots.size(); } result.grouped_roots.emplace_back(std::move(it.second)); }); return result; } void 
AddGroupIdConstraint(IndexingMap& map, int64_t root_index, const ReductionGroups& groups) { int group_index = groups.group_id_per_root[root_index]; map.AddConstraint( mlir::getAffineDimExpr(KernelFusionInterface::kIndexingMapBlockIdxDims[1], map.GetMLIRContext()), {group_index, group_index}); } } }
#include "xla/service/gpu/fusions/reduction_base.h" #include <gmock/gmock.h> #include <gtest/gtest.h> #include "xla/service/gpu/gpu_device_info_for_tests.h" #include "xla/service/gpu/hlo_fusion_analysis.h" #include "xla/tests/hlo_test_base.h" namespace xla { namespace gpu { namespace { using ::testing::ElementsAre; using ::testing::SizeIs; using MlirReductionBaseTest = HloTestBase; TEST_F(MlirReductionBaseTest, TwoGroups) { auto module = ParseAndReturnVerifiedModule(R"( add { p0 = f32[] parameter(0) p1 = f32[] parameter(1) ROOT add = f32[] add(p0, p1) } fusion { %p0 = f32[2] parameter(0) %p1 = f32[2] parameter(1) %c0 = f32[] constant(-inf) %r0 = f32[] reduce(%p0, %c0), dimensions={0}, to_apply=add %c1 = f32[] constant(inf) %r1 = f32[] reduce(%p1, %c1), dimensions={0}, to_apply=add ROOT %tuple = (f32[], f32[]) tuple(%r0, %r1) } ENTRY entry { %p0 = f32[2] parameter(0) %p1 = f32[2] parameter(1) ROOT %fusion = (f32[], f32[]) fusion(%p0, %p1), kind=kInput, calls=fusion })") .value(); auto* root = module->entry_computation()->root_instruction(); auto device_info = TestGpuDeviceInfo::CudaOrRocmDeviceInfo(); auto analysis = HloFusionAnalysis::Create(*root, device_info); auto reduction_groups = GroupDisjointReductions(analysis, true); EXPECT_THAT(reduction_groups.grouped_roots, ElementsAre(ElementsAre(&analysis.fusion_root(0).instruction()), ElementsAre(&analysis.fusion_root(1).instruction()))); } TEST_F(MlirReductionBaseTest, OneGroup) { auto module = ParseAndReturnVerifiedModule(R"( %add { %p0 = c128[] parameter(0) %p1 = c128[] parameter(1) ROOT %add.35 = c128[] add(c128[] %p0, c128[] %p1) } %fusion { %p0 = c128[1,2] parameter(0) %c0 = c128[] constant((0, 0)) %reduce = c128[] reduce(%p0, %c0), dimensions={0,1}, to_apply=%add %real = f64[] real(c128[] %reduce) %imag = f64[] imag(c128[] %reduce) %negate = f64[] negate(f64[] %imag) ROOT %tuple.29 = (f64[], f64[]) tuple(f64[] %real, f64[] %negate) } ENTRY entry { %p0 = c128[1,2] parameter(0) ROOT %fusion = (f64[], f64[]) fusion(%p0), kind=kInput, calls=fusion })") .value(); auto device_info = TestGpuDeviceInfo::CudaOrRocmDeviceInfo(); auto* root = module->entry_computation()->root_instruction(); auto analysis = HloFusionAnalysis::Create(*root, device_info); auto reduction_groups = GroupDisjointReductions(analysis, true); EXPECT_THAT(reduction_groups.grouped_roots, SizeIs(1)); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/reduction_base.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/reduction_base_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
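A standalone sketch of the rows-per-warp rule from RowReductionGetRowsPerWarp above, assuming a fixed warp size of 32 (the real code queries WarpSize()): several small reduced rows share one warp only when the reduced dimension divides the warp size evenly.

#include <iostream>

// Returns how many reduced rows fit into a single warp. Anything that is at
// least a warp wide, or that does not divide the warp size evenly, gets 1.
int RowsPerWarp(int reduced_dimension_size, int warp_size = 32) {
  if (reduced_dimension_size >= warp_size ||
      warp_size % reduced_dimension_size != 0) {
    return 1;
  }
  return warp_size / reduced_dimension_size;
}

int main() {
  std::cout << RowsPerWarp(8) << "\n";   // 4: four 8-element rows per 32-lane warp
  std::cout << RowsPerWarp(12) << "\n";  // 1: 12 does not divide 32
  std::cout << RowsPerWarp(64) << "\n";  // 1: a row wider than a warp
}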
e640441a-92c9-460e-bb15-7f5030fe035e
cpp
tensorflow/tensorflow
triton
third_party/xla/xla/service/gpu/fusions/triton.cc
third_party/xla/xla/service/gpu/fusions/triton_test.cc
#include "xla/service/gpu/fusions/triton.h" #include <cstdint> #include <memory> #include <optional> #include <string> #include <tuple> #include <utility> #include <vector> #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "llvm/ADT/STLExtras.h" #include "llvm/IR/Function.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/Module.h" #include "mlir/IR/MLIRContext.h" #include "mlir/Support/LLVM.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/service/gpu/fusions/fusion_emitter.h" #include "xla/service/gpu/fusions/triton/triton_fusion_emitter.h" #include "xla/service/gpu/hlo_traversal.h" #include "xla/service/gpu/ir_emission_utils.h" #include "xla/service/gpu/ir_emitter_context.h" #include "xla/service/gpu/kernel_arguments.h" #include "xla/service/gpu/kernel_reuse_cache.h" #include "xla/service/gpu/launch_dimensions.h" #include "xla/service/gpu/matmul_utils.h" #include "xla/service/gpu/model/tiled_hlo_computation.h" #include "xla/service/gpu/runtime/kernel_thunk.h" #include "xla/service/gpu/triton_fusion_analysis.h" #include "xla/service/llvm_ir/ir_array.h" #include "xla/service/llvm_ir/llvm_util.h" #include "xla/shape.h" #include "xla/status_macros.h" #include "xla/stream_executor/device_description.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { absl::StatusOr<TritonWrapperResult> TritonFusion::GenerateTritonKernelAndWrapper( const HloFusionInstruction& fusion, absl::string_view impl_fn_name, const se::DeviceDescription& device_info, llvm::Module* llvm_module, mlir::MLIRContext* mlir_context) const { const se::GpuComputeCapability& cc = device_info.gpu_compute_capability(); auto backend_config = fusion.backend_config<GpuBackendConfig>()->fusion_backend_config(); absl::string_view fusion_kind = backend_config.kind(); TritonWrapperResult triton_wrapper_result; if (fusion_kind == kTritonFusionKind) { std::optional<LaunchConfig> launch_config = this->launch_config(); if (!launch_config.has_value()) { return absl::InvalidArgumentError(absl::StrCat( "Block level fusion config is required for Triton fusions: ", fusion.ToString())); } TF_ASSIGN_OR_RETURN(triton_wrapper_result, TritonWrapper(impl_fn_name, &fusion, cc, device_info, launch_config->block_level_parameters, llvm_module, *mlir_context)); } else { CHECK_EQ(fusion_kind, kTritonGemmFusionKind); BlockLevelParameters block_level_parameters; if (!backend_config.has_triton_gemm_config()) { block_level_parameters.num_ctas = 1; block_level_parameters.num_stages = 1; block_level_parameters.num_warps = 2; } else { const auto& triton_config = backend_config.triton_gemm_config(); block_level_parameters.num_ctas = triton_config.num_ctas(); block_level_parameters.num_stages = triton_config.num_stages(); block_level_parameters.num_warps = triton_config.num_warps(); } TF_ASSIGN_OR_RETURN( triton_wrapper_result, TritonWrapper(impl_fn_name, &fusion, cc, device_info, block_level_parameters, llvm_module, *mlir_context)); } return triton_wrapper_result; }; absl::StatusOr<FusionEmissionResult> TritonFusion::Emit( IrEmitterContext& ir_emitter_context, const HloFusionInstruction& fusion) const { llvm::IRBuilder builder(ir_emitter_context.llvm_module()->getContext()); VLOG(3) << fusion.ToString(); std::string suggested_kernel_name = std::string(fusion.name()); TF_ASSIGN_OR_RETURN( auto kernel_arguments, 
KernelArguments::Create(ir_emitter_context.buffer_assignment(), &fusion)); const HloComputation* hlo_computation = fusion.fused_instructions_computation(); auto generate = [&]() -> absl::StatusOr<KernelReuseCache::Entry> { VLOG(3) << "Generating: " << suggested_kernel_name; const std::string impl_fn_name = ir_emitter_context.name_uniquer()->GetUniqueName( llvm_ir::SanitizeFunctionName( absl::StrCat(suggested_kernel_name, "_impl"))); TF_ASSIGN_OR_RETURN( TritonWrapperResult triton_wrapper_result, GenerateTritonKernelAndWrapper(fusion, impl_fn_name, ir_emitter_context.gpu_device_info(), ir_emitter_context.llvm_module(), ir_emitter_context.mlir_context())); auto backend_config = fusion.backend_config<GpuBackendConfig>()->fusion_backend_config(); absl::string_view fusion_kind = backend_config.kind(); LaunchDimensions launch_dimensions; if (fusion_kind == kTritonFusionKind) { std::optional<LaunchConfig> launch_config = this->launch_config(); CHECK(launch_config.has_value()); launch_dimensions = std::move(launch_config->launch_dimensions); } else { CHECK_EQ(fusion_kind, kTritonGemmFusionKind); BlockLevelParameters block_level_parameters; if (!backend_config.has_triton_gemm_config()) { LOG(WARNING) << "Using fallback triton GEMM config for op " << fusion.name(); auto& triton_config = *backend_config.mutable_triton_gemm_config(); triton_config.set_block_m(64); triton_config.set_block_k(64); triton_config.set_block_n(64); triton_config.set_split_k(1); triton_config.set_num_stages(1); triton_config.set_num_warps(2); triton_config.set_num_ctas(1); } TF_ASSIGN_OR_RETURN( TritonGemmConfig config, TritonGemmConfig::FromProto(backend_config.triton_gemm_config())); TF_ASSIGN_OR_RETURN(auto analysis, TritonFusionAnalysis::Execute( *hlo_computation, config.split_k)); TF_ASSIGN_OR_RETURN( launch_dimensions, GetMatMulLaunchDimensions(analysis, analysis_.fusion(), config)); } llvm::Function* impl_fn = ir_emitter_context.llvm_module()->getFunction(impl_fn_name); TF_RET_CHECK(impl_fn); llvm::Function* kernel; std::vector<llvm_ir::IrArray> inputs; std::vector<llvm_ir::IrArray> outputs; TF_ASSIGN_OR_RETURN( std::tie(kernel, inputs, outputs), BuildKernelPrototype(ir_emitter_context, suggested_kernel_name, kernel_arguments.args(), impl_fn->arg_size(), launch_dimensions, &builder)); llvm::Function* prototype_func = builder.GetInsertBlock()->getParent(); prototype_func->splice(prototype_func->begin(), impl_fn); for (const auto& [arg, ir_array] : llvm::zip(impl_fn->args(), inputs)) { arg.replaceAllUsesWith(ir_array.GetBasePointer()); } impl_fn->eraseFromParent(); return {{kernel->getName().str(), launch_dimensions, triton_wrapper_result.cluster_dim, triton_wrapper_result.shmem_bytes}}; }; auto [status_or_entry, was_cached] = ir_emitter_context.kernel_cache().GetWithStatus( hlo_computation, kernel_arguments.args(), "", generate); TF_ASSIGN_OR_RETURN(const KernelReuseCache::Entry* entry, status_or_entry); FusionEmissionResult result; result.thunks.emplace_back(std::make_unique<KernelThunk>( &fusion, entry->kernel_name, kernel_arguments.args(), entry->launch_dimensions, entry->cluster_dim, entry->shmem_bytes)); return result; } std::optional<TritonFusion::LaunchConfig> TritonFusion::launch_config() const { if (analysis_.fusion_backend_config().has_block_level_fusion_config()) { BlockLevelParameters block_level_parameters = BlockLevelParameters::FromBlockLevelFusionConfig( analysis_.fusion_backend_config().block_level_fusion_config()); int64_t num_blocks = 1; for (auto [dim_size, dim_tile_size] : 
llvm::zip(analysis_.fusion_root(0).shape().dimensions(), block_level_parameters.output_tile_sizes)) { num_blocks *= (dim_size + dim_tile_size - 1) / dim_tile_size; } LaunchConfig launch_config; launch_config.launch_dimensions = LaunchDimensions{ static_cast<uint64_t>(num_blocks), static_cast<uint64_t>(block_level_parameters.num_warps * WarpSize())}; launch_config.block_level_parameters = std::move(block_level_parameters); return launch_config; } return std::nullopt; } } }
#include "xla/service/gpu/fusions/triton.h" #include <memory> #include <optional> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/status/status.h" #include "mlir/IR/MLIRContext.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/service/gpu/fusions/fusion_emitter.h" #include "xla/service/gpu/fusions/fusions.h" #include "xla/service/gpu/gpu_device_info_for_tests.h" #include "xla/service/gpu/hlo_fusion_analysis.h" #include "xla/stream_executor/device_description.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/status_matchers.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { using ::testing::ElementsAre; using ::tsl::testing::StatusIs; class TritonFusionTest : public HloTestBase {}; TEST_F(TritonFusionTest, TritonFusionWithBlockLevelFusionConfig_LaunchConfigIsCorrect) { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"( triton_computation { param_0 = f32[125,127] parameter(0) ROOT abs = f32[125,127] abs(param_0) } ENTRY entry_computation { param_0 = f32[125,127] parameter(0) ROOT fusion.1 = f32[125,127] fusion(param_0), kind=kCustom, calls=triton_computation, backend_config={"fusion_backend_config":{ "kind":"__triton", "block_level_fusion_config":{"output_tile_sizes":["3","127"], "num_warps":"4"}}} })")); stream_executor::DeviceDescription device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo(); auto* root = module->entry_computation()->root_instruction(); HloFusionAnalysis analysis = HloFusionAnalysis::Create(*root, device_info); std::unique_ptr<FusionInterface> emitter = GetFusionEmitter(PreBufferAssignmentFusionInfo{analysis}); auto triton_fusion = dynamic_cast<TritonFusion*>(emitter.get()); ASSERT_NE(triton_fusion, nullptr); std::optional<TritonFusion::LaunchConfig> launch_config = triton_fusion->launch_config(); ASSERT_NE(launch_config, std::nullopt); EXPECT_EQ(launch_config->launch_dimensions.num_blocks(), 42); EXPECT_EQ(launch_config->launch_dimensions.num_threads_per_block(), 128); EXPECT_THAT(launch_config->block_level_parameters.output_tile_sizes, ElementsAre(3, 127)); } TEST_F(TritonFusionTest, TritonFusionWithoutBlockLevelFusionConfig_LaunchConfigIsNullopt) { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"( triton_computation { param_0 = f32[125,127] parameter(0) ROOT abs = f32[125,127] abs(param_0) } ENTRY entry_computation { param_0 = f32[125,127] parameter(0) ROOT fusion = f32[125,127] fusion(param_0), kind=kCustom, calls=triton_computation, backend_config={"fusion_backend_config":{"kind":"__triton"}} })")); stream_executor::DeviceDescription device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo(); auto* root = module->entry_computation()->root_instruction(); HloFusionAnalysis analysis = HloFusionAnalysis::Create(*root, device_info); std::unique_ptr<FusionInterface> emitter = GetFusionEmitter(PreBufferAssignmentFusionInfo{analysis}); auto triton_fusion_emitter = dynamic_cast<TritonFusion*>(emitter.get()); ASSERT_NE(triton_fusion_emitter, nullptr); EXPECT_EQ(triton_fusion_emitter->launch_config(), std::nullopt); mlir::MLIRContext mlir_context; EXPECT_THAT(triton_fusion_emitter->GenerateTritonKernelAndWrapper( *::xla::Cast<HloFusionInstruction>(root), "random_name", device_info, nullptr, &mlir_context), StatusIs(absl::StatusCode::kInvalidArgument)); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/triton.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/triton_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
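A standalone sketch of the launch-grid arithmetic in TritonFusion::launch_config above, using only the standard library: one block per output tile via per-dimension ceiling division, which is what the LaunchConfigIsCorrect test checks (42 blocks for a [125,127] output tiled as [3,127]).

#include <cstdint>
#include <iostream>
#include <vector>

// One block per output tile: multiply the per-dimension ceiling divisions.
int64_t NumBlocks(const std::vector<int64_t>& dims,
                  const std::vector<int64_t>& tile_sizes) {
  int64_t num_blocks = 1;
  for (size_t i = 0; i < dims.size() && i < tile_sizes.size(); ++i) {
    num_blocks *= (dims[i] + tile_sizes[i] - 1) / tile_sizes[i];
  }
  return num_blocks;
}

int main() {
  // ceil(125/3) * ceil(127/127) = 42 blocks; with 4 warps of 32 threads each,
  // the test above also expects 128 threads per block.
  std::cout << NumBlocks({125, 127}, {3, 127}) << "\n";  // prints 42
}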
3829685b-1880-4132-b56c-60ef31326b82
cpp
tensorflow/tensorflow
xla_gpu_ops
third_party/xla/xla/service/gpu/fusions/ir/xla_gpu_ops.cc
third_party/xla/xla/service/gpu/fusions/ir/xla_gpu_ops_test.cc
#include "xla/service/gpu/fusions/ir/xla_gpu_ops.h" #include <cstdint> #include <optional> #include <utility> #include <vector> #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/STLFunctionalExtras.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/Support/Casting.h" #include "llvm/Support/LogicalResult.h" #include "mlir/Dialect/Arith/IR/Arith.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/AffineExpr.h" #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/DialectImplementation.h" #include "mlir/IR/MLIRContext.h" #include "mlir/IR/OpDefinition.h" #include "mlir/IR/OpImplementation.h" #include "mlir/IR/OperationSupport.h" #include "mlir/IR/PatternMatch.h" #include "mlir/IR/SymbolTable.h" #include "mlir/IR/TypeRange.h" #include "mlir/IR/TypeUtilities.h" #include "mlir/IR/Types.h" #include "mlir/IR/Value.h" #include "mlir/IR/ValueRange.h" #include "mlir/Support/LLVM.h" #include "mlir/Support/LogicalResult.h" #include "xla/service/gpu/fusions/ir/xla_gpu_dialect.cc.inc" #include "xla/service/gpu/model/indexing_map.h" #include "xla/service/gpu/model/indexing_map_serialization.h" namespace xla { namespace gpu { namespace { using llvm::ArrayRef; using mlir::AffineExpr; using mlir::AffineMap; using mlir::Block; using mlir::DenseI64ArrayAttr; using mlir::failure; using mlir::getAffineConstantExpr; using mlir::getAffineDimExpr; using mlir::getAffineSymbolExpr; using mlir::Location; using mlir::LogicalResult; using mlir::MLIRContext; using mlir::OpAsmParser; using mlir::OpAsmPrinter; using mlir::OpBuilder; using mlir::OperationState; using mlir::ParseResult; using mlir::PatternRewriter; using mlir::RankedTensorType; using mlir::Region; using mlir::SmallVector; using mlir::success; using mlir::Type; using mlir::TypeRange; using mlir::Value; using mlir::ValueRange; namespace arith = mlir::arith; } LogicalResult PureCallOp::verifySymbolUses( mlir::SymbolTableCollection& symbolTable) { auto callee = getCalleeAttr(); auto function = symbolTable.lookupNearestSymbolFrom<mlir::func::FuncOp>(*this, callee); if (!function) { return emitError("'f' attribute refers to an undefined function: ") << callee; } int func_arg_count = function.getFunctionType().getNumInputs(); int arg_count = getOperands().size(); if (arg_count != func_arg_count) { return emitError() << "argument count mismatch: 'operands' has " << arg_count << " arguments, but '" << callee << "' expects " << func_arg_count; } return success(); } void AllocateSharedOp::getAsmResultNames( llvm::function_ref<void(mlir::Value, mlir::StringRef)> setNameFn) { setNameFn(getResult(), "shmem"); } void ApplyIndexingOp::build(OpBuilder& builder, OperationState& result, ValueRange dims, ValueRange symbols, const IndexingMap& indexing_map) { SmallVector<Value, 4> operands; operands.reserve(dims.size() + symbols.size()); operands.append(dims.begin(), dims.end()); operands.append(symbols.begin(), symbols.end()); build(builder, result, operands, indexing_map); } void ApplyIndexingOp::build(OpBuilder& builder, OperationState& result, ValueRange operands, const IndexingMap& indexing_map) { SmallVector<Type, 2> result_types(indexing_map.GetAffineMap().getNumResults(), builder.getIndexType()); IndexingMapAttr indexing_map_attr = IndexingMapAttr::get(builder.getContext(), indexing_map); build(builder, result, result_types, operands, indexing_map_attr); } void ApplyIndexingOp::build(OpBuilder& builder, OperationState& result, ValueRange operands, AffineMap affine_map, 
ArrayRef<IndexingMap::Variable> dim_vars, ArrayRef<IndexingMap::Variable> range_vars) { IndexingMap indexing_map(affine_map, dim_vars, range_vars, {}); build(builder, result, operands, indexing_map); } ParseResult parseOperands( OpAsmParser& parser, SmallVector<OpAsmParser::UnresolvedOperand, 4>* operands) { OpAsmParser::UnresolvedOperand operand; return parser.parseCommaSeparatedList( [&]() { return parser.parseOperand(operands->emplace_back()); }); } ParseResult ApplyIndexingOp::parse(OpAsmParser& parser, OperationState& result) { mlir::Builder& builder = parser.getBuilder(); auto index_type = builder.getIndexType(); IndexingMapAttr indexing_map_attr; if (parser.parseAttribute(indexing_map_attr, "indexing_map_attr", result.attributes)) { return failure(); } SmallVector<OpAsmParser::UnresolvedOperand, 4> operands; SmallVector<int64_t, 4> lower_bounds, upper_bounds; if (succeeded(parser.parseOptionalLParen())) { if (parseOperands(parser, &operands) || parser.parseRParen()) { return failure(); } } if (succeeded(parser.parseOptionalLSquare())) { if (parseOperands(parser, &operands) || parser.parseRSquare()) { return failure(); } } if (parser.resolveOperands(operands, index_type, result.operands) || parser.parseOptionalAttrDict(result.attributes)) { return failure(); } auto map = indexing_map_attr.getIndexingMap().GetAffineMap(); result.addTypes(SmallVector<Type, 2>(map.getNumResults(), index_type)); return success(); } void ApplyIndexingOp::print(OpAsmPrinter& p) { AffineMap affine_map = getIndexingMapAttr().getIndexingMap().GetAffineMap(); p << " " << getIndexingMapAttr(); auto operands = getOperands(); unsigned num_dimensions = affine_map.getNumDims(); if (num_dimensions > 0) { p << '('; auto dimension_operands = operands.slice(0, num_dimensions); llvm::interleaveComma(dimension_operands, p); p << ')'; } unsigned num_symbols = affine_map.getNumSymbols(); if (num_symbols > 0) { p << '['; auto symbol_operands = operands.slice(num_dimensions, num_symbols); llvm::interleaveComma(symbol_operands, p); p << ']'; } p.printOptionalAttrDict((*this)->getAttrs(), {"indexing_map_attr"}); } LogicalResult ApplyIndexingOp::verify() { auto affine_map = getIndexingMapAttr().getIndexingMap().GetAffineMap(); unsigned num_variables = affine_map.getNumDims() + affine_map.getNumSymbols(); if (getOperands().size() != num_variables) { return emitOpError( "operand count must match the number of dimensions and symbols in the " "affine map"); } if (!getIndexingMap().GetConstraints().empty()) { return emitOpError("apply indexing op cannot have any constraints"); } return success(); } IndexingMap ApplyIndexingOp::getIndexingMap() { return getIndexingMapAttr().getIndexingMap(); } namespace { struct IndexingMapWithAdditions { IndexingMap indexing_map; SmallVector<Value> added_dim_args; SmallVector<Value> added_sym_args; }; IndexingMapWithAdditions GetNewIndexingMapAfterFoldingSequence( IndexingMap indexing_map, SmallVector<std::pair<int, ApplyIndexingOp>, 2> apply_indexing_ops, mlir::DenseMap<Value, AffineExpr> operand_exprs, MLIRContext* ctx) { int num_dims = indexing_map.GetDimensionCount(); int num_syms = indexing_map.GetSymbolCount(); SmallVector<Value> added_dim_args; SmallVector<Value> added_sym_args; auto new_dim_vars = indexing_map.GetDimVars(); auto new_sym_vars = indexing_map.GetRangeVars(); mlir::DenseMap<AffineExpr, AffineExpr> replacements; for (auto& [operand_id, producer] : apply_indexing_ops) { auto producer_map = producer.getIndexingMap(); mlir::OpResult producer_result = producer->getOpResult(0); int 
producer_result_id = producer_result.getResultNumber(); int num_producer_dims = producer.getAffineMap().getNumDims(); SmallVector<AffineExpr> producer_dim_replacements; SmallVector<AffineExpr> producer_sym_replacements; for (auto& producer_operand : producer->getOpOperands()) { int producer_operand_number = producer_operand.getOperandNumber(); bool is_dim = producer_operand_number < num_producer_dims; auto& replacement_expr = operand_exprs[producer_operand.get()]; if (!replacement_expr) { if (is_dim) { int dim_num = producer_operand_number; replacement_expr = getAffineDimExpr(num_dims + added_dim_args.size(), ctx); added_dim_args.push_back(producer_operand.get()); new_dim_vars.push_back(producer_map.GetDimVars(dim_num)); } else { int sym_num = producer_operand_number - producer.getAffineMap().getNumDims(); replacement_expr = getAffineSymbolExpr(num_syms + added_sym_args.size(), ctx); added_sym_args.push_back(producer_operand.get()); new_sym_vars.push_back(producer_map.GetRangeVar(sym_num)); } } if (is_dim) { producer_dim_replacements.push_back(replacement_expr); } else { producer_sym_replacements.push_back(replacement_expr); } } replacements[operand_exprs[producer_result]] = producer.getAffineMap() .getResult(producer_result_id) .replaceDimsAndSymbols(producer_dim_replacements, producer_sym_replacements); } auto new_affine_map = indexing_map.GetAffineMap().replace( replacements, num_dims + added_dim_args.size(), num_syms + added_sym_args.size()); IndexingMap new_indexing_map(new_affine_map, new_dim_vars, new_sym_vars, {}); return {new_indexing_map, added_dim_args, added_sym_args}; } } namespace { struct SimplifyIndexingMap : public mlir::OpRewritePattern<ApplyIndexingOp> { using OpRewritePattern<ApplyIndexingOp>::OpRewritePattern; LogicalResult matchAndRewrite(ApplyIndexingOp indexing_op, PatternRewriter& rewriter) const override { IndexingMap indexing_map = indexing_op.getIndexingMap(); if (!indexing_map.Simplify()) { return rewriter.notifyMatchFailure(indexing_op, "IndexingMap is already simplified"); } rewriter.replaceOpWithNewOp<ApplyIndexingOp>( indexing_op, indexing_op.getOperands(), indexing_map); return success(); } }; struct RemoveUnusedVariables : public mlir::OpRewritePattern<ApplyIndexingOp> { using OpRewritePattern<ApplyIndexingOp>::OpRewritePattern; LogicalResult matchAndRewrite(ApplyIndexingOp indexing_op, PatternRewriter& rewriter) const override { IndexingMap indexing_map = indexing_op.getIndexingMap(); auto unused_symbols_bit_vector = indexing_map.RemoveUnusedVars(); if (unused_symbols_bit_vector.count() == 0) { return rewriter.notifyMatchFailure(indexing_op, "IndexingMap stayed unchanged"); } SmallVector<Value, 4> operands; operands.reserve(unused_symbols_bit_vector.count()); for (int i = 0; i < unused_symbols_bit_vector.size(); ++i) { if (!unused_symbols_bit_vector[i]) { operands.push_back(indexing_op.getOperand(i)); } } rewriter.replaceOpWithNewOp<ApplyIndexingOp>(indexing_op, operands, indexing_map); return success(); } }; struct MoveSymbolsToDims : public mlir::OpRewritePattern<ApplyIndexingOp> { using OpRewritePattern<ApplyIndexingOp>::OpRewritePattern; LogicalResult matchAndRewrite(ApplyIndexingOp indexing_op, PatternRewriter& rewriter) const override { IndexingMap indexing_map = indexing_op.getIndexingMap(); if (indexing_map.GetSymbolCount() == 0) { return rewriter.notifyMatchFailure(indexing_op, "No symbols found"); } rewriter.replaceOpWithNewOp<ApplyIndexingOp>( indexing_op, indexing_op->getOperands(), indexing_map.ConvertSymbolsToDimensions()); return 
success(); } }; struct FoldApplyIndexingSequence : public mlir::OpRewritePattern<ApplyIndexingOp> { using OpRewritePattern<ApplyIndexingOp>::OpRewritePattern; LogicalResult matchAndRewrite(ApplyIndexingOp indexing_op, PatternRewriter& rewriter) const override { auto indexing_map = indexing_op.getIndexingMap(); SmallVector<std::pair<int, ApplyIndexingOp>, 2> apply_indexing_ops; bool all_apply_indexing_operands_have_one_use = true; for (auto& operand : indexing_op->getOpOperands()) { if (auto producer = operand.get().getDefiningOp<ApplyIndexingOp>()) { apply_indexing_ops.push_back({operand.getOperandNumber(), producer}); all_apply_indexing_operands_have_one_use &= producer->hasOneUse(); } } if (apply_indexing_ops.empty()) { return rewriter.notifyMatchFailure(indexing_op, "No apply_indexing sequences found"); } auto indexing_map_with_no_unused_vars = indexing_map; if (indexing_map_with_no_unused_vars.RemoveUnusedVars().count() > 0) { indexing_map_with_no_unused_vars.RemoveUnusedVars(); return rewriter.notifyMatchFailure(indexing_op, "IndexingMap has unused variables"); } MLIRContext* ctx = indexing_op.getContext(); int num_dims = indexing_op.getAffineMap().getNumDims(); int num_syms = indexing_op.getAffineMap().getNumSymbols(); mlir::DenseMap<Value, AffineExpr> operand_exprs; for (auto& operand : indexing_op->getOpOperands()) { int operand_number = operand.getOperandNumber(); operand_exprs[operand.get()] = operand_number < num_dims ? getAffineDimExpr(operand_number, ctx) : getAffineSymbolExpr(operand_number - num_dims, ctx); } auto replacement = GetNewIndexingMapAfterFoldingSequence( indexing_map, apply_indexing_ops, operand_exprs, ctx); if (!all_apply_indexing_operands_have_one_use && !replacement.indexing_map.Simplify()) { return rewriter.notifyMatchFailure( indexing_op, "Folded indexing map was not simplified"); } int new_num_operands = indexing_op->getNumOperands() + replacement.added_dim_args.size() + replacement.added_sym_args.size(); SmallVector<Value> new_operands; new_operands.reserve(new_num_operands); auto begin = indexing_op.getOperands().begin(); new_operands.append(begin, begin + num_dims); new_operands.append(replacement.added_dim_args); new_operands.append(begin + num_dims, begin + num_dims + num_syms); new_operands.append(replacement.added_sym_args); rewriter.replaceOpWithNewOp<ApplyIndexingOp>(indexing_op, new_operands, replacement.indexing_map); return success(); } }; struct FoldApplyIndexingOperands : public mlir::OpRewritePattern<ApplyIndexingOp> { using OpRewritePattern<ApplyIndexingOp>::OpRewritePattern; LogicalResult matchAndRewrite(ApplyIndexingOp indexing_op, PatternRewriter& rewriter) const override { IndexingMap indexing_map = indexing_op.getIndexingMap(); AffineMap affine_map = indexing_map.GetAffineMap(); MLIRContext* ctx = affine_map.getContext(); unsigned num_operands = indexing_op->getNumOperands(); unsigned num_dims = affine_map.getNumDims(); unsigned num_symbols = affine_map.getNumSymbols(); SmallVector<std::optional<int64_t>> constant_values(num_operands, std::nullopt); int num_constants = 0; for (auto& operand : indexing_op->getOpOperands()) { if (auto constant = operand.get().getDefiningOp<arith::ConstantIndexOp>()) { constant_values[operand.getOperandNumber()] = constant.value(); ++num_constants; } } if (num_constants == 0) { return rewriter.notifyMatchFailure(indexing_op, "No constant operands found"); } SmallVector<AffineExpr, 2> dim_replacements, symbol_replacements; dim_replacements.reserve(num_dims); symbol_replacements.reserve(num_symbols); 
unsigned new_num_operands = indexing_op->getNumOperands() - num_constants; SmallVector<Value, 4> new_operands; new_operands.reserve(new_num_operands); SmallVector<IndexingMap::Variable, 2> new_dim_vars; new_dim_vars.reserve(num_dims); SmallVector<IndexingMap::Variable, 2> new_range_vars; new_range_vars.reserve(num_symbols); unsigned new_num_dims = 0; unsigned new_num_symbols = 0; for (auto [operand, constant_value] : llvm::zip(indexing_op->getOpOperands(), constant_values)) { unsigned operand_id = operand.getOperandNumber(); if (constant_value.has_value()) { if (operand_id < num_dims) { dim_replacements.push_back( getAffineConstantExpr(*constant_value, ctx)); } else { symbol_replacements.push_back( getAffineConstantExpr(*constant_value, ctx)); } } else { new_operands.push_back(operand.get()); if (operand_id < num_dims) { dim_replacements.push_back(getAffineDimExpr(new_num_dims++, ctx)); new_dim_vars.push_back(indexing_map.GetDimVars(operand_id)); } else { symbol_replacements.push_back( getAffineSymbolExpr(new_num_symbols++, ctx)); new_range_vars.push_back( indexing_map.GetRangeVar(operand_id - num_dims)); } } } rewriter.replaceOpWithNewOp<ApplyIndexingOp>( indexing_op, new_operands, affine_map.replaceDimsAndSymbols(dim_replacements, symbol_replacements, new_num_dims, new_num_symbols), new_dim_vars, new_range_vars); return success(); } }; struct FoldApplyIndexingResults : public mlir::OpRewritePattern<ApplyIndexingOp> { using OpRewritePattern<ApplyIndexingOp>::OpRewritePattern; LogicalResult matchAndRewrite(ApplyIndexingOp indexing_op, PatternRewriter& rewriter) const override { Location loc = indexing_op.getLoc(); IndexingMap indexing_map = indexing_op.getIndexingMap(); if (indexing_map.IsKnownEmpty()) { return rewriter.notifyMatchFailure(indexing_op, "Domain of the indexing map is empty"); } AffineMap* affine_map = &indexing_map.GetMutableAffineMap(); unsigned num_results = affine_map->getNumResults(); SmallVector<AffineExpr, 4> new_exprs; new_exprs.reserve(num_results); SmallVector<Value, 4> new_values; new_values.reserve(num_results); for (mlir::OpResult opresult : indexing_op->getOpResults()) { if (opresult.use_empty()) { new_values.push_back(rewriter.create<arith::ConstantIndexOp>(loc, 0)); continue; } unsigned id = opresult.getResultNumber(); AffineExpr result_expr = affine_map->getResult(id); if (auto const_expr = mlir::dyn_cast<mlir::AffineConstantExpr>(result_expr)) { new_values.push_back(rewriter.create<arith::ConstantIndexOp>( loc, const_expr.getValue())); continue; } if (auto dim_expr = mlir::dyn_cast<mlir::AffineDimExpr>(result_expr)) { new_values.push_back(indexing_op.getOperand(dim_expr.getPosition())); continue; } if (auto symbol_expr = mlir::dyn_cast<mlir::AffineSymbolExpr>(result_expr)) { new_values.push_back(indexing_op.getOperand( indexing_map.GetDimVarsCount() + symbol_expr.getPosition())); continue; } new_exprs.push_back(result_expr); new_values.push_back(Value{}); } if (new_exprs.size() == num_results) { return rewriter.notifyMatchFailure( indexing_op, "No constant or dim/symbol expression found"); } *affine_map = AffineMap::get(affine_map->getNumDims(), affine_map->getNumSymbols(), new_exprs, affine_map->getContext()); auto new_indexing_op = rewriter.create<ApplyIndexingOp>( loc, indexing_op.getOperands(), indexing_map); for (int new_result_id = 0, new_indexing_op_result_id = 0; new_result_id < new_values.size(); ++new_result_id) { auto& new_value = new_values[new_result_id]; if (new_value) continue; new_value = 
new_indexing_op.getResult(new_indexing_op_result_id++); } rewriter.replaceOp(indexing_op, new_values); return success(); } }; } void ApplyIndexingOp::getCanonicalizationPatterns( mlir::RewritePatternSet& results, MLIRContext* context) { results.add<FoldApplyIndexingOperands, FoldApplyIndexingResults, FoldApplyIndexingSequence, MoveSymbolsToDims, RemoveUnusedVariables, SimplifyIndexingMap>(context); } mlir::LogicalResult ApplyIndexingOp::fold( FoldAdaptor adaptor, llvm::SmallVectorImpl<mlir::OpFoldResult>& results) { auto map = getAffineMap(); for (auto expr : map.getResults()) { if (auto dim = mlir::dyn_cast<mlir::AffineDimExpr>(expr)) { results.push_back(getOperand(dim.getPosition())); } else if (auto sym = mlir::dyn_cast<mlir::AffineSymbolExpr>(expr)) { results.push_back(getOperand(map.getNumDims() + sym.getPosition())); } else { results.clear(); return failure(); } } return success(); } void AtomicRMWOp::getAsmResultNames( llvm::function_ref<void(mlir::Value, mlir::StringRef)> setNameFn) { setNameFn(getResult(), "atomic_rmw"); } void AtomicRMWOp::build(OpBuilder& builder, OperationState& result, Value tensor, ValueRange ivs) { OpBuilder::InsertionGuard g(builder); result.addOperands(tensor); result.addOperands(ivs); result.addTypes(tensor.getType()); auto tensor_type = llvm::cast<RankedTensorType>(tensor.getType()); Region* body = result.addRegion(); builder.createBlock(body); body->addArgument(tensor_type.getElementType(), tensor.getLoc()); } mlir::OpFoldResult AtomicRMWOp::fold(FoldAdaptor adaptor) { auto* body = getBody(); if (&body->front() == body->getTerminator() && body->front().getOperand(0) == body->getArgument(0)) { return getOperand(0); } return {}; } void PureCallOp::getAsmResultNames( llvm::function_ref<void(mlir::Value, mlir::StringRef)> setNameFn) { for (auto result : getResults()) { setNameFn(result, "pure_call"); } } void SyncThreadsOp::getAsmResultNames( llvm::function_ref<void(mlir::Value, mlir::StringRef)> setNameFn) { for (auto result : getResults()) { setNameFn(result, "synced_tensor"); } } void LoopOp::getAsmResultNames( llvm::function_ref<void(mlir::Value, mlir::StringRef)> setNameFn) { for (auto result : getResults()) { setNameFn(result, "xla_loop"); } } void LoopOp::getAsmBlockArgumentNames(mlir::Region& region, mlir::OpAsmSetValueNameFn setFn) { char iv_name = 'i'; for (auto iv : getInductionVars()) { setFn(iv, std::string{iv_name}); if (iv_name <= 'n') { ++iv_name; } } std::string map_result_name = "ra"; char map_result_char = 'a'; for (auto map_result : getIndexingMapResults()) { setFn(map_result, map_result_name); if (map_result_char <= 'z') { ++map_result_char; map_result_name[1] = map_result_char; } } for (auto iv : getRegionIterArgs()) { setFn(iv, "iter"); } } void LoopOp::build(OpBuilder& builder, OperationState& result, IndexingMapAttr indexing_map_attr, ValueRange dims, ValueRange inits, BodyBuilderFn bodyBuilder) { OpBuilder::InsertionGuard guard(builder); int64_t num_ivs = indexing_map_attr.getRangeVars().size(); int64_t num_indexing_map_results = indexing_map_attr.getIndexingMap().GetNumResults(); int64_t num_inits = inits.size(); result.addOperands(dims); result.addOperands(inits); result.addTypes(TypeRange(inits)); Block* body_block = builder.createBlock(result.addRegion()); for (int i = 0, e = num_ivs + num_indexing_map_results; i < e; ++i) { body_block->addArgument(builder.getIndexType(), result.location); } for (auto init_type : TypeRange(inits)) { body_block->addArguments(init_type, result.location); } mlir::OperationName 
opname(LoopOp::getOperationName(), builder.getContext()); result.addAttribute(LoopOp::getIndexingMapAttrAttrName(opname), indexing_map_attr); result.addAttribute( LoopOp::getOperandSegmentSizesAttrName(opname), builder.getDenseI32ArrayAttr({static_cast<int32_t>(dims.size()), static_cast<int32_t>(inits.size())})); if (bodyBuilder) { OpBuilder::InsertionGuard guard(builder); builder.setInsertionPointToStart(body_block); bodyBuilder( builder, result.location, body_block->getArguments().take_front(num_ivs), body_block->getArguments().drop_front(num_ivs).drop_back(num_inits), body_block->getArguments().take_back(num_inits)); } } void LoopOp::build(OpBuilder& builder, OperationState& result, const IndexingMap& indexing_map, ValueRange dims, ValueRange inits, BodyBuilderFn bodyBuilder) { build(builder, result, IndexingMapAttr::get(builder.getContext(), indexing_map), dims, inits, bodyBuilder); } ParseResult LoopOp::parse(OpAsmParser& parser, OperationState& result) { SmallVector<OpAsmParser::Argument, 4> region_args, ivs, map_results, iter_args; SmallVector<OpAsmParser::UnresolvedOperand, 4> dim_operands; auto* ctx = parser.getContext(); OpBuilder b(ctx); Type index_type = b.getIndexType(); if (parser.parseOperandList(dim_operands, OpAsmParser::Delimiter::Paren) || parser.resolveOperands(dim_operands, index_type, result.operands)) return failure(); if (parser.parseArgumentList(ivs, OpAsmParser::Delimiter::Square)) return failure(); for (auto iv : ivs) { region_args.push_back(iv); region_args.back().type = index_type; } if (parser.parseArrow() || parser.parseArgumentList(map_results, OpAsmParser::Delimiter::Paren)) return failure(); for (auto map_result : map_results) { region_args.push_back(map_result); region_args.back().type = index_type; } IndexingMapAttr indexing_map_attr; if (parser.parseKeyword("in") || parser.parseAttribute(indexing_map_attr, "indexing_map_attr", result.attributes)) { return failure(); } SmallVector<OpAsmParser::UnresolvedOperand, 4> init_operands; if (parser.parseKeyword("iter_args") || parser.parseAssignmentList(iter_args, init_operands) || parser.parseArrowTypeList(result.types) || parser.resolveOperands(init_operands, result.types, parser.getNameLoc(), result.operands)) return failure(); for (auto [index, iter_arg] : llvm::enumerate(iter_args)) { region_args.push_back(iter_arg); region_args.back().type = result.types[index]; } if (region_args.size() != result.types.size() + ivs.size() + map_results.size()) { return parser.emitError( parser.getNameLoc(), "mismatch in number of induction variables + loop-carried values + " "number of indexing map results variables and the number of results"); } Region* body = result.addRegion(); if (parser.parseRegion(*body, region_args)) return failure(); LoopOp::ensureTerminator(*body, b, result.location); result.addAttribute( LoopOp::getOperandSegmentSizeAttr(), b.getDenseI32ArrayAttr({static_cast<int32_t>(dim_operands.size()), static_cast<int32_t>(iter_args.size())})); if (parser.parseOptionalAttrDict(result.attributes)) return failure(); return success(); } void LoopOp::print(OpAsmPrinter& p) { p << " (" << getDims() << ")[" << getInductionVars() << "] -> (" << getIndexingMapResults() << ") in " << getIndexingMapAttr() << " iter_args("; llvm::interleaveComma( llvm::zip(getRegionIterArgs(), getInits()), p, [&](auto it) { p << std::get<0>(it) << " = " << std::get<1>(it); }); p << ") -> (" << getInits().getTypes() << ") "; p.printRegion(getRegion(), false, true); p.printOptionalAttrDict((*this)->getAttrs(), { 
getIndexingMapAttrAttrName(), getOperandSegmentSizesAttrName(), }); } LogicalResult LoopOp::verify() { if (getInits().size() != getNumResults()) { return emitOpError("mismatch in number of loop-carried values and results"); } IndexingMap indexing_map = getIndexingMap(); if (indexing_map.GetRangeVarsCount() != getNumInductionVars()) { return emitOpError() << "mismatch in number of induction variables " << getNumInductionVars() << " and RangeVars in the indexing map " << ToString(indexing_map); } if (indexing_map.GetDimVarsCount() != getDims().size()) { return emitOpError() << "mismatch in number of dims operands " << getDims().size() << " and DimVars in the indexing map " << ToString(indexing_map); } for (auto [bb_arg, result_type, init] : llvm::zip(getRegionIterArgs(), getResultTypes(), getInits())) { if (bb_arg.getType() != result_type || init.getType() != result_type) { return emitOpError() << "block iter arg type = " << bb_arg.getType() << ", result type = " << result_type << " and init operand type = " << init.getType() << " should match"; } } return success(); } IndexingMap LoopOp::getIndexingMap() { return getIndexingMapAttr().getIndexingMap(); } namespace { struct SimplifyLoopOfApplyIndexing : public mlir::OpRewritePattern<LoopOp> { using OpRewritePattern<LoopOp>::OpRewritePattern; LogicalResult matchAndRewrite(LoopOp loop_op, PatternRewriter& rewriter) const override { auto loop_indexing_map = loop_op.getIndexingMap(); MLIRContext* ctx = loop_op.getContext(); int num_dims = loop_indexing_map.GetDimVarsCount(); SmallVector<std::pair<int, ApplyIndexingOp>, 2> apply_indexing_ops; bool all_apply_indexing_operands_have_one_use = true; for (auto& operand : loop_op->getOpOperands().take_front(num_dims)) { if (auto producer = operand.get().getDefiningOp<ApplyIndexingOp>()) { if (producer.getIndexingMap().GetSymbolCount() > 0) { continue; } apply_indexing_ops.push_back({operand.getOperandNumber(), producer}); all_apply_indexing_operands_have_one_use &= producer->hasOneUse(); } } if (apply_indexing_ops.empty()) { return rewriter.notifyMatchFailure( loop_op, "No loop(apply_indexing) patterns found. 
Note that producer " "apply_indexing should have already been simplified via " "MoveSymbolsToDims pattern."); } mlir::DenseMap<Value, AffineExpr> operand_exprs; for (auto& operand : loop_op->getOpOperands().take_front(num_dims)) { int operand_number = operand.getOperandNumber(); operand_exprs[operand.get()] = getAffineDimExpr(operand_number, ctx); } auto replacement = GetNewIndexingMapAfterFoldingSequence( loop_indexing_map, apply_indexing_ops, operand_exprs, ctx); if (!all_apply_indexing_operands_have_one_use && !replacement.indexing_map.Simplify()) { return rewriter.notifyMatchFailure( loop_op, "Folded indexing map of the loop op was not simplified"); } int new_num_dims = num_dims + replacement.added_dim_args.size(); SmallVector<Value> aggregate_dims; aggregate_dims.reserve(new_num_dims); auto begin = loop_op.getOperands().begin(); aggregate_dims.append(begin, begin + num_dims); aggregate_dims.append(replacement.added_dim_args); SmallVector<Value, 4> used_dims; used_dims.reserve(aggregate_dims.size()); auto used_dim_bit_vector = ~replacement.indexing_map.RemoveUnusedVars(); for (auto used_dim_idx : used_dim_bit_vector.set_bits()) { if (used_dim_idx < new_num_dims) { used_dims.push_back(aggregate_dims[used_dim_idx]); } } auto new_loop_op = rewriter.create<LoopOp>(loop_op.getLoc(), replacement.indexing_map, used_dims, loop_op.getInits()); Block* original_block = &loop_op.getRegion().front(); Block* new_block = &new_loop_op.getRegion().front(); rewriter.mergeBlocks(original_block, new_block, new_block->getArguments()); rewriter.replaceOp(loop_op, new_loop_op.getResults()); return success(); } }; } void LoopOp::getCanonicalizationPatterns(mlir::RewritePatternSet& results, MLIRContext* context) { results.add<SimplifyLoopOfApplyIndexing>(context); } VariableConstraints GetConstraintsForVariables(const IndexingMap& map) { VariableConstraints result; result.constraints_for_dims.resize(map.GetDimensionCount()); result.constraints_for_symbols.resize(map.GetSymbolCount()); for (const auto& constraint : map.GetConstraints()) { constraint.first.walk([&](mlir::AffineExpr leaf) { if (auto dim = mlir::dyn_cast<mlir::AffineDimExpr>(leaf)) { result.constraints_for_dims[dim.getPosition()].insert(constraint); } else if (auto sym = mlir::dyn_cast<mlir::AffineSymbolExpr>(leaf)) { result.constraints_for_symbols[sym.getPosition()].insert(constraint); } }); } return result; } LogicalResult MaterializeOp::verify() { IndexingMap map_in = getMap().getIndexingMap(); IndexingMap map_out = getResult().getType().getIndexingMapAttr().getIndexingMap(); if (getIndices().size() != map_in.GetDimVarsCount()) { return emitOpError() << "number of indices must match number of dimensions " "of indexing map"; } if (map_in.GetDimVarsCount() == 0 || map_out.GetDimVarsCount() == 0) { return emitOpError() << "must have thread_id dimension in both indexing maps"; } if (map_in.GetDimVars(0).bounds != map_out.GetDimVars(0).bounds) { return emitOpError() << "thread_id dimension must have the same bounds in " "both indexing maps"; } auto variable_constraints_in = GetConstraintsForVariables(map_in); auto variable_constraints_out = GetConstraintsForVariables(map_out); if (variable_constraints_in.constraints_for_dims[0] != variable_constraints_out.constraints_for_dims[0]) { return emitOpError() << "constraints of indexing maps must be equal for " << "the thread_id dimension"; } if (map_in.GetRangeVarsCount() != map_out.GetRangeVarsCount()) { return emitOpError() << "number of symbols in both indexing_maps must match"; } for (auto const& 
[range_in, range_out] : llvm::zip(map_in.GetRangeVars(), map_out.GetRangeVars())) { if (range_in.bounds != range_out.bounds) { return emitOpError() << "domain of symbols of indexing_maps must match"; } } if (variable_constraints_in.constraints_for_symbols != variable_constraints_out.constraints_for_symbols) { return emitOpError() << "constraints of indexing maps must be equal for all symbols"; } if (map_out.GetDimVarsCount() > 1) { for (auto expr : map_out.GetAffineMap().getResults()) { if (expr.isFunctionOfDim(1)) { return emitOpError() << "vector mapping indices must not depend on the " << "block ID"; } } } if (map_in.GetDimVarsCount() > 1 && map_out.GetDimVarsCount() > 1) { if (variable_constraints_in.constraints_for_dims[1] != variable_constraints_out.constraints_for_dims[1]) { return emitOpError() << "constraints of indexing maps must be equal for " << "the block_id dimension"; } } else if (map_in.GetDimVarsCount() > 1 && !variable_constraints_in.constraints_for_dims[1].empty()) { return emitOpError() << "constraints of indexing maps must be equal for " << "the block_id dimension"; } else if (map_out.GetDimVarsCount() > 1 && !variable_constraints_out.constraints_for_dims[1].empty()) { return emitOpError() << "constraints of indexing maps must be equal for " << "the block_id dimension"; } return success(); } LogicalResult InsertOp::verify() { if (!getMap().getIndexingMap().GetRangeVars().empty()) { return emitOpError() << "insert_op map must not have any symbols"; } int64_t vector_map_num_results = getSource().getType().getIndexingMapAttr().getNumResults(); if (vector_map_num_results != getMap().getIndexingMap().GetDimVars().size()) { return emitOpError() << "source map result count must equal insert_op's " "map's dimension count"; } return success(); } void ReindexOp::build(mlir::OpBuilder& builder, mlir::OperationState& result, mlir::Type type, mlir::Value operand, mlir::Value padding, const xla::gpu::IndexingMap& indexing_map) { IndexingMapAttr indexing_map_attr = IndexingMapAttr::get(builder.getContext(), indexing_map); build(builder, result, type, operand, padding, indexing_map_attr); } SmallVector<Type, 2> inferReductionResultTypes(TypeRange input_types, ArrayRef<int64_t> reduced_dims) { auto input_shape = mlir::cast<RankedTensorType>(input_types.front()).getShape(); auto num_reduced_dims = reduced_dims.size(); SmallVector<int64_t, 4> output_shape; output_shape.reserve(input_shape.size() - num_reduced_dims); int reduce_dim = 0; for (int64_t i = 0; i < input_shape.size(); ++i) { if (reduce_dim < num_reduced_dims && i == reduced_dims[reduce_dim]) { ++reduce_dim; continue; } output_shape.push_back(input_shape[i]); } SmallVector<Type, 2> result_types; result_types.reserve(input_types.size()); for (auto input_type : input_types) { result_types.push_back(RankedTensorType::get( output_shape, mlir::cast<RankedTensorType>(input_type).getElementType())); } return result_types; } SmallVector<Type, 2> inferReductionInitTypes(TypeRange input_types) { SmallVector<Type, 2> init_types; init_types.reserve(input_types.size()); for (auto input_type : input_types) { init_types.push_back( mlir::cast<RankedTensorType>(input_type).getElementType()); } return init_types; } LogicalResult ReduceOp::inferReturnTypes( MLIRContext* context, std::optional<Location> location, ValueRange operands, mlir::DictionaryAttr attributes, mlir::OpaqueProperties properties, mlir::RegionRange regions, mlir::SmallVectorImpl<Type>& inferredReturnTypes) { ReduceOp::Adaptor adaptor(operands, attributes, properties, regions); 
inferredReturnTypes.append(inferReductionResultTypes( TypeRange{adaptor.getInputs()}, adaptor.getDimensions())); return success(); } ParseResult ReduceOp::parse(OpAsmParser& parser, OperationState& result) { SmallVector<OpAsmParser::UnresolvedOperand, 4> inputs; SmallVector<OpAsmParser::UnresolvedOperand, 4> inits; SmallVector<int64_t, 2> dimensions; mlir::StringAttr combiner; SmallVector<Type, 2> input_types; SmallVector<Type, 2> result_types; if (parser.parseLParen() || parseOperands(parser, &inputs) || parser.parseRParen() || parser.parseKeyword("inits") || parser.parseLParen() || parseOperands(parser, &inits) || parser.parseRParen() || parser.parseKeyword("dimensions") || parser.parseEqual() || parser.parseCommaSeparatedList(OpAsmParser::Delimiter::Square, [&]() -> ParseResult { return parser.parseInteger( dimensions.emplace_back()); }) || parser.parseKeyword("combiner") || parser.parseEqual() || parser.parseSymbolName(combiner) || parser.parseOptionalAttrDict(result.attributes) || parser.parseColonTypeList(input_types) || parser.parseKeyword("to") || parser.parseTypeList(result_types)) { return failure(); } auto ctx = result.getContext(); mlir::OperationName opname(ReduceOp::getOperationName(), ctx); result.addAttribute(ReduceOp::getDimensionsAttrName(opname), DenseI64ArrayAttr::get(ctx, dimensions)); result.addAttribute(ReduceOp::getCombinerAttrName(opname), mlir::FlatSymbolRefAttr::get(ctx, combiner)); result.addTypes(result_types); auto init_types = inferReductionInitTypes(input_types); mlir::SMLoc loc = parser.getCurrentLocation(); if (parser.resolveOperands(inputs, input_types, loc, result.operands) || parser.resolveOperands(inits, init_types, loc, result.operands)) { return failure(); } return success(); } void ReduceOp::print(OpAsmPrinter& p) { p << '(' << getInputs() << ") inits(" << getInits() << ") dimensions=[" << getDimensions() << "] combiner=@" << getCombiner(); p.printOptionalAttrDict((*this)->getAttrs(), {getCombinerAttrName(), getDimensionsAttrName()}); p << " : " << TypeRange(getInputs()) << " to " << TypeRange(getResults()); } LogicalResult ReduceOp::verify() { auto inferred_init_types = inferReductionInitTypes(TypeRange(getInputs())); for (auto [inferred_init_type, init_type] : llvm::zip(inferred_init_types, TypeRange(getInits()))) { if (inferred_init_type != init_type) { return emitOpError() << "init type " << init_type << " does not match inferred type " << inferred_init_type; } } auto module = this->getOperation()->getParentOfType<mlir::ModuleOp>(); auto combiner = module.lookupSymbol<mlir::func::FuncOp>(getCombinerAttr()); if (!combiner) { return emitOpError() << "combiner `@" << getCombiner() << "` not found"; } SmallVector<Type, 2> combiner_operand_types; combiner_operand_types.reserve(getNumOperands()); combiner_operand_types.append(inferred_init_types); combiner_operand_types.append(inferred_init_types); auto expected_combiner_type = mlir::FunctionType::get( getContext(), combiner_operand_types, inferred_init_types); if (expected_combiner_type != combiner.getFunctionType()) { return emitOpError() << "provided combiner `@" << getCombiner() << " expected to have type " << expected_combiner_type << " but got " << combiner.getFunctionType(); } return success(); } ParseResult ShuffleReduceOp::parse(OpAsmParser& parser, OperationState& result) { SmallVector<OpAsmParser::UnresolvedOperand, 4> inputs; mlir::StringAttr combiner; int64_t max_distance; SmallVector<Type, 2> operand_types; mlir::SMLoc loc = parser.getCurrentLocation(); if (parser.parseLParen() || 
parseOperands(parser, &inputs) || parser.parseRParen() || parser.parseKeyword("to") || parser.parseInteger(max_distance) || parser.parseKeyword("combiner") || parser.parseEqual() || parser.parseSymbolName(combiner) || parser.parseOptionalAttrDict(result.attributes) || parser.parseColonTypeList(operand_types) || parser.resolveOperands(inputs, operand_types, loc, result.operands)) { return failure(); } auto ctx = result.getContext(); mlir::OperationName opname(ShuffleReduceOp::getOperationName(), ctx); result.addAttribute(ShuffleReduceOp::getCombinerAttrName(opname), mlir::FlatSymbolRefAttr::get(ctx, combiner)); result.addAttribute( ShuffleReduceOp::getMaxDistanceAttrName(opname), mlir::IntegerAttr::get(mlir::IntegerType::get(ctx, 64), max_distance)); result.addTypes(operand_types); return success(); } void ShuffleReduceOp::print(OpAsmPrinter& p) { p << '(' << getOperands() << ") to " << getMaxDistance() << " combiner=@" << getCombiner(); p.printOptionalAttrDict((*this)->getAttrs(), {getCombinerAttrName(), getMaxDistanceAttrName()}); p << " : " << TypeRange(getResultTypes()); } } } #define GET_OP_CLASSES #include "xla/service/gpu/fusions/ir/xla_gpu_ops.cc.inc"
#include "xla/service/gpu/fusions/ir/xla_gpu_ops.h" #include <gtest/gtest.h> #include "xla/service/gpu/model/indexing_map.h" #include "xla/service/gpu/model/indexing_map_serialization.h" #include "xla/service/gpu/model/indexing_test_utils.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/test.h" namespace xla::gpu { namespace { using ::testing::IsEmpty; using ::testing::Pair; using ::testing::UnorderedElementsAre; class XLAGPUOpsTest : public HloTestBase { public: mlir::MLIRContext mlir_context_; }; TEST_F(XLAGPUOpsTest, GetConstraintsForVariables) { auto map = *ParseIndexingMap(R"( (d0, d1)[s0, s1] -> (d0 + s0, d1 + s1), domain: d0 in [0, 5], d1 in [0, 2], s0 in [0, 32], s1 in [0, 1024], d1 + s1 in [0, 4], d1 mod 32 in [0, 6], s0 + s1 in [0, 3], s0 mod 4 in [0, 1], s1 mod 4 in [0, 2] )", &mlir_context_); auto constraints_for_variables = GetConstraintsForVariables(map); EXPECT_THAT(constraints_for_variables.constraints_for_dims[0], UnorderedElementsAre()); EXPECT_THAT( constraints_for_variables.constraints_for_dims[1], UnorderedElementsAre( Pair(ParseAffineExpr("s1 + d1", &mlir_context_), Interval{0, 4}), Pair(ParseAffineExpr("d1 mod 32", &mlir_context_), Interval{0, 6}))); EXPECT_THAT( constraints_for_variables.constraints_for_symbols[0], UnorderedElementsAre( Pair(ParseAffineExpr("s0 mod 4", &mlir_context_), Interval{0, 1}), Pair(ParseAffineExpr("s0 + s1", &mlir_context_), Interval{0, 3}))); EXPECT_THAT( constraints_for_variables.constraints_for_symbols[1], UnorderedElementsAre( Pair(ParseAffineExpr("s1 mod 4", &mlir_context_), Interval{0, 2}), Pair(ParseAffineExpr("s0 + s1", &mlir_context_), Interval{0, 3}), Pair(ParseAffineExpr("s1 + d1", &mlir_context_), Interval{0, 4}))); } TEST_F(XLAGPUOpsTest, GetConstraintsForVariablesEmpty) { auto map = *ParseIndexingMap(R"( (d0, d1)[s0, s1] -> (d0 + s0, d1 + s1), domain: d0 in [0, 5], d1 in [0, 2], s0 in [0, 32], s1 in [0, 1024], )", &mlir_context_); auto constraints_for_variables = GetConstraintsForVariables(map); EXPECT_THAT(constraints_for_variables.constraints_for_dims, ElementsAre(IsEmpty(), IsEmpty())); EXPECT_THAT(constraints_for_variables.constraints_for_symbols, ElementsAre(IsEmpty(), IsEmpty())); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/ir/xla_gpu_ops.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/ir/xla_gpu_ops_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
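Editor's illustrative note (not a field of the dataset row above): the GetConstraintsForVariables helper exercised by xla_gpu_ops_test.cc buckets each IndexingMap constraint under every dimension and symbol that appears in its expression. A minimal extra check in the same style as the tests above, assuming the same XLAGPUOpsTest fixture and the ParseIndexingMap/ParseAffineExpr helpers, might look like this sketch (the test name and map are hypothetical):
TEST_F(XLAGPUOpsTest, GetConstraintsForVariablesSingleConstraint) {
  // Hypothetical map: one dimension, one symbol, and a single constraint
  // that only mentions the symbol s0.
  auto map = *ParseIndexingMap(R"(
    (d0)[s0] -> (d0 + s0),
    domain:
    d0 in [0, 3],
    s0 in [0, 7],
    s0 mod 4 in [0, 1]
  )",
                               &mlir_context_);
  auto constraints = GetConstraintsForVariables(map);
  // d0 appears in no constraint, so its bucket stays empty; s0 collects the
  // single "s0 mod 4" constraint together with its interval.
  EXPECT_THAT(constraints.constraints_for_dims[0], IsEmpty());
  EXPECT_THAT(constraints.constraints_for_symbols[0],
              UnorderedElementsAre(Pair(
                  ParseAffineExpr("s0 mod 4", &mlir_context_), Interval{0, 1})));
}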
591c41b1-f54a-486f-8805-bb5cd8bfa084
cpp
tensorflow/tensorflow
triton_support_legacy
third_party/xla/xla/service/gpu/fusions/triton/triton_support_legacy.cc
third_party/xla/xla/service/gpu/fusions/triton/triton_support_legacy_test.cc
#include <cstdint> #include <iterator> #include <variant> #include <vector> #include "absl/algorithm/container.h" #include "absl/log/check.h" #include "absl/strings/str_format.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/layout.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/fusions/triton/triton_support.h" #include "xla/service/gpu/variant_visitor.h" #include "xla/stream_executor/device_description.h" #include "xla/xla_data.pb.h" #include "tsl/platform/tensor_float_32_utils.h" namespace xla { namespace gpu { namespace legacy_triton { bool IsDistributiveOverAddition(const HloInstruction& hlo) { if (hlo.opcode() == HloOpcode::kMultiply || hlo.opcode() == HloOpcode::kNegate || hlo.opcode() == HloOpcode::kBitcast || hlo.opcode() == HloOpcode::kReshape || hlo.opcode() == HloOpcode::kCopy || hlo.opcode() == HloOpcode::kTranspose || hlo.opcode() == HloOpcode::kConvert || hlo.opcode() == HloOpcode::kBroadcast || hlo.opcode() == HloOpcode::kSlice) { return true; } return false; } bool IsTritonSupportedDotOutputType( const PrimitiveType t, const se::GpuComputeCapability& gpu_version) { switch (t) { case F16: case F32: return true; case F8E5M2: return std::visit(VariantVisitor{[](const se::CudaComputeCapability& cc) { return cc.IsAtLeastAmpere(); }, [](const se::RocmComputeCapability& cc) { return false; }}, gpu_version); case F8E4M3FN: return std::visit(VariantVisitor{[](const se::CudaComputeCapability& cc) { return cc.IsAtLeastHopper(); }, [](const se::RocmComputeCapability& cc) { return false; }}, gpu_version); case BF16: return std::visit(VariantVisitor{[](const se::CudaComputeCapability& cc) { return true; }, [](const se::RocmComputeCapability& cc) { return cc.has_bf16_dtype_support(); }}, gpu_version); default: return false; } }; bool IsTritonSupportedDataType(PrimitiveType type, const se::GpuComputeCapability& gpu_version) { if (IsTritonSupportedDotOutputType(type, gpu_version)) { return true; } switch (type) { case PRED: case S8: case S16: case S32: return true; default: return false; } } CodegenDecision IsInstructionSupportsDataTypes( const HloInstruction& instr, const se::GpuComputeCapability& gpu_version) { if (!IsTritonSupportedDataType(instr.shape().element_type(), gpu_version)) { return CodegenDecision::Forbid("Unsupported output data type."); } for (const HloInstruction* operand : instr.operands()) { const auto operand_type = operand->shape().element_type(); switch (instr.opcode()) { case HloOpcode::kConvert: if (operand_type == S4) continue; [[fallthrough]]; default: if (!IsTritonSupportedDataType(operand_type, gpu_version)) { return CodegenDecision::Forbid("Unsupported input data type."); } } } return CodegenDecision::Allow(); } std::vector<HloOpcode> TritonSupportedUnaryElementwiseUpToFloatNormalization( PrimitiveType element_type) { std::vector<HloOpcode> ret = {HloOpcode::kConvert}; if (element_type == PrimitiveType::PRED) { ret.push_back(HloOpcode::kNot); return ret; } ret.push_back(HloOpcode::kAbs); ret.push_back(HloOpcode::kNegate); if (element_type == PrimitiveType::F32 || element_type == PrimitiveType::BF16 || element_type == PrimitiveType::F64) { absl::c_copy(std::vector<HloOpcode>{HloOpcode::kCos, HloOpcode::kExp, HloOpcode::kExpm1, HloOpcode::kFloor, HloOpcode::kCeil, HloOpcode::kLog, HloOpcode::kLog1p, HloOpcode::kRsqrt, HloOpcode::kSin, HloOpcode::kSqrt, HloOpcode::kCbrt, HloOpcode::kTan, HloOpcode::kTanh, 
HloOpcode::kErf}, std::back_inserter(ret)); } return ret; } std::vector<HloOpcode> TritonSupportedBinaryElementwiseUpToFloatNormalization( PrimitiveType element_type) { if (element_type == PrimitiveType::PRED) { return {HloOpcode::kAnd, HloOpcode::kOr, HloOpcode::kXor, HloOpcode::kCompare}; } std::vector<HloOpcode> ret = {HloOpcode::kAdd, HloOpcode::kCompare, HloOpcode::kMaximum, HloOpcode::kMinimum, HloOpcode::kMultiply, HloOpcode::kSubtract}; if (element_type == PrimitiveType::F32 || element_type == PrimitiveType::BF16 || element_type == PrimitiveType::F64) { ret.push_back(HloOpcode::kAtan2); ret.push_back(HloOpcode::kDivide); ret.push_back(HloOpcode::kPower); } return ret; } std::vector<HloOpcode> TritonSupportedTernaryElementwiseUpToFloatNormalization( PrimitiveType element_type) { return {HloOpcode::kSelect, HloOpcode::kClamp}; } bool IsTritonSupportedElementwiseUpToFloatNormalization( HloOpcode opcode, PrimitiveType element_type) { return absl::c_linear_search( TritonSupportedUnaryElementwiseUpToFloatNormalization( element_type), opcode) || absl::c_linear_search( TritonSupportedBinaryElementwiseUpToFloatNormalization( element_type), opcode) || absl::c_linear_search( TritonSupportedTernaryElementwiseUpToFloatNormalization( element_type), opcode); } CodegenDecision CanTritonHandleElementwise( const HloInstruction& instr, const se::GpuComputeCapability& gpu_version) { if (auto decision = IsInstructionSupportsDataTypes(instr, gpu_version); !decision.CanFuse()) { return decision; } if (instr.opcode() == HloOpcode::kConstant) { return CodegenDecision::Allow(); } else if (!IsTritonSupportedElementwiseUpToFloatNormalization( instr.opcode(), instr.operand(0)->shape().element_type())) { return CodegenDecision::Forbid("Unsupported elementwise operation."); } return CodegenDecision::Allow(); } bool IsDotAlgorithmSupportedByTriton( PrecisionConfig::Algorithm algorithm, const se::GpuComputeCapability& gpu_version) { auto cuda_compute_capability = std::get_if<se::CudaComputeCapability>(&gpu_version); auto rocm_compute_capability = std::get_if<se::RocmComputeCapability>(&gpu_version); switch (algorithm) { case PrecisionConfig::ALG_DOT_TF32_TF32_F32_X3: case PrecisionConfig::ALG_DOT_TF32_TF32_F32: if (cuda_compute_capability) { return true; } return false; case PrecisionConfig::ALG_DOT_BF16_BF16_F32: case PrecisionConfig::ALG_DOT_BF16_BF16_F32_X3: case PrecisionConfig::ALG_DOT_BF16_BF16_F32_X6: if (cuda_compute_capability) { return true; } if (rocm_compute_capability) { return rocm_compute_capability->has_bf16_dtype_support(); } return false; case PrecisionConfig::ALG_DOT_F16_F16_F32: case PrecisionConfig::ALG_DOT_F32_F32_F32: default: return false; } } CodegenDecision CanTritonHandleGEMM( const HloDotInstruction& dot, const se::GpuComputeCapability& gpu_version) { auto cuda_compute_capability = std::get_if<se::CudaComputeCapability>(&gpu_version); auto rocm_compute_capability = std::get_if<se::RocmComputeCapability>(&gpu_version); CHECK(cuda_compute_capability || rocm_compute_capability); if (dot.precision_config().algorithm() == PrecisionConfig::ALG_UNSET) { if (!tsl::tensor_float_32_execution_enabled() || absl::c_any_of(dot.precision_config().operand_precision(), [](int x) { return x != PrecisionConfig::DEFAULT; })) { return CodegenDecision::Forbid( "Having non-default operand precisions or TensorFloat-32 disabled " "for Dot op with unset algorithm."); } } else { if (!IsDotAlgorithmSupportedByTriton(dot.precision_config().algorithm(), gpu_version)) { return CodegenDecision::Forbid(absl::StrFormat( 
"Unsupported algorithm on the current device(s): %s", PrecisionConfig::Algorithm_Name(dot.precision_config().algorithm()))); } } if (!IsTritonSupportedDotOutputType(dot.shape().element_type(), gpu_version)) { return CodegenDecision::Forbid("Unsupported output data type for Dot op."); } if (!IsTritonSupportedDataType(dot.operand(0)->shape().element_type(), gpu_version) || !IsTritonSupportedDataType(dot.operand(1)->shape().element_type(), gpu_version)) { return CodegenDecision::Forbid("Unsupported input data type for Dot op."); } const DotDimensionNumbers& dim_numbers = dot.dot_dimension_numbers(); if (dim_numbers.lhs_batch_dimensions().size() > 1) { return CodegenDecision::Forbid("Multiple batch dimensions."); } return CodegenDecision::Allow(); } bool NoNonContractingDimension(const HloDotInstruction& dot) { const DotDimensionNumbers& dim_numbers = dot.dot_dimension_numbers(); if (dim_numbers.lhs_batch_dimensions().size() + dim_numbers.lhs_contracting_dimensions().size() == dot.operand(0)->shape().rank() || dim_numbers.rhs_batch_dimensions().size() + dim_numbers.rhs_contracting_dimensions().size() == dot.operand(1)->shape().rank()) { return true; } return false; } CodegenDecision IsTritonSupportedDynamicSlice( const HloDynamicSliceInstruction& instr) { for (const HloInstruction* index_operand : instr.index_operands()) { switch (index_operand->shape().element_type()) { case S8: case S16: case S32: break; default: return CodegenDecision::Forbid( "Dynamic slice is only supported with S8, S16, or S32 indices."); } } const HloInstruction* input = instr.operand(0); Layout in_layout = input->shape().layout(); int64_t majormost_dim_id = in_layout.minor_to_major(in_layout.minor_to_major_size() - 1); for (int i = 0; i < input->shape().dimensions_size(); ++i) { if (i == majormost_dim_id) { continue; } else if (input->shape().dimensions(i) != instr.slice_sizes(i)) { return CodegenDecision::Forbid( "Unsupported dynamic slice on non-major-most dimension."); } } return CodegenDecision::Allow(); } CodegenDecision IsTritonSupportedInstruction( const HloInstruction& instr, const se::GpuComputeCapability& gpu_version) { if (instr.IsElementwise()) { return CanTritonHandleElementwise(instr, gpu_version); } switch (instr.opcode()) { case HloOpcode::kDot: { auto* dot = Cast<HloDotInstruction>(&instr); if (NoNonContractingDimension(*dot)) { return CodegenDecision::Forbid("No non-contracting dimensions."); } return CanTritonHandleGEMM(*dot, gpu_version); } case HloOpcode::kTuple: { if (instr.IsRoot()) { return CodegenDecision::Allow(); } return CodegenDecision::Forbid("Only supports root tuples."); } case HloOpcode::kDynamicSlice: { return IsTritonSupportedDynamicSlice( *Cast<HloDynamicSliceInstruction>(&instr)); } case HloOpcode::kBitcast: case HloOpcode::kTranspose: case HloOpcode::kSlice: case HloOpcode::kReshape: case HloOpcode::kPad: case HloOpcode::kConcatenate: case HloOpcode::kParameter: case HloOpcode::kBroadcast: return CodegenDecision::Allow(); default: break; } return CodegenDecision::Forbid("Unsupported opcode."); } } } }
#include "xla/service/gpu/fusions/triton/triton_support_legacy.h" #include <memory> #include <string> #include <tuple> #include <utility> #include <variant> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/log/check.h" #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "absl/strings/substitute.h" #include "xla/error_spec.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/primitive_util.h" #include "xla/service/gpu/fusions/triton/triton_fusion_emitter.h" #include "xla/service/gpu/fusions/triton/triton_test_utils.h" #include "xla/service/gpu/gpu_device_info_for_tests.h" #include "xla/service/gpu/model/tiled_hlo_computation.h" #include "xla/service/gpu/triton_fusion_analysis.h" #include "xla/stream_executor/device_description.h" #include "xla/tsl/lib/core/status_test_util.h" #include "xla/xla.pb.h" #include "xla/xla_data.pb.h" #include "tsl/platform/status_matchers.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { se::GpuComputeCapability GetComputeCapability() { return se::CudaComputeCapability::Ampere(); } bool CombinationCrashesTriton(PrimitiveType lhs_type, PrimitiveType rhs_type, PrimitiveType output_type, se::GpuComputeCapability gpu_compute_capability) { if (std::holds_alternative<se::CudaComputeCapability>( gpu_compute_capability)) { auto cuda_compute_capability = std::get<se::CudaComputeCapability>(gpu_compute_capability); if (!cuda_compute_capability.IsAtLeastHopper() && (lhs_type == F8E4M3FN || rhs_type == F8E4M3FN || output_type == F8E4M3FN)) { return true; } } return false; } class DotTest : public TritonSupportTestBaseWithParam { protected: void TestDotWithTypes(PrimitiveType lhs_type, PrimitiveType rhs_type, PrimitiveType output_type) { if (lhs_type == BF16 && !SupportsBF16(GetComputeCapability())) { GTEST_SKIP(); } const HloOpcode opcode = HloOpcode::kDot; const std::string lhs = primitive_util::LowercasePrimitiveTypeName(lhs_type); const std::string rhs = primitive_util::LowercasePrimitiveTypeName(rhs_type); const std::string output = primitive_util::LowercasePrimitiveTypeName(output_type); const std::string kHloTestTemplate = R"( triton_computation { parameter_0 = $0[92,11]{1,0} parameter(0) parameter_1 = $1[11,63]{1,0} parameter(1) ROOT dot = $2[92,63]{1,0} $3(parameter_0, parameter_1), lhs_contracting_dims={1}, rhs_contracting_dims={0} } ENTRY e { parameter_0 = $0[92,11]{1,0} parameter(0) parameter_1 = $1[11,63]{1,0} parameter(1) ROOT triton_op = $2[92,63]{1,0} fusion(parameter_0, parameter_1), kind=kCustom, calls=triton_computation, backend_config={"fusion_backend_config":{"kind":"__triton_gemm", triton_gemm_config: {"block_m":16,"block_n":32,"block_k":512, "split_k":1,"num_stages":4,"num_warps":8, "num_ctas":1}}} })"; const std::string hlo_test = absl::Substitute( kHloTestTemplate, lhs, rhs, output, HloOpcodeString(opcode)); TF_ASSERT_OK_AND_ASSIGN( TestedInstruction ti, ParseTemplateAndGetInstruction(hlo_test, {}, opcode)); if (legacy_triton::IsTritonSupportedInstruction(ti.Instruction(), GetComputeCapability())) { TF_EXPECT_OK( ApplyFloatNormalization(ti.Module().get(), GetComputeCapability())); EXPECT_TRUE(RunAndCompareNoHloPasses( std::move(ti.Module()), ErrorSpec{primitive_util::IsF8Type(lhs_type) ? 
1.0 : 2e-4, 2e-4})); } else { if (CombinationCrashesTriton(lhs_type, rhs_type, output_type, GetComputeCapability())) { return; } const se::DeviceDescription dev_info = TestGpuDeviceInfo::RTXA6000DeviceInfo(GetComputeCapability()); BlockLevelParameters block_level_parameters; block_level_parameters.num_ctas = 1; block_level_parameters.num_stages = 4; block_level_parameters.num_warps = 8; EXPECT_THAT( TritonWrapper("test_fn", &ti.TritonFusion(), GetComputeCapability(), dev_info, block_level_parameters, &llvm_module_, mlir_context_), tsl::testing::StatusIs( absl::StatusCode::kInternal, ::testing::HasSubstr("Failed to compile Triton kernel"))); } } }; TEST_P(DotTest, IsTritonSupportedExecutesCorrectlyForDot) { PrimitiveType data_type; HloOpcode opcode; std::tie(data_type, opcode) = GetParam(); CHECK_EQ(opcode, HloOpcode::kDot); TestDotWithTypes(data_type, data_type, data_type); switch (data_type) { case F8E5M2: TestDotWithTypes(F8E5M2, F8E4M3FN, F32); TestDotWithTypes(F8E5M2, F8E5M2, F16); TestDotWithTypes(F8E5M2, F8E5M2, F32); break; case F8E4M3FN: TestDotWithTypes(F8E4M3FN, F8E5M2, F32); TestDotWithTypes(F8E4M3FN, F8E4M3FN, F16); TestDotWithTypes(F8E4M3FN, F8E4M3FN, F32); break; default: break; } } INSTANTIATE_TEST_SUITE_P(DotTestTestSuite, DotTest, ::testing::Combine(::testing::Values(F16, F32, BF16, F8E5M2, F8E4M3FN), ::testing::Values(HloOpcode::kDot)), TritonSupportTestTypeAndOpcodeToString); struct DynamicSliceTestParam { PrimitiveType data_type; PrimitiveType index_type; bool is_the_majormost_dim_being_sliced; using TupleType = std::tuple<PrimitiveType, PrimitiveType, bool>; explicit DynamicSliceTestParam(const TupleType& tuple) : data_type(std::get<0>(tuple)), index_type(std::get<1>(tuple)), is_the_majormost_dim_being_sliced(std::get<2>(tuple)) {} }; std::string DynamicSliceTestParamToString( const ::testing::TestParamInfo<DynamicSliceTestParam::TupleType>& info) { const DynamicSliceTestParam param(info.param); return absl::StrCat( primitive_util::LowercasePrimitiveTypeName(param.data_type), "_", primitive_util::LowercasePrimitiveTypeName(param.index_type), "_", param.is_the_majormost_dim_being_sliced ? 
"majormost" : "not_majormost"); } class DynamicSliceTest : public TritonSupportTestBase, public ::testing::WithParamInterface<DynamicSliceTestParam::TupleType> {}; TEST_P(DynamicSliceTest, IsTritonSupportedDynamicSlice) { const DynamicSliceTestParam param(GetParam()); if (param.data_type == BF16 && !SupportsBF16(GetComputeCapability())) { GTEST_SKIP(); } constexpr absl::string_view kHloTestTemplate = R"( triton_computation { dynamic_slice_input = $0[$2,$3] parameter(0) dot_rhs = f32[2,4] parameter(1) start_index0 = $1[] parameter(2) start_index1 = $1[] parameter(3) dynamic_slice = $0[5,2] dynamic-slice(dynamic_slice_input, start_index0, start_index1), dynamic_slice_sizes={5,2} convert = f32[5,2] convert(dynamic_slice) ROOT dot = f32[5, 4] dot(convert, dot_rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0} } ENTRY e { dynamic_slice_input = $0[$2,$3] parameter(0) dot_rhs = f32[2,4] parameter(1) start_index0 = $1[] constant($4) start_index1 = $1[] constant($5) ROOT fusion = f32[5,4] fusion(dynamic_slice_input, dot_rhs, start_index0, start_index1), kind=kCustom, calls=triton_computation, backend_config={ "fusion_backend_config":{ "kind":"__triton_gemm","triton_gemm_config":{ "block_m":"32","block_n":"32","block_k":"32","split_k":"1", "num_stages":"1","num_warps":"4","num_ctas":"1"}}} })"; const std::string hlo_test = absl::Substitute( kHloTestTemplate, primitive_util::LowercasePrimitiveTypeName(param.data_type), primitive_util::LowercasePrimitiveTypeName(param.index_type), param.is_the_majormost_dim_being_sliced ? 7 : 5, param.is_the_majormost_dim_being_sliced ? 2 : 4, param.is_the_majormost_dim_being_sliced ? 1 : 0, param.is_the_majormost_dim_being_sliced ? 0 : 1 ); TF_ASSERT_OK_AND_ASSIGN(TestedInstruction ti, ParseTemplateAndGetInstruction( hlo_test, {}, HloOpcode::kDynamicSlice)); const bool is_supported_instruction = legacy_triton::IsTritonSupportedInstruction(ti.Instruction(), GetComputeCapability()) .CanFuse(); const bool is_supported_dynamic_slice = legacy_triton::IsTritonSupportedDynamicSlice( *Cast<HloDynamicSliceInstruction>(&ti.Instruction())) .CanFuse(); EXPECT_EQ(is_supported_instruction, is_supported_dynamic_slice); if (is_supported_instruction) { TF_EXPECT_OK( ApplyFloatNormalization(ti.Module().get(), GetComputeCapability())); EXPECT_TRUE(RunAndCompareNoHloPasses( std::move(ti.Module()), ErrorSpec{2e-4, 2e-4})); } else { EXPECT_THAT(TritonFusionAnalysis::Execute(ti.TritonComputation()), tsl::testing::StatusIs(absl::StatusCode::kFailedPrecondition)); } } INSTANTIATE_TEST_SUITE_P( All, DynamicSliceTest, ::testing::Combine(::testing::Values(F16, BF16, F32), ::testing::Values(S8, S16, S32, S64, U8, U16, U32, U64), ::testing::Bool()), DynamicSliceTestParamToString); TEST_F(TritonSupportTestBase, UnsupportedDotOutputTypeFailsGracefullyWithTriton) { const std::string kHloTest = R"( triton_computation { parameter_0 = f32[92,11]{1,0} parameter(0) parameter_1 = f32[11,63]{1,0} parameter(1) ROOT dot = pred[92,63]{1,0} dot(parameter_0, parameter_1), lhs_contracting_dims={1}, rhs_contracting_dims={0} } ENTRY e { parameter_0 = f32[92,11]{1,0} parameter(0) parameter_1 = f32[11,63]{1,0} parameter(1) ROOT triton_op = pred[92,63]{1,0} fusion(parameter_0, parameter_1), kind=kCustom, calls=triton_computation, backend_config={"fusion_backend_config":{"kind":"__triton_gemm", triton_gemm_config: {"block_m":16,"block_n":32,"block_k":512, "split_k":1,"num_stages":4,"num_warps":8, "num_ctas":1}}} })"; TF_ASSERT_OK_AND_ASSIGN(TestedInstruction ti, ParseTemplateAndGetInstruction( kHloTest, {}, 
HloOpcode::kDot)); const se::DeviceDescription dev_info = TestGpuDeviceInfo::RTXA6000DeviceInfo(GetComputeCapability()); EXPECT_THAT(legacy_triton::IsTritonSupportedInstruction( ti.Instruction(), GetComputeCapability()) .Explain(), ::testing::HasSubstr("Unsupported output data type for Dot op.")); BlockLevelParameters block_level_parameters; block_level_parameters.num_ctas = 1; block_level_parameters.num_stages = 4; block_level_parameters.num_warps = 8; EXPECT_THAT( TritonWrapper("test_fn", &ti.TritonFusion(), GetComputeCapability(), dev_info, block_level_parameters, &llvm_module_, mlir_context_), tsl::testing::StatusIs( absl::StatusCode::kInternal, ::testing::HasSubstr("Failed to verify Triton module for fusion"))); } TEST_F(TritonSupportTestBase, UnsupportedDotWithMultipleBatchDimensionsFailsGracefullyWithTriton) { const std::string kHloTest = R"( triton_computation { parameter_0 = f32[2,2,2,2]{3,2,1,0} parameter(0) parameter_1 = f32[2,2,2,2]{3,2,1,0} parameter(1) ROOT dot = f32[2,2,2,2]{3,2,1,0} dot(parameter_0, parameter_1), lhs_contracting_dims={3}, lhs_batch_dims={1,0}, rhs_contracting_dims={2}, rhs_batch_dims={1,0} } ENTRY e { parameter_0 = f32[2,2,2,2]{3,2,1,0} parameter(0) parameter_1 = f32[2,2,2,2]{3,2,1,0} parameter(1) ROOT triton_op = f32[2,2,2,2]{3,2,1,0} fusion(parameter_0, parameter_1), kind=kCustom, calls=triton_computation, backend_config={"fusion_backend_config":{"kind":"__triton_gemm", triton_gemm_config: {"block_m":16,"block_n":32,"block_k":512, "split_k":1,"num_stages":4,"num_warps":8, "num_ctas":1}}} })"; TF_ASSERT_OK_AND_ASSIGN(TestedInstruction ti, ParseTemplateAndGetInstruction( kHloTest, {}, HloOpcode::kDot)); const se::DeviceDescription dev_info = TestGpuDeviceInfo::RTXA6000DeviceInfo(GetComputeCapability()); EXPECT_THAT(legacy_triton::IsTritonSupportedInstruction( ti.Instruction(), GetComputeCapability()) .Explain(), ::testing::HasSubstr("Multiple batch dimensions")); BlockLevelParameters block_level_parameters; block_level_parameters.num_ctas = 1; block_level_parameters.num_stages = 4; block_level_parameters.num_warps = 8; EXPECT_THAT( TritonWrapper("test_fn", &ti.TritonFusion(), GetComputeCapability(), dev_info, block_level_parameters, &llvm_module_, mlir_context_), tsl::testing::StatusIs(absl::StatusCode::kInternal, ::testing::HasSubstr("num_batch_dims <= 1"))); } TEST_F(TritonSupportTestBase, UnsupportedDotWithNoNonContractingDimensionsFailsGracefullyWithTriton) { const std::string kHloTest = R"( triton_computation { parameter_0 = f32[2]{0} parameter(0) parameter_1 = f32[2]{0} parameter(1) ROOT dot = f32[] dot(parameter_0, parameter_1), lhs_contracting_dims={0}, rhs_contracting_dims={0} } ENTRY e { parameter_0 = f32[2]{0} parameter(0) parameter_1 = f32[2]{0} parameter(1) ROOT triton_op = f32[] fusion(parameter_0, parameter_1), kind=kCustom, calls=triton_computation, backend_config={"fusion_backend_config":{"kind":"__triton_gemm"}} })"; TF_ASSERT_OK_AND_ASSIGN(TestedInstruction ti, ParseTemplateAndGetInstruction( kHloTest, {}, HloOpcode::kDot)); EXPECT_THAT(legacy_triton::IsTritonSupportedInstruction( ti.Instruction(), GetComputeCapability()) .Explain(), ::testing::HasSubstr("No non-contracting dimensions.")); EXPECT_THAT(TritonFusionAnalysis::Execute(ti.TritonComputation()), tsl::testing::StatusIs( absl::StatusCode::kInternal, ::testing::HasSubstr("non_contracting_dims.size() == 1"))); } } } }
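The dynamic-slice and dot tests above instantiate their HLO from string templates via absl::Substitute, filling $0, $1, ... placeholders with type names, opcode names, and constants. A minimal standalone sketch of that templating step, using an illustrative template fragment rather than one of the suites above:

#include <iostream>
#include <string>
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"

int main() {
  // Illustrative template only; $0 stands for the element type name and $1
  // for the opcode name, as in the test suites above.
  constexpr absl::string_view kTemplate = R"(
ENTRY e {
  p0 = $0[33,68] parameter(0)
  ROOT unary = $0[33,68] $1(p0)
})";
  const std::string hlo = absl::Substitute(kTemplate, "f32", "negate");
  std::cout << hlo << "\n";
  return 0;
}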
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/triton/triton_support_legacy.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/triton/triton_support_legacy_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
93afc18b-9012-463a-bf70-e385a2f499ec
cpp
tensorflow/tensorflow
triton_support
third_party/xla/xla/service/gpu/fusions/triton/triton_support.cc
third_party/xla/xla/service/gpu/fusions/triton/triton_support_test.cc
#include "xla/service/gpu/fusions/triton/triton_support.h" #include <variant> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/layout.h" #include "xla/primitive_util.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/ir_emission_utils.h" #include "xla/shape_util.h" #include "xla/stream_executor/device_description.h" #include "xla/xla_data.pb.h" namespace xla { namespace gpu { namespace { bool IsTritonSupportedDataType(PrimitiveType type, const se::GpuComputeCapability& gpu_version) { switch (type) { case PRED: case S4: case S8: case S16: case S32: case S64: case F16: case F32: case F64: return true; case F8E5M2: case F8E4M3FN: return std::holds_alternative<se::CudaComputeCapability>(gpu_version); case BF16: return std::holds_alternative<se::CudaComputeCapability>(gpu_version) || (std::holds_alternative<se::RocmComputeCapability>(gpu_version) && std::get<se::RocmComputeCapability>(gpu_version) .has_bf16_dtype_support()); default: return false; } } absl::flat_hash_set<HloOpcode> TritonSupportedUnaryElementwiseOps( PrimitiveType element_type) { if (element_type == PrimitiveType::PRED) { return {HloOpcode::kConvert, HloOpcode::kNot}; } if (element_type == PrimitiveType::U16) { return {HloOpcode::kAbs}; } absl::flat_hash_set<HloOpcode> ret{HloOpcode::kAbs, HloOpcode::kConvert}; if (element_type != PrimitiveType::F8E5M2 && element_type != PrimitiveType::F8E4M3FN) { ret.insert(HloOpcode::kNegate); } if (primitive_util::IsIntegralType(element_type)) { ret.insert(HloOpcode::kNot); } if (element_type == PrimitiveType::F32 || element_type == PrimitiveType::F64) { absl::flat_hash_set<HloOpcode> additional_opcodes{ HloOpcode::kCos, HloOpcode::kExp, HloOpcode::kExpm1, HloOpcode::kFloor, HloOpcode::kCeil, HloOpcode::kLog, HloOpcode::kLog1p, HloOpcode::kRsqrt, HloOpcode::kSin, HloOpcode::kSqrt, HloOpcode::kCbrt, HloOpcode::kTan, HloOpcode::kTanh, HloOpcode::kErf}; ret.insert(additional_opcodes.begin(), additional_opcodes.end()); } if (element_type == PrimitiveType::BF16 || element_type == PrimitiveType::F16) { absl::flat_hash_set<HloOpcode> additional_opcodes{HloOpcode::kFloor, HloOpcode::kCeil}; ret.insert(additional_opcodes.begin(), additional_opcodes.end()); } if (primitive_util::IsFloatingPointType(element_type)) { ret.insert(HloOpcode::kReducePrecision); } return ret; } CodegenDecision IsTritonSupportedConversion( PrimitiveType output, PrimitiveType input, const se::GpuComputeCapability& gpu_version) { auto any_is = [=](PrimitiveType compare) { return input == compare || output == compare; }; auto error_message = [&]() { return CodegenDecision::Forbid( absl::StrCat("Unsupported conversion in Triton: ", primitive_util::LowercasePrimitiveTypeName(input), " to ", primitive_util::LowercasePrimitiveTypeName(output))); }; if (input != output && any_is(PrimitiveType::F8E4M3FN) && std::holds_alternative<se::CudaComputeCapability>(gpu_version) && !std::get<se::CudaComputeCapability>(gpu_version).IsAtLeastHopper()) { return error_message(); } if (input != output && (any_is(PrimitiveType::F8E4M3FN) || any_is(PrimitiveType::F8E5M2)) && !(any_is(PrimitiveType::F16) || any_is(PrimitiveType::BF16) || any_is(PrimitiveType::F32))) { return error_message(); } if 
(IsTritonSupportedDataType(input, gpu_version) && (IsTritonSupportedDataType(output, gpu_version) || output == PrimitiveType::S4)) { return CodegenDecision::Allow(); } return error_message(); } absl::flat_hash_set<HloOpcode> TritonSupportedBinaryElementwiseOps( PrimitiveType element_type, const se::GpuComputeCapability& gpu_version) { if (element_type == PrimitiveType::U16 || element_type == PrimitiveType::F8E5M2 || element_type == PrimitiveType::F8E4M3FN) { return {}; } absl::flat_hash_set<HloOpcode> ret{HloOpcode::kAdd, HloOpcode::kCompare, HloOpcode::kMaximum, HloOpcode::kMinimum, HloOpcode::kMultiply}; if (element_type == PrimitiveType::PRED) { ret.insert(HloOpcode::kAnd); ret.insert(HloOpcode::kOr); ret.insert(HloOpcode::kXor); return ret; } ret.insert(HloOpcode::kSubtract); if (primitive_util::IsIntegralType(element_type)) { ret.insert(HloOpcode::kDivide); ret.insert(HloOpcode::kAnd); ret.insert(HloOpcode::kOr); ret.insert(HloOpcode::kXor); } if (element_type == PrimitiveType::F32 || element_type == PrimitiveType::F64) { ret.insert(HloOpcode::kAtan2); ret.insert(HloOpcode::kDivide); ret.insert(HloOpcode::kRemainder); ret.insert(HloOpcode::kPower); } return ret; } absl::flat_hash_set<HloOpcode> TritonSupportedTernaryElementwiseOps( PrimitiveType element_type, const se::GpuComputeCapability& gpu_version) { if (element_type == PrimitiveType::U16) { return {}; } if (element_type == PrimitiveType::F8E5M2 || element_type == PrimitiveType::F8E4M3FN) { return {HloOpcode::kSelect}; } return {HloOpcode::kSelect, HloOpcode::kClamp}; } bool IsTritonSupportedElementwise(HloOpcode opcode, PrimitiveType element_type, const se::GpuComputeCapability& gpu_version) { return TritonSupportedUnaryElementwiseOps(element_type).contains(opcode) || TritonSupportedBinaryElementwiseOps(element_type, gpu_version) .contains(opcode) || TritonSupportedTernaryElementwiseOps(element_type, gpu_version) .contains(opcode); } CodegenDecision IsTritonSupportedInstructionImpl( const HloInstruction& instr, const se::GpuComputeCapability& gpu_version, bool is_within_reduction_computation); CodegenDecision CanTritonHandleReduce( const HloReduceInstruction& reduce, const se::GpuComputeCapability& gpu_version) { if (reduce.shape().element_type() == PrimitiveType::F8E4M3FN || reduce.shape().element_type() == PrimitiveType::F8E5M2) { return CodegenDecision::Forbid( "F8E4M3FN and F8E5M2 are not supported for reductions."); } bool is_triton_supported_reduction_computation = absl::c_all_of( reduce.to_apply()->instructions(), [&](const HloInstruction* instr) { return IsTritonSupportedInstructionImpl( *instr, gpu_version, true) .CanFuse(); }); if (!is_triton_supported_reduction_computation) { return CodegenDecision::Forbid( "Unsupported reduction computation by Triton."); } if (reduce.dimensions().size() == 1 && reduce.operand_count() == 2) { return CodegenDecision::Allow(); } return CodegenDecision::Forbid( "Reduction is not a row-reduction of a single operand."); } CodegenDecision IsTritonSupportedInstructionImpl( const HloInstruction& instr, const se::GpuComputeCapability& gpu_version, bool is_within_reduction_computation) { if (internal::IsTritonUnsupportedOpcode(instr.opcode())) { return CodegenDecision::Forbid("Unsupported opcode."); } if (IsUnsupported0DTensor(instr, is_within_reduction_computation)) { return CodegenDecision::Forbid("Unsupported 0D tensor"); } if (instr.opcode() == HloOpcode::kConvert) { return IsTritonSupportedConversion(instr.shape().element_type(), instr.operand(0)->shape().element_type(), gpu_version); } 
auto type = instr.shape().element_type(); bool output_type_is_supported = IsTritonSupportedDataType(type, gpu_version); if (!output_type_is_supported) { return CodegenDecision::Forbid("Unsupported output data type."); } bool input_types_are_supported = absl::c_all_of(instr.operands(), [&](const HloInstruction* operand) { return IsTritonSupportedDataType(operand->shape().element_type(), gpu_version); }); if (!input_types_are_supported) { return CodegenDecision::Forbid("Unsupported input data type."); } if (instr.opcode() == HloOpcode::kConstant) { return ShapeUtil::IsScalar(instr.shape()) ? CodegenDecision::Allow() : CodegenDecision::Forbid( "Only scalar constants are supported in Triton."); } if (instr.opcode() == HloOpcode::kIota) { PrimitiveType element_type = instr.shape().element_type(); return element_type != PrimitiveType::F8E4M3FN && element_type != PrimitiveType::F8E5M2 ? CodegenDecision::Allow() : CodegenDecision::Forbid( "F8E4M3FN and F8E5M2 are not supported for iota."); } if (instr.IsElementwise()) { if (!IsTritonSupportedElementwise( instr.opcode(), instr.operand(instr.operand_count() - 1)->shape().element_type(), gpu_version)) { return CodegenDecision::Forbid("Unsupported elementwise operation."); } return CodegenDecision::Allow(); } switch (instr.opcode()) { case HloOpcode::kReduce: { return CanTritonHandleReduce(*Cast<HloReduceInstruction>(&instr), gpu_version); } case HloOpcode::kSlice: case HloOpcode::kTranspose: case HloOpcode::kParameter: case HloOpcode::kBroadcast: case HloOpcode::kBitcast: case HloOpcode::kReshape: return CodegenDecision::Allow(); default: VLOG(2) << "Unsupported instruction: " << instr.ToString(); break; } return CodegenDecision::Forbid("Unsupported opcode."); } } namespace internal { bool IsTritonUnsupportedOpcode(HloOpcode opcode) { switch (opcode) { case HloOpcode::kAddDependency: case HloOpcode::kAfterAll: case HloOpcode::kBatchNormGrad: case HloOpcode::kBatchNormInference: case HloOpcode::kBatchNormTraining: case HloOpcode::kBitcastConvert: case HloOpcode::kCall: case HloOpcode::kCholesky: case HloOpcode::kCollectiveBroadcast: case HloOpcode::kCollectivePermuteDone: case HloOpcode::kCollectivePermuteStart: case HloOpcode::kComplex: case HloOpcode::kConcatenate: case HloOpcode::kConditional: case HloOpcode::kConvolution: case HloOpcode::kCopy: case HloOpcode::kCopyDone: case HloOpcode::kCopyStart: case HloOpcode::kCustomCall: case HloOpcode::kDomain: case HloOpcode::kDot: case HloOpcode::kDynamicReshape: case HloOpcode::kDynamicSlice: case HloOpcode::kDynamicUpdateSlice: case HloOpcode::kFft: case HloOpcode::kFusion: case HloOpcode::kGather: case HloOpcode::kGetDimensionSize: case HloOpcode::kGetTupleElement: case HloOpcode::kInfeed: case HloOpcode::kMap: case HloOpcode::kOptimizationBarrier: case HloOpcode::kOutfeed: case HloOpcode::kPad: case HloOpcode::kPartitionId: case HloOpcode::kRecv: case HloOpcode::kRecvDone: case HloOpcode::kReduceWindow: case HloOpcode::kReplicaId: case HloOpcode::kReverse: case HloOpcode::kRngBitGenerator: case HloOpcode::kRngGetAndUpdateState: case HloOpcode::kScatter: case HloOpcode::kSelectAndScatter: case HloOpcode::kSend: case HloOpcode::kSendDone: case HloOpcode::kSetDimensionSize: case HloOpcode::kSort: case HloOpcode::kStochasticConvert: case HloOpcode::kTopK: case HloOpcode::kTriangularSolve: case HloOpcode::kTuple: case HloOpcode::kWhile: return true; default: return false; } } } absl::Status EnsureTritonSupportsComputeCapability( const se::GpuComputeCapability& gpu_compute_capability) { auto 
cuda_compute_capability = std::get_if<se::CudaComputeCapability>(&gpu_compute_capability); auto rocm_compute_capability = std::get_if<se::RocmComputeCapability>(&gpu_compute_capability); if (!cuda_compute_capability && !rocm_compute_capability) { return absl::FailedPreconditionError( "Triton support is only enabled for CUDA and ROCm GPUs."); } if (cuda_compute_capability && !cuda_compute_capability->IsAtLeastAmpere()) { return absl::FailedPreconditionError( absl::StrCat("CUDA Triton support is only enabled for Ampere GPUs ", "(compute capability 8.0) and up, but got compute ", "capability ", cuda_compute_capability->major, ".", cuda_compute_capability->minor, ".")); } return absl::OkStatus(); } CodegenDecision IsTritonSupportedInstruction( const HloInstruction& instr, const se::GpuComputeCapability& gpu_version) { CodegenDecision decision = IsTritonSupportedInstructionImpl( instr, gpu_version, false); VLOG(2) << "IsTritonSupportedInstruction: " << instr.ToString() << " " << bool(decision); return decision; } CodegenDecision IsTritonSupportedComputation( const HloComputation& computation, const se::GpuComputeCapability& gpu_compute_capability) { for (const auto* instruction : computation.instructions()) { if (CodegenDecision can_codegen = IsTritonSupportedInstruction(*instruction, gpu_compute_capability); !can_codegen) { return can_codegen; } } return CodegenDecision::Allow(); } bool IsTritonFusedComputation(const HloComputation& computation) { HloFusionInstruction* fusion = static_cast<HloFusionInstruction*>(computation.FusionInstruction()); return fusion != nullptr && fusion->fusion_kind() == HloInstruction::FusionKind::kCustom && fusion->backend_config<gpu::GpuBackendConfig>() ->fusion_backend_config() .kind() == kTritonGemmFusionKind; } bool IsUnsupported0DTensor(const HloInstruction& instr, bool is_within_reduction_computation) { if (!instr.shape().IsArray() || instr.shape().rank() != 0 || is_within_reduction_computation || instr.opcode() == HloOpcode::kConstant) { return false; } if (instr.user_count() > 0 && absl::c_all_of(instr.users(), [&](const HloInstruction* user) { return user->opcode() == HloOpcode::kBroadcast; })) { return false; } if (instr.IsElementwise() && !instr.IsRoot() && absl::c_all_of(instr.operands(), [&](const HloInstruction* operand) { return operand->shape().IsArray() && operand->shape().rank() == 0; })) { return false; } return true; } } }
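The data-type and capability checks above dispatch on se::GpuComputeCapability, a variant over the CUDA and ROCm capability types, using std::holds_alternative, std::get, and std::get_if. A minimal self-contained sketch of that dispatch pattern; CudaCc and RocmCc below are simplified stand-ins for the real se:: classes, not their actual definitions:

#include <iostream>
#include <variant>

// Simplified stand-ins for se::CudaComputeCapability / se::RocmComputeCapability.
struct CudaCc {
  int major = 8;
  bool IsAtLeastAmpere() const { return major >= 8; }
};
struct RocmCc {
  bool has_bf16_dtype_support() const { return true; }
};
using GpuCc = std::variant<CudaCc, RocmCc>;

// Mirrors the BF16 branch of IsTritonSupportedDataType: allowed on CUDA,
// and on ROCm only when the capability reports BF16 support.
bool Bf16Supported(const GpuCc& cc) {
  if (std::holds_alternative<CudaCc>(cc)) return true;
  return std::get<RocmCc>(cc).has_bf16_dtype_support();
}

// Mirrors the std::get_if gate in EnsureTritonSupportsComputeCapability:
// CUDA capabilities below Ampere are rejected; ROCm passes in this sketch.
bool CapabilityOk(const GpuCc& cc) {
  if (const auto* cuda = std::get_if<CudaCc>(&cc)) {
    return cuda->IsAtLeastAmpere();
  }
  return true;
}

int main() {
  const GpuCc cuda = CudaCc{};
  const GpuCc rocm = RocmCc{};
  std::cout << Bf16Supported(cuda) << Bf16Supported(rocm) << CapabilityOk(cuda)
            << CapabilityOk(rocm) << "\n";  // prints: 1111
  return 0;
}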
#include "xla/service/gpu/fusions/triton/triton_support.h" #include <array> #include <cstdint> #include <string> #include <tuple> #include <utility> #include <variant> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_set.h" #include "absl/strings/string_view.h" #include "absl/strings/substitute.h" #include "absl/types/span.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/primitive_util.h" #include "xla/service/gpu/fusions/triton/triton_fusion_emitter.h" #include "xla/service/gpu/fusions/triton/triton_test_utils.h" #include "xla/service/gpu/gpu_device_info_for_tests.h" #include "xla/service/gpu/model/tiled_hlo_computation.h" #include "xla/stream_executor/device_description.h" #include "xla/xla.pb.h" #include "xla/xla_data.pb.h" #include "tsl/platform/protobuf.h" #include "tsl/platform/status_matchers.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { using ::testing::Not; using ::tsl::testing::IsOk; std::vector<xla::PrimitiveType> AllXlaDataTypes() { std::vector<xla::PrimitiveType> xla_data_types; std::vector<xla::PrimitiveType> to_filter_out = {PRIMITIVE_TYPE_INVALID, TUPLE, OPAQUE_TYPE, TOKEN}; const tsl::protobuf::EnumDescriptor* xla_type_descriptor = tsl::protobuf::GetEnumDescriptor<xla::PrimitiveType>(); for (int enum_ix = 0; enum_ix < xla_type_descriptor->value_count(); ++enum_ix) { xla::PrimitiveType xla_type = static_cast<xla::PrimitiveType>( xla_type_descriptor->value(enum_ix)->number()); if (!absl::c_linear_search(to_filter_out, xla_type)) { xla_data_types.push_back(xla_type); } } return xla_data_types; } bool DoesOpSupportType(HloOpcode opcode, PrimitiveType type) { namespace pu = ::xla::primitive_util; switch (opcode) { case HloOpcode::kAnd: case HloOpcode::kOr: case HloOpcode::kXor: case HloOpcode::kNot: return type == PRED || pu::IsIntegralType(type); case HloOpcode::kCos: case HloOpcode::kExp: case HloOpcode::kExpm1: case HloOpcode::kLog: case HloOpcode::kLog1p: case HloOpcode::kRsqrt: case HloOpcode::kSin: case HloOpcode::kSqrt: case HloOpcode::kCbrt: case HloOpcode::kTan: case HloOpcode::kTanh: case HloOpcode::kReal: case HloOpcode::kImag: case HloOpcode::kLogistic: return pu::IsFloatingPointType(type) || pu::IsComplexType(type); case HloOpcode::kErf: case HloOpcode::kFloor: case HloOpcode::kCeil: case HloOpcode::kIsFinite: case HloOpcode::kRoundNearestAfz: case HloOpcode::kRoundNearestEven: case HloOpcode::kReducePrecision: return pu::IsFloatingPointType(type); case HloOpcode::kClz: case HloOpcode::kShiftRightArithmetic: case HloOpcode::kShiftRightLogical: case HloOpcode::kShiftLeft: case HloOpcode::kPopulationCount: return pu::IsIntegralType(type); case HloOpcode::kAbs: case HloOpcode::kSign: return pu::IsSignedIntegralType(type) || pu::IsFloatingPointType(type) || pu::IsComplexType(type); case HloOpcode::kPower: case HloOpcode::kAtan2: case HloOpcode::kDivide: case HloOpcode::kRemainder: case HloOpcode::kSubtract: case HloOpcode::kNegate: case HloOpcode::kIota: return type != PRED; case HloOpcode::kRng: return !pu::IsComplexType(type); default: return true; } } auto AllDevicesToTest() { using cc = se::GpuComputeCapability; #ifdef TENSORFLOW_USE_ROCM se::RocmComputeCapability example_rocm_compute_capability = TestGpuDeviceInfo::AMDMI210DeviceInfo().rocm_compute_capability(); return std::vector<cc>{cc(example_rocm_compute_capability)}; #else return std::vector<cc>{cc(se::CudaComputeCapability::Ampere()), cc(se::CudaComputeCapability::Hopper())}; 
#endif } auto AllTestCombinationsForOpcodes(absl::Span<const HloOpcode> opcodes) { std::vector<std::tuple<PrimitiveType, HloOpcode, se::GpuComputeCapability>> test_combinations; for (PrimitiveType data_type : AllXlaDataTypes()) { for (HloOpcode opcode : opcodes) { if (DoesOpSupportType(opcode, data_type)) { for (se::GpuComputeCapability cc : AllDevicesToTest()) { test_combinations.push_back({data_type, opcode, cc}); } } } } return ::testing::ValuesIn(test_combinations); }; class TritonSupportTest : public TritonSupportTestBase { public: void RunSupportTest(TestedInstruction ti, std::vector<int64_t> output_tile_sizes, se::GpuComputeCapability cc, bool skip_failure_branch_to_avoid_crash = false) { if (ti.Instruction().shape().IsArray()) { ASSERT_EQ(output_tile_sizes.size(), ti.Instruction().shape().rank()); } BlockLevelParameters block_level_parameters = FromOutputTileSizes(std::move(output_tile_sizes)); const se::DeviceDescription dev_info = std::holds_alternative<se::CudaComputeCapability>(cc) ? TestGpuDeviceInfo::RTXA6000DeviceInfo(cc) : TestGpuDeviceInfo::AMDMI210DeviceInfo(); auto run_triton_codegen = [&]() { return TritonWrapper("test_fn", &ti.TritonFusion(), cc, dev_info, block_level_parameters, &llvm_module_, mlir_context_); }; if (IsTritonSupportedInstruction(ti.Instruction(), cc)) { EXPECT_THAT(run_triton_codegen(), IsOk()); } else { if (skip_failure_branch_to_avoid_crash) { EXPECT_DEATH( try { run_triton_codegen().IgnoreError(); } catch (...) { abort(); }, ""); } else { EXPECT_THAT(run_triton_codegen(), Not(IsOk())); } } } }; class TritonSupportTestWithTypeAndOpcodeAndDeviceParam : public TritonSupportTest, public ::testing::WithParamInterface< std::tuple<PrimitiveType, HloOpcode, se::GpuComputeCapability>> {}; using BitcastOrReshapeTest = TritonSupportTestWithTypeAndOpcodeAndDeviceParam; TEST_P(BitcastOrReshapeTest, IsTritonSupportedBitcastOrReshape) { auto [data_type, opcode, cc] = GetParam(); const std::string kHloTestTemplate = R"( ENTRY triton_computation { parameter_0 = $0[1,16,4] parameter(0) ROOT bitcast_or_reshape = $0[64] $1(parameter_0) })"; TF_ASSERT_OK_AND_ASSIGN( TestedInstruction ti, ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode)); RunSupportTest(std::move(ti), {16}, cc); } constexpr std::array kTestedOpsBitcastReshape = {HloOpcode::kBitcast, HloOpcode::kReshape}; INSTANTIATE_TEST_SUITE_P( BitcastOrReshapeTestSuite, BitcastOrReshapeTest, AllTestCombinationsForOpcodes(kTestedOpsBitcastReshape), TritonSupportTestTypeAndOpcodeAndDeviceToString); using UnaryElementwiseTest = TritonSupportTestWithTypeAndOpcodeAndDeviceParam; TEST_P(UnaryElementwiseTest, IsTritonSupportedUnaryElementwise) { auto [data_type, opcode, cc] = GetParam(); const std::string kDefaultHloTemplate = R"( ENTRY triton_computation { parameter_0 = $0[33,68] parameter(0) ROOT unary = $0[33,68] $1(parameter_0) })"; const std::string kF64OutputTemplate = R"( ENTRY triton_computation { parameter_0 = $0[33,68] parameter(0) ROOT unary = f64[33,68] $1(parameter_0) })"; const std::string kPredOutputTemplate = R"( ENTRY triton_computation { parameter_0 = $0[33,68] parameter(0) ROOT unary = pred[33,68] $1(parameter_0) })"; const std::string kReducePrecisionTemplate = R"( ENTRY triton_computation { parameter_0 = $0[33,68] parameter(0) ROOT unary = $0[33,68] $1(parameter_0), exponent_bits=2, mantissa_bits=2 })"; bool f64_output = opcode == HloOpcode::kReal || opcode == HloOpcode::kImag || (opcode == HloOpcode::kAbs && primitive_util::IsComplexType(data_type)); TF_ASSERT_OK_AND_ASSIGN( 
TestedInstruction ti, ParseTemplateAndGetInstruction( f64_output ? kF64OutputTemplate : (opcode == HloOpcode::kIsFinite ? kPredOutputTemplate : (opcode == HloOpcode::kReducePrecision ? kReducePrecisionTemplate : kDefaultHloTemplate)), data_type, opcode)); RunSupportTest(std::move(ti), {1, 32}, cc); } constexpr std::array kTestedOpsUnaryElementwise = {HloOpcode::kAbs, HloOpcode::kCbrt, HloOpcode::kCeil, HloOpcode::kClz, HloOpcode::kCos, HloOpcode::kErf, HloOpcode::kExp, HloOpcode::kExpm1, HloOpcode::kFloor, HloOpcode::kImag, HloOpcode::kIsFinite, HloOpcode::kLog, HloOpcode::kLog1p, HloOpcode::kLogistic, HloOpcode::kNegate, HloOpcode::kNot, HloOpcode::kPopulationCount, HloOpcode::kReal, HloOpcode::kReducePrecision, HloOpcode::kRoundNearestAfz, HloOpcode::kRoundNearestEven, HloOpcode::kRsqrt, HloOpcode::kSign, HloOpcode::kSin, HloOpcode::kSqrt, HloOpcode::kTan, HloOpcode::kTanh}; INSTANTIATE_TEST_SUITE_P( UnaryElementwiseTestSuite, UnaryElementwiseTest, AllTestCombinationsForOpcodes(kTestedOpsUnaryElementwise), TritonSupportTestTypeAndOpcodeAndDeviceToString); class ConvertTest : public TritonSupportTest, public ::testing::WithParamInterface< std::tuple<PrimitiveType, PrimitiveType, se::GpuComputeCapability>> { }; TEST_P(ConvertTest, Convert) { auto [data_type_in, data_type_out, cc] = GetParam(); const std::string hlo_text = absl::Substitute( R"( ENTRY triton_computation { parameter_0 = $0[33,68] parameter(0) ROOT convert = $1[33,68] convert(parameter_0) })", primitive_util::LowercasePrimitiveTypeName(data_type_in), primitive_util::LowercasePrimitiveTypeName(data_type_out)); TF_ASSERT_OK_AND_ASSIGN( TestedInstruction ti, ParseTemplateAndGetInstruction( hlo_text, data_type_in, HloOpcode::kConvert)); bool skip_failure_branch_to_avoid_crash = false; PrimitiveType captured_in = data_type_in; PrimitiveType captured_out = data_type_out; auto any_is = [=](PrimitiveType compare) { return captured_in == compare || captured_out == compare; }; if (data_type_in != data_type_out && any_is(PrimitiveType::F8E4M3FN) && std::holds_alternative<se::CudaComputeCapability>(cc) && !std::get<se::CudaComputeCapability>(cc).IsAtLeastHopper()) { skip_failure_branch_to_avoid_crash |= any_is(F16) || any_is(BF16) || any_is(F32); skip_failure_branch_to_avoid_crash |= (data_type_in == PrimitiveType::F8E4M3FN && data_type_out == PrimitiveType::F64); } skip_failure_branch_to_avoid_crash |= (any_is(PrimitiveType::F8E4M3FN) && any_is(PrimitiveType::F8E5M2)) || (data_type_in == PrimitiveType::F64 && (data_type_out == PrimitiveType::F8E4M3FN || data_type_out == PrimitiveType::F8E5M2)); skip_failure_branch_to_avoid_crash |= (data_type_out == PrimitiveType::F64 && (data_type_in == PrimitiveType::F8E4M3FN || data_type_in == PrimitiveType::F8E5M2)); RunSupportTest(std::move(ti), {1, 32}, cc, skip_failure_branch_to_avoid_crash); } constexpr std::array kTestedOpsConvert = {HloOpcode::kConvert}; INSTANTIATE_TEST_SUITE_P( ConvertTestSuite, ConvertTest, ::testing::Combine(::testing::ValuesIn(AllXlaDataTypes()), ::testing::ValuesIn(AllXlaDataTypes()), ::testing::ValuesIn(AllDevicesToTest())), TritonSupportTestTwoTypesAndDeviceToString); using BinaryElementwiseTest = TritonSupportTestWithTypeAndOpcodeAndDeviceParam; TEST_P(BinaryElementwiseTest, IsTritonSupportedBinaryElementwise) { auto [data_type, opcode, cc] = GetParam(); const std::string kHloTestTemplate = R"( ENTRY triton_computation { parameter_0 = $0[11,63] parameter(0) parameter_1 = $0[11,63] parameter(1) ROOT binary = $0[11,63] $1(parameter_0, parameter_1) })"; const std::string 
kHloCompareTestTemplate = R"( ENTRY triton_computation { parameter_0 = $0[11,63] parameter(0) parameter_1 = $0[11,63] parameter(1) ROOT compare = pred[11,63] $1(parameter_0, parameter_1), direction=GE })"; TF_ASSERT_OK_AND_ASSIGN( TestedInstruction ti, ParseTemplateAndGetInstruction(opcode == HloOpcode::kCompare ? kHloCompareTestTemplate : kHloTestTemplate, data_type, opcode)); bool skip_failure_branch_to_avoid_crash = opcode == HloOpcode::kDivide && (data_type == PrimitiveType::BF16 || data_type == PrimitiveType::F16 || data_type == PrimitiveType::F8E5M2 || data_type == PrimitiveType::F8E4M3FN); RunSupportTest(std::move(ti), {1, 32}, cc, skip_failure_branch_to_avoid_crash); } TEST_P(BinaryElementwiseTest, IsTritonSupportedBinaryElementwise0D) { auto [data_type, opcode, cc] = GetParam(); const std::string kHloTestTemplate = R"( ENTRY triton_computation { parameter_0 = $0[] parameter(0) parameter_1 = $0[] parameter(1) ROOT binary = $0[] $1(parameter_0, parameter_1) })"; const std::string kHloCompareTestTemplate = R"( ENTRY triton_computation { parameter_0 = $0[] parameter(0) parameter_1 = $0[] parameter(1) ROOT compare = pred[] $1(parameter_0, parameter_1), direction=GE })"; TF_ASSERT_OK_AND_ASSIGN( TestedInstruction ti, ParseTemplateAndGetInstruction(opcode == HloOpcode::kCompare ? kHloCompareTestTemplate : kHloTestTemplate, data_type, opcode)); RunSupportTest(std::move(ti), {}, cc); } constexpr std::array kTestedOpsBinaryElementwise = { HloOpcode::kAnd, HloOpcode::kOr, HloOpcode::kXor, HloOpcode::kAdd, HloOpcode::kMultiply, HloOpcode::kMaximum, HloOpcode::kMinimum, HloOpcode::kSubtract, HloOpcode::kAtan2, HloOpcode::kDivide, HloOpcode::kRemainder, HloOpcode::kPower, HloOpcode::kShiftLeft, HloOpcode::kShiftRightArithmetic, HloOpcode::kShiftRightLogical, HloOpcode::kCompare}; INSTANTIATE_TEST_SUITE_P( BinaryElementwiseTestSuite, BinaryElementwiseTest, AllTestCombinationsForOpcodes(kTestedOpsBinaryElementwise), TritonSupportTestTypeAndOpcodeAndDeviceToString); using TernaryElementwiseTest = TritonSupportTestWithTypeAndOpcodeAndDeviceParam; TEST_P(TernaryElementwiseTest, IsTritonSupportedTernaryElementwise) { auto [data_type, opcode, cc] = GetParam(); const std::string kHloTestTemplate = R"( ENTRY triton_computation { parameter_0 = $2[13,63] parameter(0) parameter_1 = $0[13,63] parameter(1) parameter_2 = $0[13,63] parameter(2) ROOT ternary = $0[13,63] $1(parameter_0, parameter_1, parameter_2) })"; auto type = primitive_util::LowercasePrimitiveTypeName(data_type); const std::string hlo_text = absl::Substitute(kHloTestTemplate, type, HloOpcodeString(opcode), opcode == HloOpcode::kSelect ? 
"pred" : type); TF_ASSERT_OK_AND_ASSIGN( TestedInstruction ti, ParseTemplateAndGetInstruction(hlo_text, data_type, opcode)); RunSupportTest(std::move(ti), {1, 32}, cc); } constexpr std::array kTestedOpsTernaryElementwise = {HloOpcode::kSelect, HloOpcode::kClamp}; INSTANTIATE_TEST_SUITE_P( TernaryElementwiseTestSuite, TernaryElementwiseTest, AllTestCombinationsForOpcodes(kTestedOpsTernaryElementwise), TritonSupportTestTypeAndOpcodeAndDeviceToString); using ReduceTest = TritonSupportTestWithTypeAndOpcodeAndDeviceParam; TEST_P(ReduceTest, IsTritonSupportedReduction) { auto [data_type, opcode, cc] = GetParam(); bool dtype_is_complex = data_type == C64 || data_type == C128; const std::string kHloTestTemplate = absl::Substitute(R"( add { Arg_0 = $0[] parameter(0) Arg_1 = $0[] parameter(1) ROOT add = $0[] add(Arg_0, Arg_1) } ENTRY triton_computation { parameter_0 = $0[125,127] parameter(0) constant_0 = $0[] constant($1) ROOT reduce = $0[125] reduce(parameter_0, constant_0), dimensions={1}, to_apply=add })", "$0", dtype_is_complex ? "(0, 0)" : "0"); TF_ASSERT_OK_AND_ASSIGN( TestedInstruction ti, ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode)); RunSupportTest(std::move(ti), {1}, cc); } TEST_F(ReduceTest, IsTritonSupportedReductionWithMultidimensionalTile) { const std::string kHloTestTemplate = R"( add { Arg_0 = $0[] parameter(0) Arg_1 = $0[] parameter(1) ROOT add = $0[] add(Arg_0, Arg_1) } ENTRY triton_computation { parameter_0 = $0[3,125,127] parameter(0) constant_0 = $0[] constant(0) ROOT reduce = $0[3,125] reduce(parameter_0, constant_0), dimensions={2}, to_apply=add })"; TF_ASSERT_OK_AND_ASSIGN(TestedInstruction ti, ParseTemplateAndGetInstruction(kHloTestTemplate, F32, HloOpcode::kReduce)); RunSupportTest(std::move(ti), {3, 4}, se::CudaComputeCapability::Ampere()); } TEST_P( ReduceTest, UnsupportedReduceWithMoreThanOneReduceDimensionsFailsGracefullyWithTriton) { auto [data_type, opcode, cc] = GetParam(); bool dtype_is_complex = data_type == C64 || data_type == C128; const std::string kHloTestTemplate = absl::Substitute(R"( add { Arg_0 = $0[] parameter(0) Arg_1 = $0[] parameter(1) ROOT add = $0[] add(Arg_0, Arg_1) } ENTRY triton_computation { parameter_0 = $0[2,125,127] parameter(0) constant_0 = $0[] constant($1) ROOT reduce = $0[2] reduce(parameter_0, constant_0), dimensions={1,2}, to_apply=add })", "$0", dtype_is_complex ? "(0, 0)" : "0"); TF_ASSERT_OK_AND_ASSIGN( TestedInstruction ti, ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode)); EXPECT_FALSE(IsTritonSupportedInstruction(ti.Instruction(), cc)); RunSupportTest(std::move(ti), {1}, cc); } TEST_P(ReduceTest, IsTritonSupportedReduceWithNonLastReduceDimension) { auto [data_type, opcode, cc] = GetParam(); bool dtype_is_complex = data_type == C64 || data_type == C128; const std::string kHloTestTemplate = absl::Substitute(R"( add { Arg_0 = $0[] parameter(0) Arg_1 = $0[] parameter(1) ROOT add = $0[] add(Arg_0, Arg_1) } ENTRY triton_computation { parameter_0 = $0[125,127] parameter(0) constant_0 = $0[] constant($1) ROOT reduce = $0[127] reduce(parameter_0, constant_0), dimensions={0}, to_apply=add })", "$0", dtype_is_complex ? 
"(0, 0)" : "0"); TF_ASSERT_OK_AND_ASSIGN( TestedInstruction ti, ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode)); RunSupportTest(std::move(ti), {1}, cc); } TEST_P(ReduceTest, UnsupportedReduceWithMoreThanOneOperandsFailsGracefullyWithTriton) { auto [data_type, opcode, cc] = GetParam(); bool dtype_is_complex = data_type == C64 || data_type == C128; const std::string kHloTestTemplate = absl::Substitute(R"( add { Arg_0 = $0[] parameter(0) Arg_1 = $0[] parameter(1) Arg_2 = $0[] parameter(2) Arg_3 = $0[] parameter(3) add_0 = $0[] add(Arg_0, Arg_2) add_1 = $0[] add(Arg_1, Arg_3) ROOT pair = ($0[], $0[]) tuple(add_0, add_1) } ENTRY triton_computation { parameter_0 = $0[125,127] parameter(0) constant_0 = $0[] constant($1) tuple = ($0[125], $0[125]) reduce( parameter_0, parameter_0, constant_0, constant_0), dimensions={1}, to_apply=add ROOT reduce = $0[125] get-tuple-element(tuple), index=0 })", "$0", dtype_is_complex ? "(0, 0)" : "0"); TF_ASSERT_OK_AND_ASSIGN( TestedInstruction ti, ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode)); EXPECT_FALSE(IsTritonSupportedInstruction(ti.Instruction(), cc)); RunSupportTest(std::move(ti), {1}, cc); } TEST_F(ReduceTest, ReduceWithNonConstReduceValueIsSupportedWithTriton) { const se::GpuComputeCapability cc = se::CudaComputeCapability::Ampere(); const std::string kHloTestTemplate = R"( add { Arg_0 = $0[] parameter(0) Arg_1 = $0[] parameter(1) ROOT add = $0[] add(Arg_0, Arg_1) } ENTRY triton_computation { parameter_0 = $0[125,127] parameter(0) init = $0[] parameter(1) ROOT reduce = $0[125] reduce(parameter_0, init), dimensions={1}, to_apply=add })"; TF_ASSERT_OK_AND_ASSIGN(TestedInstruction ti, ParseTemplateAndGetInstruction(kHloTestTemplate, F32, HloOpcode::kReduce)); EXPECT_TRUE(IsTritonSupportedInstruction(ti.Instruction(), cc)); RunSupportTest(std::move(ti), {2}, cc); } TEST_P(ReduceTest, UnsupportedReductionComputationFailsGracefullyWithTriton) { auto [data_type, opcode, cc] = GetParam(); bool dtype_is_complex = data_type == C64 || data_type == C128; const std::string kHloTestTemplate = absl::Substitute(R"( custom_call { Arg_0 = $0[] parameter(0) Arg_1 = $0[] parameter(1) ROOT custom_call = $0[] custom-call(Arg_0, Arg_1), custom_call_target="foo" } ENTRY triton_computation { parameter_0 = $0[125,127] parameter(0) constant_0 = $0[] constant($1) ROOT reduce = $0[125] reduce(parameter_0, constant_0), dimensions={1}, to_apply=custom_call })", "$0", dtype_is_complex ? 
"(0, 0)" : "0"); TF_ASSERT_OK_AND_ASSIGN( TestedInstruction ti, ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode)); EXPECT_FALSE(IsTritonSupportedInstruction(ti.Instruction(), cc)); RunSupportTest(std::move(ti), {1}, cc); } constexpr std::array kTestedOpsReduction = {HloOpcode::kReduce}; INSTANTIATE_TEST_SUITE_P(ReduceTestSuite, ReduceTest, AllTestCombinationsForOpcodes(kTestedOpsReduction), TritonSupportTestTypeAndOpcodeAndDeviceToString); using ReductionComputationTest = TritonSupportTestWithTypeAndOpcodeAndDeviceParam; TEST_P(ReductionComputationTest, DifferentBinaryOps) { auto [data_type, opcode, cc] = GetParam(); bool dtype_is_complex = data_type == C64 || data_type == C128; const std::string kHloTestTemplate = absl::Substitute( R"( reduce_computation { Arg_0 = $0[] parameter(0) Arg_1 = $0[] parameter(1) ROOT output = $0[] $1(Arg_0, Arg_1) } ENTRY triton_computation { parameter_0 = $0[125,127] parameter(0) constant_0 = $0[] constant($2) ROOT reduce = $0[125] reduce(parameter_0, constant_0), dimensions={1}, to_apply=reduce_computation })", "$0", HloOpcodeString(opcode), dtype_is_complex ? "(0, 0)" : "0"); TF_ASSERT_OK_AND_ASSIGN(TestedInstruction ti, ParseTemplateAndGetInstruction( kHloTestTemplate, data_type, HloOpcode::kReduce)); bool skip_failure_branch_to_avoid_crash = opcode == HloOpcode::kDivide && (data_type == BF16 || data_type == F16 || data_type == F8E4M3FN || data_type == F8E5M2); RunSupportTest(std::move(ti), {1}, cc, skip_failure_branch_to_avoid_crash); } std::vector<HloOpcode> ExcludeOps(absl::Span<const HloOpcode> all_ops, absl::Span<const HloOpcode> ops_to_exclude) { std::vector<HloOpcode> ret; for (HloOpcode op : all_ops) { if (!absl::c_linear_search(ops_to_exclude, op)) { ret.push_back(op); } } return ret; } INSTANTIATE_TEST_SUITE_P( ReductionComputationTestSuite, ReductionComputationTest, AllTestCombinationsForOpcodes(ExcludeOps(kTestedOpsBinaryElementwise, {HloOpcode::kCompare})), TritonSupportTestTypeAndOpcodeAndDeviceToString); using TransposeTest = TritonSupportTestWithTypeAndOpcodeAndDeviceParam; TEST_P(TransposeTest, LoadTranspose3D) { auto [data_type, opcode, cc] = GetParam(); const std::string kHloTestTemplate = R"( ENTRY triton_computation { parameter_0 = $0[125,127,37] parameter(0) ROOT transpose = $0[127,37,125] $1(parameter_0), dimensions={1,2,0} })"; TF_ASSERT_OK_AND_ASSIGN( TestedInstruction ti, ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode)); RunSupportTest(std::move(ti), {1, 32, 16}, cc); } constexpr std::array kTestedOpsTranspose = {HloOpcode::kTranspose}; INSTANTIATE_TEST_SUITE_P(TransposeTestSuite, TransposeTest, AllTestCombinationsForOpcodes(kTestedOpsTranspose), TritonSupportTestTypeAndOpcodeAndDeviceToString); class TritonSupportTestWithTypeAndDeviceParam : public TritonSupportTest, public ::testing::WithParamInterface< std::tuple<PrimitiveType, se::GpuComputeCapability>> {}; using SliceTest = TritonSupportTestWithTypeAndOpcodeAndDeviceParam; TEST_P(SliceTest, ContinuousSlice) { auto [data_type, opcode, cc] = GetParam(); const std::string kHloTestTemplate = (R"( ENTRY triton_computation { p = $0[128,32] parameter(0) ROOT slice = $0[12,5] $1(p), slice={[116:128], [20:25]} })"); TF_ASSERT_OK_AND_ASSIGN( TestedInstruction ti, ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode)); RunSupportTest(std::move(ti), {8, 4}, cc); } TEST_P(SliceTest, NonContinuousSliceWhereStrideDividesOffsetEvenly) { auto [data_type, opcode, cc] = GetParam(); const std::string kHloTestTemplate = (R"( ENTRY 
triton_computation { p = f32[16,16,32] parameter(0) ROOT slice = f32[4,4,8] slice(p), slice={[2:10:2], [2:6], [3:11]} })"); TF_ASSERT_OK_AND_ASSIGN( TestedInstruction ti, ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode)); RunSupportTest(std::move(ti), {2, 2, 2}, cc); } TEST_P(SliceTest, NonContinuousSliceWhereStrideDoesNotDivideOffsetEvenly) { auto [data_type, opcode, cc] = GetParam(); const std::string kHloTestTemplate = (R"( ENTRY triton_computation { p = f32[16,16,32] parameter(0) ROOT slice = f32[4,4,8] slice(p), slice={[3:11:2], [2:6], [3:11]} })"); TF_ASSERT_OK_AND_ASSIGN( TestedInstruction ti, ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode)); RunSupportTest(std::move(ti), {2, 2, 2}, cc); } constexpr std::array kTestedOpsSlice = {HloOpcode::kSlice}; INSTANTIATE_TEST_SUITE_P(SliceTestSuite, SliceTest, AllTestCombinationsForOpcodes(kTestedOpsSlice), TritonSupportTestTypeAndOpcodeAndDeviceToString); using CollectiveTest = TritonSupportTestWithTypeAndDeviceParam; TEST_P(CollectiveTest, UnsupportedAllGatherFailsGracefullyWithTriton) { auto [data_type, cc] = GetParam(); const std::string kHloTestTemplate = R"( ENTRY triton_computation { input = $0[128,32] parameter(0) ROOT all-gather = $0[128,128] all-gather(input), replica_groups={}, dimensions={1} })"; TF_ASSERT_OK_AND_ASSIGN(TestedInstruction ti, ParseTemplateAndGetInstruction( kHloTestTemplate, data_type, HloOpcode::kAllGather)); EXPECT_FALSE(IsTritonSupportedInstruction(ti.Instruction(), cc)); RunSupportTest(std::move(ti), {2, 2}, cc); } TEST_P(CollectiveTest, UnsupportedAllGatherStartFailsGracefullyWithTriton) { auto [data_type, cc] = GetParam(); const std::string kHloTestTemplate = R"( ENTRY triton_computation { input = $0[128,32] parameter(0) ROOT all-gather-start = ($0[128,32], $0[256,32]) all-gather-start(input), replica_groups={{0,1}}, dimensions={0} })"; TF_ASSERT_OK_AND_ASSIGN( TestedInstruction ti, ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, HloOpcode::kAllGatherStart)); EXPECT_FALSE(IsTritonSupportedInstruction(ti.Instruction(), cc)); RunSupportTest(std::move(ti), {2, 2}, cc); } TEST_P(CollectiveTest, UnsupportedAllGatherDoneFailsGracefullyWithTriton) { auto [data_type, cc] = GetParam(); const std::string kHloTestTemplate = R"( ENTRY triton_computation { input = ($0[128,32], $0[128,32]) parameter(0) ROOT all-gather-done = $0[128,32] all-gather-done(input) })"; TF_ASSERT_OK_AND_ASSIGN(TestedInstruction ti, ParseTemplateAndGetInstruction( kHloTestTemplate, data_type, HloOpcode::kAllGatherDone)); EXPECT_FALSE(IsTritonSupportedInstruction(ti.Instruction(), cc)); RunSupportTest(std::move(ti), {2, 2}, cc); } TEST_P(CollectiveTest, UnsupportedAllReduceFailsGracefullyWithTriton) { auto [data_type, cc] = GetParam(); const std::string kHloTestTemplate = R"( apply_op { x = $0[] parameter(0) y = $0[] parameter(1) ROOT apply_op = $0[] add(x, y) } ENTRY triton_computation { input = $0[128,32] parameter(0) ROOT all-reduce = $0[128,32] all-reduce(input), replica_groups={}, to_apply=apply_op })"; TF_ASSERT_OK_AND_ASSIGN(TestedInstruction ti, ParseTemplateAndGetInstruction( kHloTestTemplate, data_type, HloOpcode::kAllReduce)); EXPECT_FALSE(IsTritonSupportedInstruction(ti.Instruction(), cc)); RunSupportTest(std::move(ti), {2, 2}, cc); } TEST_P(CollectiveTest, UnsupportedAllReduceStartAndDoneFailGracefullyWithTriton) { auto [data_type, cc] = GetParam(); const std::string kHloTestTemplate = R"( apply_op { x = $0[] parameter(0) y = $0[] parameter(1) ROOT apply_op = $0[] add(x, y) } ENTRY 
triton_computation { input = $0[128,32] parameter(0) all-reduce-start = $0[128,32] all-reduce-start(input), replica_groups={}, to_apply=apply_op ROOT all-reduce-done = $0[128,32] all-reduce-done(all-reduce-start) })"; TF_ASSERT_OK_AND_ASSIGN( TestedInstruction ti, ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, HloOpcode::kAllReduceStart)); EXPECT_FALSE(IsTritonSupportedInstruction(ti.Instruction(), cc)); EXPECT_FALSE(IsTritonSupportedInstruction( *ti.TritonComputation().root_instruction(), cc)); RunSupportTest(std::move(ti), {2, 2}, cc); } TEST_P(CollectiveTest, UnsupportedAllToAllFailsGracefullyWithTriton) { auto [data_type, cc] = GetParam(); const std::string kHloTestTemplate = R"( ENTRY triton_computation { input = $0[128,32] parameter(0) ROOT a2a = ($0[128,32]) all-to-all(input), replica_groups={} })"; TF_ASSERT_OK_AND_ASSIGN(TestedInstruction ti, ParseTemplateAndGetInstruction( kHloTestTemplate, data_type, HloOpcode::kAllToAll)); EXPECT_FALSE(IsTritonSupportedInstruction(ti.Instruction(), cc)); RunSupportTest(std::move(ti), {2, 2}, cc); } TEST_P(CollectiveTest, UnsupportedCollectivePermuteFailsGracefullyWithTriton) { auto [data_type, cc] = GetParam(); const std::string kHloTestTemplate = R"( ENTRY triton_computation { a = $0[128,32] parameter(0) ROOT collective-permute = $0[128,32] collective-permute(a), source_target_pairs={{1,0}, {0,1}, {2,2}} })"; TF_ASSERT_OK_AND_ASSIGN( TestedInstruction ti, ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, HloOpcode::kCollectivePermute)); EXPECT_FALSE(IsTritonSupportedInstruction(ti.Instruction(), cc)); RunSupportTest(std::move(ti), {2, 2}, cc); } TEST_P(CollectiveTest, UnsupportedReduceScatterFailsGracefullyWithTriton) { auto [data_type, cc] = GetParam(); const std::string kHloTestTemplate = R"( apply_op { lhs = $0[] parameter(0) rhs = $0[] parameter(1) ROOT apply_op = $0[] add(lhs, rhs) } ENTRY triton_computation { input = $0[8] parameter(0) ROOT result = $0[4] reduce-scatter(input), replica_groups={}, dimensions={0}, to_apply=apply_op })"; TF_ASSERT_OK_AND_ASSIGN(TestedInstruction ti, ParseTemplateAndGetInstruction( kHloTestTemplate, data_type, HloOpcode::kReduceScatter)); EXPECT_FALSE(IsTritonSupportedInstruction(ti.Instruction(), cc)); RunSupportTest(std::move(ti), {1}, cc); } TEST_P(CollectiveTest, UnsupportedAsyncStartAndUpdateAndDoneFailGracefullyWithTriton) { auto [data_type, cc] = GetParam(); const std::string kHloTestTemplate = R"( async_computation { ROOT p0 = $0[10] parameter(0) } ENTRY triton_computation { input = $0[10] parameter(0) async-start = (($0[10]), $0[10]) async-start(input), calls=async_computation async-update = (($0[10]), $0[10]) async-update(async-start), calls=async_computation ROOT async-done = $0[10] async-done(async-update), calls=async_computation })"; TF_ASSERT_OK_AND_ASSIGN(TestedInstruction ti, ParseTemplateAndGetInstruction( kHloTestTemplate, data_type, HloOpcode::kAsyncStart)); EXPECT_FALSE(IsTritonSupportedInstruction(ti.Instruction(), cc)); EXPECT_FALSE(IsTritonSupportedInstruction( *ti.TritonComputation().root_instruction(), cc)); EXPECT_FALSE(IsTritonSupportedInstruction( *ti.TritonComputation().root_instruction()->operand(0), cc)); RunSupportTest(std::move(ti), {1}, cc); } constexpr std::array kTestedOpsCollectives = { HloOpcode::kAllGather, HloOpcode::kAllGatherStart, HloOpcode::kAllGatherDone, HloOpcode::kAllReduce, HloOpcode::kAllReduceStart, HloOpcode::kAllReduceDone, HloOpcode::kAsyncDone, HloOpcode::kAsyncStart, HloOpcode::kAsyncUpdate, HloOpcode::kAllToAll, 
HloOpcode::kCollectivePermute, HloOpcode::kReduceScatter}; INSTANTIATE_TEST_SUITE_P( CollectiveTestSuite, CollectiveTest, ::testing::Combine(::testing::ValuesIn(AllXlaDataTypes()), ::testing::ValuesIn(AllDevicesToTest())), TritonSupportTestTypeAndDeviceToString); using BroadcastTest = TritonSupportTestWithTypeAndDeviceParam; TEST_P(BroadcastTest, Broadcast) { auto [data_type, cc] = GetParam(); const std::string kHloTestTemplate = R"( ENTRY triton_computation { input = $0[35,131] parameter(0) ROOT bcast = $0[3,35,131,12] broadcast(input), dimensions={1,2} })"; TF_ASSERT_OK_AND_ASSIGN(TestedInstruction ti, ParseTemplateAndGetInstruction( kHloTestTemplate, data_type, HloOpcode::kBroadcast)); RunSupportTest(std::move(ti), {2, 16, 32, 8}, cc); } constexpr std::array kTestedOpsBroadcast = {HloOpcode::kBroadcast}; INSTANTIATE_TEST_SUITE_P( BroadcastTestSuite, BroadcastTest, ::testing::Combine(::testing::ValuesIn(AllXlaDataTypes()), ::testing::ValuesIn(AllDevicesToTest())), TritonSupportTestTypeAndDeviceToString); using ParameterTest = TritonSupportTestWithTypeAndDeviceParam; TEST_P(ParameterTest, Parameter) { auto [data_type, cc] = GetParam(); const std::string kHloTestTemplate = R"( ENTRY triton_computation { input = $0[35,131] parameter(0) ROOT noop = $0[35,131] convert(input) })"; TF_ASSERT_OK_AND_ASSIGN(TestedInstruction ti, ParseTemplateAndGetInstruction( kHloTestTemplate, data_type, HloOpcode::kParameter)); RunSupportTest(std::move(ti), {16, 32}, cc); } constexpr std::array kTestedOpsParameter = {HloOpcode::kParameter}; INSTANTIATE_TEST_SUITE_P( ParameterTestSuite, ParameterTest, ::testing::Combine(::testing::ValuesIn(AllXlaDataTypes()), ::testing::ValuesIn(AllDevicesToTest())), TritonSupportTestTypeAndDeviceToString); using ConstantTest = TritonSupportTestWithTypeAndDeviceParam; TEST_P(ConstantTest, Constant2D) { auto [data_type, cc] = GetParam(); bool dtype_is_complex = data_type == C64 || data_type == C128; const std::string kHloTestTemplate = absl::Substitute(R"( ENTRY triton_computation { ROOT const = $0[3,3] constant({{$1,$1,$1},{$1,$1,$1},{$1,$1,$1}}) })", "$0", dtype_is_complex ? 
"(0, 0)" : "0"); TF_ASSERT_OK_AND_ASSIGN(TestedInstruction ti, ParseTemplateAndGetInstruction( kHloTestTemplate, data_type, HloOpcode::kConstant)); RunSupportTest(std::move(ti), {2, 2}, cc); } constexpr std::array kTestedOpsConstant = {HloOpcode::kConstant}; INSTANTIATE_TEST_SUITE_P( ConstantTestSuite, ConstantTest, ::testing::Combine(::testing::ValuesIn(AllXlaDataTypes()), ::testing::ValuesIn(AllDevicesToTest())), TritonSupportTestTypeAndDeviceToString); using IotaTest = TritonSupportTestWithTypeAndOpcodeAndDeviceParam; TEST_P(IotaTest, Iota2D) { auto [data_type, opcode, cc] = GetParam(); const std::string kHloTestTemplate = R"( ENTRY triton_computation { ROOT input = $0[35,131] iota(), iota_dimension=0 })"; TF_ASSERT_OK_AND_ASSIGN( TestedInstruction ti, ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode)); RunSupportTest(std::move(ti), {16, 32}, cc); } constexpr std::array kTestedOpsIota = {HloOpcode::kIota}; INSTANTIATE_TEST_SUITE_P(IotaTestSuite, IotaTest, AllTestCombinationsForOpcodes(kTestedOpsIota), TritonSupportTestTypeAndOpcodeAndDeviceToString); using RngTest = TritonSupportTestWithTypeAndOpcodeAndDeviceParam; TEST_P(RngTest, Rng) { auto [data_type, opcode, cc] = GetParam(); const std::string kHloTestTemplate = R"( ENTRY triton_computation { low = $0[] parameter(0) high = $0[] parameter(1) ROOT root = $0[33,77] rng(low, high), distribution=rng_uniform })"; TF_ASSERT_OK_AND_ASSIGN( TestedInstruction ti, ParseTemplateAndGetInstruction(kHloTestTemplate, data_type, opcode)); RunSupportTest(std::move(ti), {16, 32}, cc); } constexpr std::array kTestedOpsRng = {HloOpcode::kRng}; INSTANTIATE_TEST_SUITE_P(RngTestSuite, RngTest, AllTestCombinationsForOpcodes(kTestedOpsRng), TritonSupportTestTypeAndOpcodeAndDeviceToString); constexpr std::array kUnsupportedOps = {HloOpcode::kAddDependency, HloOpcode::kAfterAll, HloOpcode::kBatchNormGrad, HloOpcode::kBatchNormInference, HloOpcode::kBatchNormTraining, HloOpcode::kBitcastConvert, HloOpcode::kCall, HloOpcode::kCholesky, HloOpcode::kCollectiveBroadcast, HloOpcode::kCollectivePermuteDone, HloOpcode::kCollectivePermuteStart, HloOpcode::kComplex, HloOpcode::kConcatenate, HloOpcode::kConditional, HloOpcode::kConvolution, HloOpcode::kCopy, HloOpcode::kCopyDone, HloOpcode::kCopyStart, HloOpcode::kCustomCall, HloOpcode::kDomain, HloOpcode::kDot, HloOpcode::kDynamicReshape, HloOpcode::kDynamicSlice, HloOpcode::kDynamicUpdateSlice, HloOpcode::kFft, HloOpcode::kFusion, HloOpcode::kGather, HloOpcode::kGetDimensionSize, HloOpcode::kGetTupleElement, HloOpcode::kInfeed, HloOpcode::kMap, HloOpcode::kOptimizationBarrier, HloOpcode::kOutfeed, HloOpcode::kPad, HloOpcode::kPartitionId, HloOpcode::kRecv, HloOpcode::kRecvDone, HloOpcode::kReduceWindow, HloOpcode::kReplicaId, HloOpcode::kReverse, HloOpcode::kRngBitGenerator, HloOpcode::kRngGetAndUpdateState, HloOpcode::kScatter, HloOpcode::kSelectAndScatter, HloOpcode::kSend, HloOpcode::kSendDone, HloOpcode::kSetDimensionSize, HloOpcode::kSort, HloOpcode::kStochasticConvert, HloOpcode::kTopK, HloOpcode::kTriangularSolve, HloOpcode::kTuple, HloOpcode::kWhile}; absl::flat_hash_set<HloOpcode> AllTestedOpcodes() { absl::flat_hash_set<HloOpcode> ret; ret.insert(kTestedOpsBitcastReshape.begin(), kTestedOpsBitcastReshape.end()); ret.insert(kTestedOpsUnaryElementwise.begin(), kTestedOpsUnaryElementwise.end()); ret.insert(kTestedOpsConvert.begin(), kTestedOpsConvert.end()); ret.insert(kTestedOpsBinaryElementwise.begin(), kTestedOpsBinaryElementwise.end()); ret.insert(kTestedOpsTernaryElementwise.begin(), 
kTestedOpsTernaryElementwise.end()); ret.insert(kTestedOpsReduction.begin(), kTestedOpsReduction.end()); ret.insert(kTestedOpsSlice.begin(), kTestedOpsSlice.end()); ret.insert(kTestedOpsTranspose.begin(), kTestedOpsTranspose.end()); ret.insert(kTestedOpsCollectives.begin(), kTestedOpsCollectives.end()); ret.insert(kTestedOpsBroadcast.begin(), kTestedOpsBroadcast.end()); ret.insert(kTestedOpsParameter.begin(), kTestedOpsParameter.end()); ret.insert(kTestedOpsConstant.begin(), kTestedOpsConstant.end()); ret.insert(kTestedOpsIota.begin(), kTestedOpsIota.end()); ret.insert(kTestedOpsRng.begin(), kTestedOpsRng.end()); ret.insert(kUnsupportedOps.begin(), kUnsupportedOps.end()); return ret; } TEST(OpCoverage, UnsupportedOpcodes) { for (HloOpcode opcode : kUnsupportedOps) { EXPECT_TRUE(internal::IsTritonUnsupportedOpcode(opcode)); } } TEST(OpCoverage, AllOpcodesAreTested) { absl::flat_hash_set<HloOpcode> tested_opcodes = AllTestedOpcodes(); for (int opcode_index = 0; opcode_index < HloOpcodeCount(); ++opcode_index) { auto opcode = static_cast<HloOpcode>(opcode_index); EXPECT_TRUE(tested_opcodes.contains(opcode)) << "Opcode `" << HloOpcodeString(opcode) << "` is not tested."; } } } } }
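The suites above are GoogleTest value-parameterized tests: a fixture parameterized over a tuple of values, expanded through INSTANTIATE_TEST_SUITE_P with Combine and Values/ValuesIn. A minimal standalone sketch of the same pattern; the DataType enum and the support rule below are hypothetical stand-ins, not XLA types:

#include <string>
#include <tuple>
#include <gtest/gtest.h>

enum class DataType { kF16, kF32, kPred };

// Hypothetical support rule standing in for the real support queries:
// everything except a pred-typed "divide" is accepted.
bool IsSupported(DataType type, const std::string& op) {
  return !(type == DataType::kPred && op == "divide");
}

// Fixture parameterized over a (type, opcode-name) tuple, analogous to the
// tuple-parameterized fixtures above.
class SupportTest
    : public ::testing::TestWithParam<std::tuple<DataType, std::string>> {};

TEST_P(SupportTest, ReportsExpectedSupport) {
  auto [type, op] = GetParam();
  if (type == DataType::kPred && op == "divide") {
    EXPECT_FALSE(IsSupported(type, op));
  } else {
    EXPECT_TRUE(IsSupported(type, op));
  }
}

// Expands to one test per combination of the listed values.
INSTANTIATE_TEST_SUITE_P(
    AllCombinations, SupportTest,
    ::testing::Combine(
        ::testing::Values(DataType::kF16, DataType::kF32, DataType::kPred),
        ::testing::Values(std::string("add"), std::string("divide"))));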
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/triton/triton_support.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/triton/triton_support_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
52d6522f-ddf2-4de4-9a6c-6004ec920947
cpp
tensorflow/tensorflow
input_slices
third_party/xla/xla/service/gpu/fusions/legacy/input_slices.cc
third_party/xla/xla/service/gpu/fusions/legacy/input_slices_test.cc
#include "xla/service/gpu/fusions/legacy/input_slices.h" #include <cstddef> #include <cstdint> #include <optional> #include <vector> #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "absl/types/span.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/Value.h" #include "mlir/IR/MLIRContext.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/elemental_ir_emitter.h" #include "xla/service/gpu/elemental_ir_emitter.h" #include "xla/service/gpu/ir_emission_utils.h" #include "xla/service/gpu/ir_emitter_context.h" #include "xla/service/gpu/launch_dimensions.h" #include "xla/service/gpu/model/indexing_map.h" #include "xla/service/gpu/parallel_loop_emitter.h" #include "xla/service/llvm_ir/fused_ir_emitter.h" #include "xla/service/llvm_ir/ir_array.h" #include "xla/service/llvm_ir/kernel_support_library.h" #include "xla/service/llvm_ir/llvm_loop.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/util.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { absl::Status EmitElementForInputFusibleSlices( ElementalIrEmitter& elemental_emitter, const HloComputation* fused_computation, const std::vector<llvm_ir::IrArray>& inputs, const std::vector<llvm_ir::IrArray>& outputs, const llvm_ir::IrArray::Index& index, llvm::IRBuilder<>* builder) { VLOG(10) << "Emitting slice input fusion for " << fused_computation->ToString(); HloInstruction* slice_or_tuple = fused_computation->root_instruction(); auto slice_instructions = [&]() -> absl::Span<HloInstruction* const> { if (slice_or_tuple->opcode() == HloOpcode::kSlice) { return absl::Span<HloInstruction* const>(&slice_or_tuple, 1); } CHECK_EQ(slice_or_tuple->opcode(), HloOpcode::kTuple); return slice_or_tuple->operands(); }(); std::vector<llvm::Value*> input_ir_values; FusedIrEmitter fused_emitter(elemental_emitter); for (int i = 0; i < fused_computation->num_parameters(); i++) { fused_emitter.BindGenerator( *fused_computation->parameter_instruction(i), [&inputs, i, builder](llvm_ir::IrArray::Index index) { return inputs[i].EmitReadArrayElement(index, builder); }); } for (const HloInstruction* slice : slice_instructions) { auto input_generator = *fused_emitter.GetGenerator(*slice->operand(0)); input_ir_values.push_back(input_generator(index).value()); } KernelSupportLibrary ksl(builder, llvm_ir::UnrollMode::kDefaultUnroll); for (int64_t i = 0; i < slice_instructions.size(); ++i) { HloInstruction* slice = slice_instructions[i]; std::vector<llvm::Value*> index_within_ranges; for (size_t dim = 0; dim < slice->slice_starts().size(); ++dim) { CHECK_EQ(slice->slice_strides(dim), 1); auto larger_or_equal_than_start = builder->CreateICmpSGE( index.multidim()[dim], index.GetConstantWithIndexType(slice->slice_starts(dim))); llvm::Value* smaller_than_limit = builder->CreateICmpSLT( index.multidim()[dim], index.GetConstantWithIndexType(slice->slice_limits(dim))); llvm::Value* within_range = builder->CreateAnd(larger_or_equal_than_start, smaller_than_limit); index_within_ranges.push_back(within_range); } llvm::Value* guarding_cond = builder->CreateAnd(index_within_ranges); auto emit_slice_elem_func = [&] { const std::vector<llvm::Value*>& src_multidim = index.multidim(); std::vector<llvm::Value*> dst_multidim(src_multidim.size()); for (size_t dim = 0; dim < src_multidim.size(); ++dim) { dst_multidim[dim] = builder->CreateSub( 
src_multidim[dim], index.GetConstantWithIndexType(slice->slice_starts(dim))); } const llvm_ir::IrArray& src_ir_array = outputs[i]; llvm_ir::IrArray::Index slice_dst_index(dst_multidim, slice->shape(), index.GetType()); src_ir_array.EmitWriteArrayElement(slice_dst_index, input_ir_values[i], builder); }; ksl.If(absl::StrCat("slice", i), guarding_cond, emit_slice_elem_func); } return absl::OkStatus(); } absl::StatusOr<Shape> GetConsistentInputShapeForRootSlices( const HloComputation* fused_computation) { const HloInstruction& root = *fused_computation->root_instruction(); if (root.opcode() == HloOpcode::kSlice) { return root.operands()[0]->shape(); } CHECK_EQ(root.opcode(), HloOpcode::kTuple); const Shape& first_slice_operand_shape = root.operands()[0]->operands()[0]->shape(); for (size_t i = 1; i < root.operands().size(); ++i) { const HloInstruction* slice = root.operands()[i]; const Shape& operand_shape = slice->operands()[0]->shape(); if (!ShapeUtil::EqualIgnoringElementType(first_slice_operand_shape, operand_shape)) { return FailedPrecondition( "Fused slices do not have the same input shape, fused computation = " "%s.", root.parent()->name()); } } return first_slice_operand_shape; } } LaunchDimensions InputSlicesFusion::launch_dimensions() const { const auto& root = analysis_.fusion_root(0).instruction(); const auto& shape = root.operand(0)->shape(); return CalculateLaunchDimensions(shape, analysis_.device_info(), {unroll_factor_}); } std::optional<IndexingMap> InputSlicesFusion::ComputeThreadIdToOutputIndexing( int64_t output_id, mlir::MLIRContext* ctx) const { auto launch_dims = launch_dimensions(); const auto& shape = analysis_.fusion_root(output_id).shape(); return GetDefaultThreadIdIndexingMap(launch_dims, unroll_factor_, shape, ctx); } absl::Status InputSlicesFusion::EmitKernel( IrEmitterContext& ir_emitter_context, const HloFusionInstruction& fusion, const LaunchDimensions& launch_dims, std::vector<llvm_ir::IrArray> inputs, std::vector<llvm_ir::IrArray> outputs, llvm::IRBuilder<>* builder) const { TF_ASSIGN_OR_RETURN(Shape element_shape, GetConsistentInputShapeForRootSlices( fusion.fused_instructions_computation())); LaunchDimensionsConfig launch_config; launch_config.unroll_factor = unroll_factor_; GpuElementalIrEmitter elemental_emitter(ir_emitter_context, builder); return ParallelLoopEmitter( [&](const llvm_ir::IrArray::Index index) -> absl::Status { return EmitElementForInputFusibleSlices( elemental_emitter, fusion.fused_instructions_computation(), inputs, outputs, index, builder); }, element_shape, launch_dims, builder, launch_config) .EmitLoop( fusion.name(), GetIndexTypeForKernel(&fusion, launch_dims.launch_bound(), builder)); } } }
#include "xla/service/gpu/fusions/legacy/input_slices.h" #include <optional> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "mlir/IR/MLIRContext.h" #include "xla/service/gpu/fusions/fusions.h" #include "xla/service/gpu/gpu_device_info_for_tests.h" #include "xla/service/gpu/hlo_fusion_analysis.h" #include "xla/service/gpu/model/indexing_map_serialization.h" #include "xla/service/gpu/model/indexing_test_utils.h" #include "xla/stream_executor/device_description.h" #include "xla/tests/hlo_test_base.h" namespace xla { namespace gpu { namespace { class InputSlicesTest : public HloTestBase { protected: DebugOptions GetDebugOptionsForTest() override { auto opts = HloTestBase::GetDebugOptionsForTest(); opts.set_xla_gpu_mlir_emitter_level(0); return opts; } mlir::MLIRContext mlir_context_; }; TEST_F(InputSlicesTest, ThreadIndexing) { auto module = ParseAndReturnVerifiedModule(R"( HloModule module fused_computation { %input = f32[2,3,5,7]{2,1,0,3} parameter(0) slice0 = f32[1,2,3,5]{2,1,0,3} slice(input), slice={[0:1],[1:3],[0:3],[2:7]} slice1 = f32[1,2,3,5]{2,1,0,3} slice(input), slice={[0:1],[0:2],[0:3],[2:7]} ROOT tuple = (f32[1,2,3,5]{2,1,0,3}, f32[1,2,3,5]{2,1,0,3}) tuple(slice0, slice1) } ENTRY entry { %input = f32[2,3,5,7]{2,1,0,3} parameter(0) ROOT %fusion = (f32[1,2,3,5]{2,1,0,3}, f32[1,2,3,5]{2,1,0,3}) fusion(%input), kind=kLoop, calls=fused_computation })") .value(); stream_executor::DeviceDescription device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo(); auto* root = module->entry_computation()->root_instruction(); auto analysis_fused = HloFusionAnalysis::Create(*root, device_info); auto emitter = GetFusionEmitter(PreBufferAssignmentFusionInfo{analysis_fused}); auto fusion = dynamic_cast<InputSlicesFusion*>(emitter.get()); ASSERT_NE(fusion, nullptr); auto thread_id_to_output_indexing = fusion->ComputeThreadIdToOutputIndexing(0, &mlir_context_); EXPECT_THAT(ToString(*thread_id_to_output_indexing, {"th_x", "th_y", "th_z", "bl_x", "bl_y", "bl_z"}, {"chunk_id", "unroll_id"}, {}), MatchIndexingString(R"( (th_x, th_y, th_z, bl_x, bl_y, bl_z)[chunk_id, unroll_id] -> (0, ((bl_x * 128 + th_x) floordiv 3) mod 2, (bl_x * 128 + th_x) mod 3, (bl_x * 128 + th_x) floordiv 6), domain: th_x in [0, 127], th_y in [0, 0], th_z in [0, 0], bl_x in [0, 1], bl_y in [0, 0], bl_z in [0, 0], chunk_id in [0, 0], unroll_id in [0, 0], bl_x * 128 + th_x in [0, 29] )")); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/legacy/input_slices.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/legacy/input_slices_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
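The input-slices emitter in the record above guards every write with a per-dimension bounds check and then rebases the index by the slice start. The following is a minimal standalone sketch of that guard/rebase step in plain C++; it is not part of XLA, and the struct and function names are made up for illustration.

#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical helper mirroring EmitElementForInputFusibleSlices: an output
// element of a slice is written only if the source index lies in
// [start, limit) on every dimension (stride 1, as the emitter CHECKs), and
// the destination index is the source index minus the slice start.
struct SliceSpec {
  std::vector<int64_t> starts;
  std::vector<int64_t> limits;  // exclusive
};

bool MapSourceToSliceIndex(const std::vector<int64_t>& src,
                           const SliceSpec& slice,
                           std::vector<int64_t>* dst) {
  dst->resize(src.size());
  for (size_t d = 0; d < src.size(); ++d) {
    if (src[d] < slice.starts[d] || src[d] >= slice.limits[d]) return false;
    (*dst)[d] = src[d] - slice.starts[d];  // rebase into the slice output
  }
  return true;
}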
6ff23afd-0656-478f-be98-99d1185d6c17
cpp
tensorflow/tensorflow
in_place_dynamic_update_slice
third_party/xla/xla/service/gpu/fusions/legacy/in_place_dynamic_update_slice.cc
third_party/xla/xla/service/gpu/fusions/legacy/in_place_dynamic_update_slice_test.cc
#include "xla/service/gpu/fusions/legacy/in_place_dynamic_update_slice.h" #include <optional> #include <utility> #include <vector> #include "absl/status/status.h" #include "llvm/ADT/STLExtras.h" #include "llvm/IR/IRBuilder.h" #include "mlir/IR/MLIRContext.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/service/gpu/elemental_ir_emitter.h" #include "xla/service/gpu/ir_emitter_context.h" #include "xla/service/gpu/launch_dimensions.h" #include "xla/service/gpu/model/indexing_map.h" #include "xla/service/llvm_ir/dynamic_update_slice_util.h" #include "xla/service/llvm_ir/fused_ir_emitter.h" #include "xla/service/llvm_ir/ir_array.h" namespace xla { namespace gpu { namespace { constexpr int kDUSUpdateIndex = 1; } LaunchDimensions InPlaceDynamicUpdateSliceFusion::launch_dimensions() const { const auto& update_shape = dus_ops_.front().GetOperand(1).shape(); return CalculateLaunchDimensions(update_shape, analysis_.device_info()); } std::optional<IndexingMap> InPlaceDynamicUpdateSliceFusion::ComputeThreadIdToInputIndexing( int64_t root_index, int64_t hero_operand_index, mlir::MLIRContext* mlir_context) const { if (hero_operand_index != kDUSUpdateIndex) { return std::nullopt; } auto launch_dims = launch_dimensions(); const auto& update_shape = dus_ops_.front().GetOperand(kDUSUpdateIndex).shape(); return GetDefaultThreadIdIndexingMap(launch_dims, 1, update_shape, mlir_context); } absl::Status InPlaceDynamicUpdateSliceFusion::EmitKernel( IrEmitterContext& ir_emitter_context, const HloFusionInstruction& fusion, const LaunchDimensions& launch_dims, std::vector<llvm_ir::IrArray> inputs, std::vector<llvm_ir::IrArray> outputs, llvm::IRBuilder<>* builder) const { for (auto [op, output] : llvm::zip(dus_ops_, outputs)) { output = output.CastToShape(op.shape(), builder); } auto* fused_computation = fusion.fused_instructions_computation(); GpuElementalIrEmitter elemental_emitter(ir_emitter_context, builder); FusedIrEmitter fused_emitter(elemental_emitter); for (auto [index, input] : llvm::enumerate(inputs)) { auto fused_operand = fused_computation->parameter_instruction(index); fused_emitter.BindGenerator( *fused_operand, [input = input, builder, fused_operand](const llvm_ir::IrArray::Index& index) { return input.EmitReadArrayElement(index, builder, fused_operand->name()); }); } std::vector<std::pair<const HloInstruction*, const llvm_ir::IrArray>> dus_and_output_array; dus_and_output_array.reserve(dus_ops_.size()); for (auto [op, output] : llvm::zip(dus_ops_, outputs)) { dus_and_output_array.push_back(std::make_pair(&op.instruction(), output)); } return llvm_ir::EmitParallelFusedDynamicUpdateSliceInPlace( fused_computation, dus_and_output_array, &fused_emitter, launch_dims, builder); } } }
#include "xla/service/gpu/fusions/legacy/in_place_dynamic_update_slice.h" #include <optional> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "mlir/IR/MLIRContext.h" #include "xla/service/gpu/fusions/fusions.h" #include "xla/service/gpu/gpu_device_info_for_tests.h" #include "xla/service/gpu/hlo_fusion_analysis.h" #include "xla/service/gpu/model/indexing_map_serialization.h" #include "xla/service/gpu/model/indexing_test_utils.h" #include "xla/stream_executor/device_description.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { class InPlaceDynamicUpdateSliceFusionTest : public HloTestBase { protected: DebugOptions GetDebugOptionsForTest() override { auto opts = HloTestBase::GetDebugOptionsForTest(); opts.set_xla_gpu_mlir_emitter_level(0); return opts; } mlir::MLIRContext mlir_context_; stream_executor::DeviceDescription device_info_ = TestGpuDeviceInfo::RTXA6000DeviceInfo(); }; TEST_F(InPlaceDynamicUpdateSliceFusionTest, ThreadIndexing) { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"( HloModule module fused_computation { in = f32[20,30] parameter(0) updates = f32[5,6] parameter(1) i0 = s32[] parameter(2) i1 = s32[] parameter(3) ROOT updated = f32[20,30] dynamic-update-slice(in, updates, i0, i1) } ENTRY entry { in = f32[20,30] parameter(0) updates = f32[5,6] parameter(1) i0 = s32[] constant(2) i1 = s32[] constant(3) ROOT fusion = f32[20,30] fusion(in, updates, i0, i1), kind=kLoop, calls=fused_computation } )")); auto* root = module->entry_computation()->root_instruction(); auto analysis_fused = HloFusionAnalysis::Create(*root, device_info_); auto emitter = GetFusionEmitter(PreBufferAssignmentFusionInfo{analysis_fused}); auto fusion = dynamic_cast<InPlaceDynamicUpdateSliceFusion*>(emitter.get()); ASSERT_NE(fusion, nullptr); auto thread_id_update_indexing = fusion->ComputeThreadIdToInputIndexing( 0, 1, &mlir_context_); EXPECT_THAT(ToString(*thread_id_update_indexing, {"th_x", "th_y", "th_z", "bl_x", "bl_y", "bl_z"}, {"chunk_id", "unroll_id"}, {}), MatchIndexingString(R"( (th_x, th_y, th_z, bl_x, bl_y, bl_z)[chunk_id, unroll_id] -> ( th_x floordiv 6, th_x mod 6), domain: th_x in [0, 29], th_y in [0, 0], th_z in [0, 0], bl_x in [0, 0], bl_y in [0, 0], bl_z in [0, 0], chunk_id in [0, 0], unroll_id in [0, 0] )")); auto thread_id_dst_indexing = fusion->ComputeThreadIdToInputIndexing( 0, 0, &mlir_context_); EXPECT_THAT(thread_id_dst_indexing, ::testing::Eq(std::nullopt)); } TEST_F(InPlaceDynamicUpdateSliceFusionTest, ProduceConsumerFusion) { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"( HloModule m fused_computation.1 { param_0 = bf16[1,2,5,1,2] parameter(0) bitcast = bf16[1,5,1,2,2] bitcast(param_0) param_1 = bf16[1,1,1,2,2] parameter(1) param_2 = s32[] parameter(2) param_3 = s32[] parameter(3) ROOT dynamic-update-slice = bf16[1,5,1,2,2] dynamic-update-slice(bitcast, param_1, param_2, param_3, param_2, param_2, param_2) } ENTRY entry_computation { param_0.2 = bf16[1,2,5,1,2] parameter(3) param_1.2 = bf16[1,1,1,2,2] parameter(0) param_2.2 = s32[] parameter(1) param_3.2 = s32[] parameter(2) fusion = bf16[1,5,1,2,2] fusion(param_0.2, param_1.2, param_2.2, param_3.2), kind=kLoop, calls=fused_computation.1 ROOT bitcast.1 = bf16[1,2,5,1,2] bitcast(fusion) } )")); auto* root = module->entry_computation()->root_instruction(); auto analysis_fused = HloFusionAnalysis::Create(*root->operand(0), *root, device_info_); auto emitter = 
GetFusionEmitter(PreBufferAssignmentFusionInfo{analysis_fused}); auto fusion = dynamic_cast<InPlaceDynamicUpdateSliceFusion*>(emitter.get()); ASSERT_NE(fusion, nullptr); EXPECT_EQ(fusion->launch_dimensions().launch_bound(), 4 ); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/legacy/in_place_dynamic_update_slice.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/legacy/in_place_dynamic_update_slice_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
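The in-place dynamic-update-slice test above expects the thread-to-update indexing (th_x floordiv 6, th_x mod 6) for an f32[5,6] update, with the launch bound equal to the number of update elements. A small sketch of that delinearization, assuming a standalone helper rather than anything in XLA:

#include <cstdint>
#include <utility>

// For a row-major update of shape [rows, cols], thread th_x owns element
// (th_x / cols, th_x % cols); with cols = 6 this reproduces the
// (th_x floordiv 6, th_x mod 6) map, and th_x ranges over [0, 29].
std::pair<int64_t, int64_t> UpdateElementForThread(int64_t th_x, int64_t cols) {
  return {th_x / cols, th_x % cols};
}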
749dd783-2f81-4bc8-8995-fe9627988a02
cpp
tensorflow/tensorflow
loop
third_party/xla/xla/service/gpu/fusions/legacy/loop.cc
third_party/xla/xla/service/gpu/fusions/legacy/loop_test.cc
#include "xla/service/gpu/fusions/legacy/loop.h" #include <algorithm> #include <cstdint> #include <optional> #include <tuple> #include <utility> #include <vector> #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/numeric/bits.h" #include "absl/status/status.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/Type.h" #include "mlir/IR/MLIRContext.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/layout_util.h" #include "xla/service/gpu/elemental_ir_emitter.h" #include "xla/service/gpu/gpu_fusible.h" #include "xla/service/gpu/hlo_fusion_analysis.h" #include "xla/service/gpu/hlo_traversal.h" #include "xla/service/gpu/ir_emission_utils.h" #include "xla/service/gpu/ir_emitter_context.h" #include "xla/service/gpu/launch_dimensions.h" #include "xla/service/gpu/model/indexing_analysis.h" #include "xla/service/gpu/model/indexing_map.h" #include "xla/service/gpu/parallel_loop_emitter.h" #include "xla/service/llvm_ir/fused_ir_emitter.h" #include "xla/service/llvm_ir/ir_array.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/util.h" #include "tsl/platform/macros.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { const Shape& GetElementShape(const HloFusionAnalysis& analysis) { const Shape* shape = &analysis.fusion_root(0).shape(); while (shape->IsTuple()) { shape = &shape->tuple_shapes(0); } return *shape; } } LoopFusion::LoopFusion(const HloFusionAnalysis& analysis) : analysis_(analysis), config_(ComputeLoopFusionConfig(analysis)) {} std::optional<IndexingMap> LoopFusion::ComputeThreadIdToOutputIndexing( int64_t root_index, mlir::MLIRContext* ctx) const { auto launch_dims = launch_dimensions(); return GetDefaultThreadIdIndexingMap(launch_dims, config_.unroll_factor, GetElementShape(analysis_), ctx); } std::optional<IndexingMap> LoopFusion::ComputeThreadIdToInputIndexing( int64_t root_index, int64_t hero_operand_index, mlir::MLIRContext* ctx) const { std::optional<IndexingMap> thread_id_to_output_indexing = ComputeThreadIdToOutputIndexing(root_index, ctx); if (!thread_id_to_output_indexing.has_value()) { return std::nullopt; } const HloInstruction* fusion_root = &analysis_.fusion_root(root_index).instruction(); auto output_to_input_indexing = ComputeOutputToInputIndexing(fusion_root, 0, ctx); IndexingMapSet output_to_input_indexing_set = output_to_input_indexing.indexing_maps[hero_operand_index]; CHECK_EQ(output_to_input_indexing_set.size(), 1); IndexingMap thread_id_to_input_indexing_map = ComposeIndexingMaps( *thread_id_to_output_indexing, *output_to_input_indexing_set.begin()); thread_id_to_input_indexing_map.Simplify(); return thread_id_to_input_indexing_map; } absl::Status LoopFusion::EmitKernel(IrEmitterContext& ir_emitter_context, const HloFusionInstruction& fusion, const LaunchDimensions& launch_dims, std::vector<llvm_ir::IrArray> inputs, std::vector<llvm_ir::IrArray> outputs, llvm::IRBuilder<>* builder) const { GpuElementalIrEmitter elemental_emitter(ir_emitter_context, builder); FusedIrEmitter fused_emitter(elemental_emitter); for (int i = 0; i < fusion.fused_parameters().size(); i++) { fused_emitter.BindGenerator( *fusion.fused_parameter(i), [&, i](llvm_ir::IrArray::Index index) { return inputs[i].EmitReadArrayElement(index, builder); }); } TF_ASSIGN_OR_RETURN( auto element_generator, fused_emitter.GetGenerator(*fusion.fused_expression_root())); llvm::Type* index_type = GetIndexTypeForKernel(&fusion, launch_dims.launch_bound(), builder); 
return ParallelLoopEmitter(element_generator, outputs, launch_dims, builder, config_) .EmitLoop(fusion.name(), index_type); } LaunchDimensions LoopFusion::launch_dimensions() const { return CalculateLaunchDimensions(GetElementShape(analysis_), analysis_.device_info(), config_); } } }
#include <memory> #include <optional> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/status/statusor.h" #include "mlir/IR/MLIRContext.h" #include "xla/service/gpu/fusions/fusion_emitter.h" #include "xla/service/gpu/fusions/fusions.h" #include "xla/service/gpu/gpu_device_info_for_tests.h" #include "xla/service/gpu/hlo_fusion_analysis.h" #include "xla/service/gpu/model/indexing_map_serialization.h" #include "xla/service/gpu/model/indexing_test_utils.h" #include "xla/status_macros.h" #include "xla/stream_executor/device_description.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { class LoopTest : public HloTestBase { protected: stream_executor::DeviceDescription device_info_ = TestGpuDeviceInfo::RTXA6000DeviceInfo(); mlir::MLIRContext mlir_context_; }; absl::StatusOr<std::unique_ptr<KernelFusionInterface>> GetFusion( const HloFusionAnalysis& analysis) { auto emitter = GetFusionEmitter(PreBufferAssignmentFusionInfo{analysis}); auto fusion = dynamic_cast<KernelFusionInterface*>(emitter.get()); TF_RET_CHECK(fusion != nullptr); emitter.release(); return std::unique_ptr<KernelFusionInterface>{fusion}; } TEST_F(LoopTest, ThreadIndexingUnrolled) { auto module = ParseAndReturnVerifiedModule(R"( HloModule module neg { %input = f32[100,200,300] parameter(0) ROOT neg = f32[100,200,300] negate(%input) } ENTRY entry { %input = f32[100,200,300] parameter(0) ROOT %fusion = f32[100,200,300] fusion(%input), kind=kLoop, calls=neg })") .value(); auto* root = module->entry_computation()->root_instruction(); auto analysis = HloFusionAnalysis::Create(*root, device_info_); TF_ASSERT_OK_AND_ASSIGN(auto loop_fusion, GetFusion(analysis)); auto thread_id_to_output_indexing = loop_fusion->ComputeThreadIdToOutputIndexing(0, &mlir_context_); mlir::SmallVector<std::string> dim_names = {"th_x", "th_y", "th_z", "bl_x", "bl_y", "bl_z"}; mlir::SmallVector<std::string> range_names = {"chunk_id", "unroll_id"}; EXPECT_THAT( ToString(*thread_id_to_output_indexing, dim_names, range_names, {}), MatchIndexingString(R"( (th_x, th_y, th_z, bl_x, bl_y, bl_z)[chunk_id, unroll_id] -> ( (bl_x * 128 + th_x) floordiv 15000, ((bl_x * 128 + th_x) floordiv 75) mod 200, ((bl_x * 128 + th_x) mod 75) * 4 + unroll_id ), domain: th_x in [0, 127], th_y in [0, 0], th_z in [0, 0], bl_x in [0, 11718], bl_y in [0, 0], bl_z in [0, 0], chunk_id in [0, 0], unroll_id in [0, 3], bl_x * 128 + th_x in [0, 1499999] )")); } TEST_F(LoopTest, ThreadIndexingNotUnrolled) { auto module = ParseAndReturnVerifiedModule(R"( HloModule module neg { %input = f32[20] parameter(0) ROOT neg = f32[20] negate(%input) } ENTRY entry { %input = f32[20] parameter(0) ROOT %fusion = f32[20] fusion(%input), kind=kLoop, calls=neg })") .value(); auto* root = module->entry_computation()->root_instruction(); auto analysis = HloFusionAnalysis::Create(*root, device_info_); TF_ASSERT_OK_AND_ASSIGN(auto loop_fusion, GetFusion(analysis)); auto thread_id_to_output_indexing = loop_fusion->ComputeThreadIdToOutputIndexing(0, &mlir_context_); mlir::SmallVector<std::string> dim_names = {"th_x", "th_y", "th_z", "bl_x", "bl_y", "bl_z"}; mlir::SmallVector<std::string> range_names = {"chunk_id", "unroll_id"}; EXPECT_THAT( ToString(*thread_id_to_output_indexing, dim_names, range_names, {}), MatchIndexingString(R"( (th_x, th_y, th_z, bl_x, bl_y, bl_z)[chunk_id, unroll_id] -> (th_x), domain: th_x in [0, 19], th_y in [0, 0], th_z in [0, 0], bl_x in [0, 0], bl_y in [0, 0], bl_z in [0, 0], chunk_id in [0, 0], unroll_id in 
[0, 0] )")); auto thread_id_to_input_indexing = loop_fusion->ComputeThreadIdToInputIndexing( 0, 0, &mlir_context_); EXPECT_THAT( ToString(*thread_id_to_input_indexing, dim_names, range_names, {}), MatchIndexingString(R"( (th_x, th_y, th_z, bl_x, bl_y, bl_z)[chunk_id, unroll_id] -> (th_x), domain: th_x in [0, 19], th_y in [0, 0], th_z in [0, 0], bl_x in [0, 0], bl_y in [0, 0], bl_z in [0, 0], chunk_id in [0, 0], unroll_id in [0, 0] )")); } TEST_F(LoopTest, Broadcast) { auto module = ParseAndReturnVerifiedModule(R"( HloModule module bcast { %input = f32[20] parameter(0) ROOT bcast = f32[10, 20, 30] broadcast(%input), dimensions={1} } ENTRY entry { %input = f32[20] parameter(0) ROOT %fusion = f32[10, 20, 30] fusion(%input), kind=kLoop, calls=bcast })") .value(); auto* root = module->entry_computation()->root_instruction(); auto analysis = HloFusionAnalysis::Create(*root, device_info_); TF_ASSERT_OK_AND_ASSIGN(auto loop_fusion, GetFusion(analysis)); auto thread_id_to_output_indexing = loop_fusion->ComputeThreadIdToOutputIndexing(0, &mlir_context_); mlir::SmallVector<std::string> dim_names = {"th_x", "th_y", "th_z", "bl_x", "bl_y", "bl_z"}; mlir::SmallVector<std::string> range_names = {"chunk_id", "unroll_id"}; EXPECT_THAT( ToString(*thread_id_to_output_indexing, dim_names, range_names, {}), MatchIndexingString(R"( (th_x, th_y, th_z, bl_x, bl_y, bl_z)[chunk_id, unroll_id] -> ( (bl_x * 128 + th_x) floordiv 600, ((bl_x * 128 + th_x) floordiv 30) mod 20, (bl_x * 128 + th_x) mod 30), domain: th_x in [0, 127], th_y in [0, 0], th_z in [0, 0], bl_x in [0, 46], bl_y in [0, 0], bl_z in [0, 0], chunk_id in [0, 0], unroll_id in [0, 0], bl_x * 128 + th_x in [0, 5999] )")); auto thread_id_to_input_indexing = loop_fusion->ComputeThreadIdToInputIndexing( 0, 0, &mlir_context_); EXPECT_THAT( ToString(*thread_id_to_input_indexing, dim_names, range_names, {}), MatchIndexingString(R"( (th_x, th_y, th_z, bl_x, bl_y, bl_z)[chunk_id, unroll_id] -> (((bl_x * 128 + th_x) floordiv 30) mod 20), domain: th_x in [0, 127], th_y in [0, 0], th_z in [0, 0], bl_x in [0, 46], bl_y in [0, 0], bl_z in [0, 0], chunk_id in [0, 0], unroll_id in [0, 0], bl_x * 128 + th_x in [0, 5999] )")); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/legacy/loop.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/legacy/loop_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
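The unrolled indexing verified in LoopTest.ThreadIndexingUnrolled can be read as an ordinary delinearization in which each (block, thread) pair owns four consecutive elements of the minor dimension. A standalone sketch under that reading; the constants are specific to shape f32[100,200,300], 128 threads per block, and unroll factor 4:

#include <array>
#include <cstdint>

std::array<int64_t, 3> OutputIndexForThread(int64_t bl_x, int64_t th_x,
                                            int64_t unroll_id) {
  const int64_t linear = bl_x * 128 + th_x;  // one chunk of 4 elements
  return {linear / 15000,                    // 15000 = 200 * (300 / 4)
          (linear / 75) % 200,               // 75 chunks per row of 300
          (linear % 75) * 4 + unroll_id};    // unroll_id in [0, 3]
}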
06a605f6-b2f0-43a5-adfd-11795f4448f4
cpp
tensorflow/tensorflow
scatter
tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/scatter.cc
third_party/xla/xla/service/gpu/fusions/legacy/scatter_test.cc
#include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/scatter.h" #include <cstdint> #include <type_traits> #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/Sequence.h" #include "llvm/ADT/SmallVector.h" #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/BuiltinTypeInterfaces.h" #include "mlir/IR/ImplicitLocOpBuilder.h" #include "mlir/IR/Operation.h" #include "mlir/IR/ValueRange.h" #include "mlir/Support/LLVM.h" #include "mlir/Support/LogicalResult.h" #include "mlir/Transforms/DialectConversion.h" #include "tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/util.h" #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h" #include "xla/mlir_hlo/mhlo/IR/hlo_ops.h" namespace mlir { namespace odml { LogicalResult CanonicalizeScatterUpdates( Operation* scatter_op, llvm::ArrayRef<int64_t> update_window_dims, const Value& indices, const ShapedType& indices_type, Value& updates, ShapedType& updates_type, ConversionPatternRewriter& rewriter) { auto canonical_update_window_dims = llvm::to_vector( llvm::seq<int64_t>(indices_type.getRank() - 1, updates_type.getRank())); if (canonical_update_window_dims == update_window_dims) return success(); if (!IsIotaAttr(update_window_dims, update_window_dims.size())) return rewriter.notifyMatchFailure( scatter_op, "update_window_dims are not leading or trailing indices"); SmallVector<int64_t, 4> permutation_array(updates_type.getRank()); int64_t dim = 0; const auto permutation_array_size = permutation_array.size(); for (int64_t i = update_window_dims.size(); i < permutation_array_size; ++i) { permutation_array[i] = dim; ++dim; } for (int64_t i = 0; i < update_window_dims.size(); ++i) { permutation_array[i] = dim; ++dim; } auto permutation_and_shape = GetPermutationAndTransposedShape( permutation_array, updates_type, rewriter); auto transposed_updates = rewriter.create<mhlo::TransposeOp>( scatter_op->getLoc(), permutation_and_shape.shape, updates, permutation_and_shape.permutation); updates = transposed_updates; updates_type = permutation_and_shape.shape; return success(); } template <typename BinaryOp, typename TfOp> LogicalResult ConvertScatterOp<BinaryOp, TfOp>::matchAndRewrite( mhlo::ScatterOp scatter_op, OpAdaptor adaptor, ConversionPatternRewriter& rewriter) const { OperandRange operands = scatter_op.getInputs(); Value indices = scatter_op.getScatterIndices(); OperandRange updates = scatter_op.getUpdates(); if (operands.size() != 1 || updates.size() != 1) return failure(); ShapedType operand_type = mlir::cast<ShapedType>(operands[0].getType()); ShapedType indices_type = mlir::cast<ShapedType>(indices.getType()); ShapedType updates_type = mlir::cast<ShapedType>(updates[0].getType()); Value new_updates = updates[0]; if (!operand_type.hasStaticShape() || !indices_type.hasStaticShape() || !updates_type.hasStaticShape()) { return failure(); } if (failed(MatchBinaryReduceFunction<BinaryOp>( scatter_op.getUpdateComputation()))) { return failure(); } auto scatter_dimension_numbers = scatter_op.getScatterDimensionNumbers(); int64_t index_vector_dim = scatter_dimension_numbers.getIndexVectorDim(); if (failed(NormalizeIndexVector(scatter_op, indices, indices_type, index_vector_dim, rewriter))) { return failure(); } auto update_window_dims = scatter_dimension_numbers.getUpdateWindowDims(); if (failed(CanonicalizeScatterUpdates(scatter_op, update_window_dims, indices, indices_type, new_updates, updates_type, rewriter))) { return failure(); } auto inserted_window_dims = 
scatter_dimension_numbers.getInsertedWindowDims(); auto scatter_dims_to_operand_dims = scatter_dimension_numbers.getScatterDimsToOperandDims(); if (IsIotaAttr(inserted_window_dims, indices_type.getShape().back()) && IsIotaAttr(scatter_dims_to_operand_dims, indices_type.getShape().back())) { rewriter.replaceOpWithNewOp<TfOp>(scatter_op, scatter_op.getResult(0).getType(), operands[0], indices, new_updates); return success(); } if (scatter_dims_to_operand_dims != inserted_window_dims) { return rewriter.notifyMatchFailure( scatter_op, "unsupported scatter_dims_to_operand_dims"); } SmallVector<int64_t, 4> permutation_array; for (int64_t i = 0; i < scatter_dims_to_operand_dims.size(); ++i) { permutation_array.push_back(scatter_dims_to_operand_dims[i]); } for (int64_t i = 0; i < operand_type.getRank(); ++i) { if (!llvm::is_contained(scatter_dims_to_operand_dims, i)) { permutation_array.push_back(i); } } auto permutation_and_shape = GetPermutationAndTransposedShape( permutation_array, operand_type, rewriter); Location loc = scatter_op.getLoc(); auto transposed_operand = rewriter.create<mhlo::TransposeOp>( loc, permutation_and_shape.shape, operands[0], permutation_and_shape.permutation); Value new_indices = indices; int64_t index_depth = permutation_and_shape.shape.getRank() - inserted_window_dims.size(); int64_t num_updates = indices_type.getDimSize(0); if (std::is_same<TfOp, TF::TensorScatterUpdateOp>::value && indices_type.getRank() == 1 && updates_type.getRank() == 1 && index_depth == 1 && num_updates == 1) { ImplicitLocOpBuilder builder(loc, rewriter); auto indices_shape = BuildIntArrayConstOp( builder, rewriter, llvm::SmallVector<int64_t>({num_updates, index_depth}), rewriter.getI32Type()); new_indices = rewriter.create<TF::ReshapeOp>( loc, RankedTensorType::get({num_updates, index_depth}, indices_type.getElementType()), indices, indices_shape); auto updates_shape = BuildIntArrayConstOp( builder, rewriter, llvm::SmallVector<int64_t>({num_updates, updates_type.getDimSize(0)}), rewriter.getI32Type()); new_updates = rewriter.create<TF::ReshapeOp>( loc, RankedTensorType::get({1, updates_type.getDimSize(0)}, updates_type.getElementType()), new_updates, updates_shape); } auto tf_scatter_op = rewriter.create<TfOp>(loc, permutation_and_shape.shape, transposed_operand, new_indices, new_updates); auto inverse_permutation = GetInversePermutation(permutation_array, rewriter); rewriter.replaceOpWithNewOp<mhlo::TransposeOp>( scatter_op, scatter_op.getResult(0).getType(), tf_scatter_op, inverse_permutation); return success(); } } }
#include "xla/service/gpu/fusions/legacy/scatter.h" #include <optional> #include <string> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "mlir/IR/MLIRContext.h" #include "xla/service/gpu/fusions/fusions.h" #include "xla/service/gpu/gpu_device_info_for_tests.h" #include "xla/service/gpu/hlo_fusion_analysis.h" #include "xla/service/gpu/model/indexing_map_serialization.h" #include "xla/service/gpu/model/indexing_test_utils.h" #include "xla/stream_executor/device_description.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { class ScatterFusionTest : public HloTestBase { DebugOptions GetDebugOptionsForTest() override { auto opts = HloTestBase::GetDebugOptionsForTest(); opts.set_xla_gpu_mlir_emitter_level(0); return opts; } protected: mlir::MLIRContext mlir_context_; }; TEST_F(ScatterFusionTest, ScatterFusion) { auto module = ParseAndReturnVerifiedModule(R"( HloModule module add (lhs: f32[], rhs: f32[]) -> f32[] { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT sum = f32[] add(lhs, rhs) } fused_computation { %input = f32[2,9] parameter(0) %indices = s32[3] parameter(1) %updates = f32[3,9] parameter(2) ROOT %scatter = f32[2,9] scatter(%input, %indices, %updates), to_apply=add, update_window_dims={1}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1 } ENTRY entry { %input = f32[2,9] parameter(0) %indices = s32[3] parameter(1) %updates = f32[3,9] parameter(2) ROOT %fusion = f32[2,9] fusion(%input, %indices, %updates), kind=kLoop, calls=fused_computation })") .value(); stream_executor::DeviceDescription device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo(); auto* root = module->entry_computation()->root_instruction(); auto analysis_fused = HloFusionAnalysis::Create(*root, device_info); auto emitter = GetFusionEmitter(PreBufferAssignmentFusionInfo{analysis_fused}); auto scatter_fusion = dynamic_cast<ScatterFusion*>(emitter.get()); ASSERT_NE(scatter_fusion, nullptr); EXPECT_EQ(scatter_fusion->launch_dimensions().launch_bound(), 3 * 9 ); } TEST_F(ScatterFusionTest, ThreadIdIndexing) { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"( HloModule module computation { %p0 = f32[] parameter(0) %p1 = f32[] parameter(1) %p2 = f32[] parameter(2) %p3 = f32[] parameter(3) ROOT %tuple = (f32[], f32[]) tuple(f32[] %p2, f32[] %p3) } scatter { %operand0 = f32[300,200] parameter(0) %operand1 = f32[300,200] parameter(1) %indices = s32[42,1] parameter(2) %update.1 = f32[42,10,20] parameter(3) %update.2 = f32[42,10,20]parameter(4) ROOT %scatter = (f32[300,200], f32[300,200]) scatter( f32[300,200] %operand0, f32[300,200] %operand1, s32[42,1] %indices, f32[42,10,20] %update.1, f32[42,10,20] %update.2 ), update_window_dims={1,2}, inserted_window_dims={}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=computation } ENTRY entry { %operand0 = f32[300,200] parameter(0) %operand1 = f32[300,200] parameter(1) %indices = s32[42,1] parameter(2) %update.1 = f32[42,10,20] parameter(3) %update.2 = f32[42,10,20]parameter(4) ROOT %fusion = (f32[300,200], f32[300,200]) fusion( %operand0, %operand1, %indices, %update.1, %update.2), kind=kLoop, calls=scatter } )")); stream_executor::DeviceDescription device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo(); auto* root = module->entry_computation()->root_instruction(); auto analysis_fused = HloFusionAnalysis::Create(*root, device_info); auto emitter = GetFusionEmitter(PreBufferAssignmentFusionInfo{analysis_fused}); auto fusion = 
dynamic_cast<ScatterFusion*>(emitter.get()); ASSERT_NE(fusion, nullptr); constexpr auto kUpdatesIndexing = R"( (th_x, th_y, th_z, bl_x, bl_y, bl_z)[chunk_id, unroll_id] -> ( (bl_x * 128 + th_x) floordiv 200, ((bl_x * 128 + th_x) floordiv 20) mod 10, (bl_x * 128 + th_x) mod 20 ), domain: th_x in [0, 127], th_y in [0, 0], th_z in [0, 0], bl_x in [0, 65], bl_y in [0, 0], bl_z in [0, 0], chunk_id in [0, 0], unroll_id in [0, 0], bl_x * 128 + th_x in [0, 8399] )"; mlir::SmallVector<std::string> dim_names = {"th_x", "th_y", "th_z", "bl_x", "bl_y", "bl_z"}; mlir::SmallVector<std::string> range_names = {"chunk_id", "unroll_id"}; EXPECT_THAT( ToString(*fusion->ComputeThreadIdToInputIndexing( 0, 3, &mlir_context_), dim_names, range_names, {}), MatchIndexingString(kUpdatesIndexing)); EXPECT_THAT( ToString(*fusion->ComputeThreadIdToInputIndexing( 0, 4, &mlir_context_), dim_names, range_names, {}), MatchIndexingString(kUpdatesIndexing)); EXPECT_THAT( ToString(*fusion->ComputeThreadIdToInputIndexing( 1, 3, &mlir_context_), dim_names, range_names, {}), MatchIndexingString(kUpdatesIndexing)); EXPECT_THAT( ToString(*fusion->ComputeThreadIdToInputIndexing( 1, 4, &mlir_context_), dim_names, range_names, {}), MatchIndexingString(kUpdatesIndexing)); range_names.push_back("index_id"); constexpr auto kIndicesIndexing = R"( (th_x, th_y, th_z, bl_x, bl_y, bl_z)[chunk_id, unroll_id, index_id] -> ((bl_x * 128 + th_x) floordiv 200, 0), domain: th_x in [0, 127], th_y in [0, 0], th_z in [0, 0], bl_x in [0, 65], bl_y in [0, 0], bl_z in [0, 0], chunk_id in [0, 0], unroll_id in [0, 0], index_id in [0, 0], bl_x * 128 + th_x in [0, 8399] )"; EXPECT_THAT( ToString(*fusion->ComputeThreadIdToInputIndexing( 0, 2, &mlir_context_), dim_names, range_names, {}), MatchIndexingString(kIndicesIndexing)); EXPECT_THAT( ToString(*fusion->ComputeThreadIdToInputIndexing( 1, 2, &mlir_context_), dim_names, range_names, {}), MatchIndexingString(kIndicesIndexing)); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/compiler/mlir/lite/stablehlo/transforms/legalize_hlo_conversions/scatter.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/legacy/scatter_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
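CanonicalizeScatterUpdates in the scatter legalization above moves leading update_window_dims behind the index dimensions by building a transpose permutation. A minimal sketch of just that permutation construction, as a standalone function with a hypothetical name:

#include <cstdint>
#include <vector>

// For rank 4 with 2 leading window dims this yields {2, 3, 0, 1}: the
// transposed result lists the index/batch dims first and the window dims last.
std::vector<int64_t> WindowDimsToTrailingPermutation(int64_t rank,
                                                     int64_t num_window_dims) {
  std::vector<int64_t> perm(rank);
  int64_t dim = 0;
  for (int64_t i = num_window_dims; i < rank; ++i) perm[i] = dim++;
  for (int64_t i = 0; i < num_window_dims; ++i) perm[i] = dim++;
  return perm;
}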
8eeae95c-0b1a-4694-a63f-32376624902a
cpp
tensorflow/tensorflow
concatenate
tensorflow/lite/experimental/shlo/legacy/src/concatenate.cc
tensorflow/lite/experimental/shlo/legacy/test/concatenate_test.cc
#include <algorithm> #include <cstddef> #include <type_traits> #include "absl/status/status.h" #include "absl/types/span.h" #include "tensorflow/lite/experimental/shlo/legacy/include/shlo.h" #include "tensorflow/lite/experimental/shlo/legacy/src/dispatch.h" #include "tensorflow/lite/experimental/shlo/legacy/src/storage.h" #include "tensorflow/lite/experimental/shlo/legacy/src/util.h" namespace stablehlo { namespace { template <typename Value> absl::Status CheckParameters(absl::Span<const Value*> inputs, DimensionSize dimension, Value& result) { for (auto i = 1; i < inputs.size(); ++i) { if (!(inputs[i]->element_type() == inputs[0]->element_type())) { return absl::InvalidArgumentError( "Constraint violation: same(element_type(inputs...))"); } } for (size_t ax = 0; ax < inputs[0]->rank(); ++ax) { if (ax != dimension) { for (auto i = 1; i < inputs.size(); ++i) { if (!(inputs[i]->dim(ax) == inputs[0]->dim(ax))) { return absl::InvalidArgumentError( "Constraint violation: same(shape(inputs...)) except for " "dim(inputs..., dimension)"); } } } } if (inputs.empty()) { return absl::InvalidArgumentError("Constraint violation: 0 < size(inputs)"); } else if (!(dimension >= 0 && dimension < inputs[0]->rank())) { return absl::InvalidArgumentError( "Constraint violation: 0 <= dimension < rank(inputs[0])"); } else if (!(result.element_type() == inputs[0]->element_type())) { return absl::InvalidArgumentError( "Constraint violation: element_type(result) = element_type(inputs[0])"); } else { for (size_t ax = 0; ax < result.rank(); ++ax) { DimensionSize expected_dim_size = 0; if (ax == dimension) { for (auto* x : inputs) { expected_dim_size += x->dim(ax); } } else { expected_dim_size = inputs[0]->dim(ax); } if (!(result.dim(ax) == expected_dim_size)) { return absl::InvalidArgumentError( "Constraint violation: element_type(result) = " "element_type(inputs[0])"); } } } if (std::any_of(inputs.begin(), inputs.end(), [](auto* x) { return x->layout().has_strides(); }) || result.layout().has_strides()) { return absl::InvalidArgumentError("Stides not supported yet"); } return absl::OkStatus(); } template <ElementType storage_type, ElementType expressed_type, typename Value> absl::Status Concatenate(absl::Span<const Value*> inputs, DimensionSize dimension, Value& result) { if (auto check = CheckParameters(inputs, dimension, result); !check.ok()) { return check; } using S = Storage<storage_type>; auto result_buffer = result.buffer(); if constexpr (std::is_same_v<Value, Tensor>) { if (storage_type != result.element_type()) { return absl::InvalidArgumentError("Unexpected tensor element type"); } DimensionSize dimension_offset = 0; TensorIndex result_index(result.shape()); for (auto* input : inputs) { auto input_buffer = input->buffer(); for (TensorIndexIterator input_iter{input->shape()}; input_iter.has_next(); ++input_iter) { const TensorIndex& input_index = *input_iter; result_index.set(input_index); auto new_dim_size = result_index[dimension] + dimension_offset; result_index.set(dimension, new_dim_size); auto linearized_input_index = input_index.linearize(); auto linearized_result_index = result_index.linearize(); auto value = S::Get(input_buffer, linearized_input_index); S::Set(result_buffer, linearized_result_index, value); } dimension_offset += input->dim(dimension); } } else { static_assert(std::is_same_v<Value, QuantizedTensor>); if (storage_type != result.storage_type()) { return absl::InvalidArgumentError("Unexpected storage type"); } else if (expressed_type != result.expressed_type()) { return 
absl::InvalidArgumentError("Unexpected expressed type"); } using ET = typename Storage<expressed_type>::Type; const QuantizedParameter& result_quant_param = result.type().element_type().parameters(0); ET result_scale_inv = ET(1.0) / static_cast<ET>(result_quant_param.scale); DimensionSize dimension_offset = 0; TensorIndex result_index(result.shape()); for (auto* input : inputs) { auto input_buffer = input->buffer(); const QuantizedParameter& input_quant_param = input->type().element_type().parameters(0); for (TensorIndexIterator input_iter{input->shape()}; input_iter.has_next(); ++input_iter) { const TensorIndex& input_index = *input_iter; result_index.set(input_index); auto new_dim_size = result_index[dimension] + dimension_offset; result_index.set(dimension, new_dim_size); auto linearized_input_index = input_index.linearize(); auto linearized_result_index = result_index.linearize(); auto input_storage = S::Get(input_buffer, linearized_input_index); auto result_storage = DequantizeOpQuantizePartial<storage_type, expressed_type>( input_storage, input_quant_param, result_scale_inv, result_quant_param.zero_point, [](auto x) { return x; }); S::Set(result_buffer, linearized_result_index, result_storage); } dimension_offset += input->dim(dimension); } if (auto status = CompleteQuantization<storage_type>(result); !status.ok()) { return status; } } return absl::OkStatus(); } } absl::Status Concatenate(absl::Span<const Tensor*> inputs, DimensionSize dimension, Tensor& result) { DISPATCH_BOOL_INT_FLOAT(Concatenate, result.element_type(), inputs, dimension, result); } absl::Status Concatenate(absl::Span<const QuantizedTensor*> inputs, DimensionSize dimension, QuantizedTensor& result) { DISPATCH_QUANTIZED(Concatenate, result.storage_type(), result.expressed_type(), inputs, dimension, result); } }
#include <cstddef> #include <initializer_list> #include <sstream> #include <string> #include <string_view> #include <utility> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/types/span.h" #include "tensorflow/lite/experimental/shlo/legacy/include/shlo.h" #include "tensorflow/lite/experimental/shlo/legacy/src/debug.h" #include "tensorflow/lite/experimental/shlo/legacy/src/storage.h" #include "tensorflow/lite/experimental/shlo/legacy/test/util.h" namespace stablehlo { namespace testing { template <ElementType element_type> struct TensorConst { std::initializer_list<DimensionSize>&& shape; std::vector<typename Storage<element_type>::Type>&& values; }; template <typename T> std::string ToString(std::string_view name, const std::vector<const T*>& tensors) { std::ostringstream result; for (size_t i = 0; i < tensors.size(); ++i) { result << name << "[" << i << "]: " << *tensors[i] << "\n"; } return result.str(); } template <ElementType element_type> void test(std::initializer_list<TensorConst<element_type>>&& inputs_, DimensionSize dimension, TensorConst<element_type>&& expected_) { std::vector<Tensor> inputs_storage; for (auto& x : inputs_) { inputs_storage.emplace_back( Tensor(TensorType(Shape(x.shape), element_type), x.values.data())); } std::vector<const Tensor*> inputs; for (auto& x : inputs_storage) { inputs.push_back(&x); } Tensor expected(TensorType(Shape(expected_.shape), element_type), expected_.values.data()); std::vector<typename Storage<element_type>::Type> result_values( expected.num_elements()); Tensor result(TensorType(expected.type()), result_values.data()); ASSERT_OK(Concatenate(absl::Span<const Tensor*>(inputs), dimension, result)); EXPECT_EQ(result, expected) << ToString("inputs", inputs) << "dimension: " << dimension; } template <ElementType storage_type, ElementType expressed_type> void test(QuantizedParameter&& quantized_parameter, std::initializer_list<TensorConst<expressed_type>>&& inputs_, DimensionSize dimension, TensorConst<expressed_type>&& expected_) { std::vector<std::vector<typename Storage<storage_type>::Type>> inputs_storage; std::vector<QuantizedTensor> input_tensors; std::vector<const QuantizedTensor*> inputs; for (auto& x : inputs_) { inputs_storage.emplace_back(QuantizeVector<storage_type, expressed_type>( x.values, quantized_parameter)); QuantizedTensorElementType element_type( storage_type, expressed_type, QuantizedParameter(quantized_parameter)); QuantizedTensorType type(Shape(x.shape), std::move(element_type)); input_tensors.emplace_back( QuantizedTensor(std::move(type), inputs_storage.back().data())); } for (auto& t : input_tensors) { inputs.push_back(&t); } auto quantized_expected_values = QuantizeVector<storage_type, expressed_type>( expected_.values, quantized_parameter); QuantizedTensor expected( QuantizedTensorType( Shape(expected_.shape), QuantizedTensorElementType(storage_type, expressed_type, QuantizedParameter(quantized_parameter))), quantized_expected_values.data()); std::vector<typename Storage<storage_type>::Type> result_values( expected.num_elements()); QuantizedTensor result(QuantizedTensorType(expected.type()), result_values.data()); ASSERT_OK(Concatenate(absl::Span<const QuantizedTensor*>(inputs), dimension, result)); EXPECT_EQ(result, expected) << ToString("inputs", inputs) << "dimension: " << dimension; } TEST(Concatenate, Unquantized) { test<ElementType::kI1>( {{{3, 2}, {true, false, true, false, true, false}}, {{1, 2}, {false, true}}}, 0, {{4, 2}, {true, false, true, false, true, false, false, 
true}}); test<ElementType::kSI8>({{{3, 2}, {1, 2, 3, 4, 5, 6}}, {{1, 2}, {7, 8}}}, 0, {{4, 2}, {1, 2, 3, 4, 5, 6, 7, 8}}); test<ElementType::kSI16>({{{3, 2}, {1, 2, 3, 4, 5, 6}}, {{1, 2}, {7, 8}}}, 0, {{4, 2}, {1, 2, 3, 4, 5, 6, 7, 8}}); test<ElementType::kSI32>({{{3, 2}, {1, 2, 3, 4, 5, 6}}, {{1, 2}, {7, 8}}}, 0, {{4, 2}, {1, 2, 3, 4, 5, 6, 7, 8}}); test<ElementType::kBF16>({{{3, 2}, {1, 2, 3, 4, 5, 6}}, {{1, 2}, {7, 8}}}, 0, {{4, 2}, {1, 2, 3, 4, 5, 6, 7, 8}}); test<ElementType::kF16>({{{3, 2}, {1, 2, 3, 4, 5, 6}}, {{1, 2}, {7, 8}}}, 0, {{4, 2}, {1, 2, 3, 4, 5, 6, 7, 8}}); test<ElementType::kF32>({{{3, 2}, {1, 2, 3, 4, 5, 6}}, {{1, 2}, {7, 8}}}, 0, {{4, 2}, {1, 2, 3, 4, 5, 6, 7, 8}}); test<ElementType::kI1>( {{{2, 3}, {true, false, true, false, true, false}}, {{2, 1}, {true, false}}}, 1, {{2, 4}, {true, false, true, true, false, true, false, false}}); test<ElementType::kSI8>({{{2, 3}, {1, 2, 3, 4, 5, 6}}, {{2, 1}, {7, 8}}}, 1, {{2, 4}, {1, 2, 3, 7, 4, 5, 6, 8}}); test<ElementType::kSI16>({{{2, 3}, {1, 2, 3, 4, 5, 6}}, {{2, 1}, {7, 8}}}, 1, {{2, 4}, {1, 2, 3, 7, 4, 5, 6, 8}}); test<ElementType::kSI32>({{{2, 3}, {1, 2, 3, 4, 5, 6}}, {{2, 1}, {7, 8}}}, 1, {{2, 4}, {1, 2, 3, 7, 4, 5, 6, 8}}); test<ElementType::kBF16>({{{2, 3}, {1, 2, 3, 4, 5, 6}}, {{2, 1}, {7, 8}}}, 1, {{2, 4}, {1, 2, 3, 7, 4, 5, 6, 8}}); test<ElementType::kF16>({{{2, 3}, {1, 2, 3, 4, 5, 6}}, {{2, 1}, {7, 8}}}, 1, {{2, 4}, {1, 2, 3, 7, 4, 5, 6, 8}}); test<ElementType::kF32>({{{2, 3}, {1, 2, 3, 4, 5, 6}}, {{2, 1}, {7, 8}}}, 1, {{2, 4}, {1, 2, 3, 7, 4, 5, 6, 8}}); } TEST(Concatenate, Quantized) { test<ElementType::kSI8, ElementType::kBF16>( {.scale = 0.1, .zero_point = 0}, {{{3, 2}, {1, 2, 3, 4, 5, 6}}, {{1, 2}, {7, 8}}}, 0, {{4, 2}, {1, 2, 3, 4, 5, 6, 7, 8}}); test<ElementType::kSI8, ElementType::kF16>( {.scale = 0.1, .zero_point = 0}, {{{3, 2}, {1, 2, 3, 4, 5, 6}}, {{1, 2}, {7, 8}}}, 0, {{4, 2}, {1, 2, 3, 4, 5, 6, 7, 8}}); test<ElementType::kSI8, ElementType::kF32>( {.scale = 0.1, .zero_point = 0}, {{{3, 2}, {1, 2, 3, 4, 5, 6}}, {{1, 2}, {7, 8}}}, 0, {{4, 2}, {1, 2, 3, 4, 5, 6, 7, 8}}); test<ElementType::kSI16, ElementType::kBF16>( {.scale = 0.1, .zero_point = 0}, {{{3, 2}, {1, 2, 3, 4, 5, 6}}, {{1, 2}, {7, 8}}}, 0, {{4, 2}, {1, 2, 3, 4, 5, 6, 7, 8}}); test<ElementType::kSI16, ElementType::kF16>( {.scale = 0.1, .zero_point = 0}, {{{3, 2}, {1, 2, 3, 4, 5, 6}}, {{1, 2}, {7, 8}}}, 0, {{4, 2}, {1, 2, 3, 4, 5, 6, 7, 8}}); test<ElementType::kSI16, ElementType::kF32>( {.scale = 0.1, .zero_point = 0}, {{{3, 2}, {1, 2, 3, 4, 5, 6}}, {{1, 2}, {7, 8}}}, 0, {{4, 2}, {1, 2, 3, 4, 5, 6, 7, 8}}); test<ElementType::kSI32, ElementType::kBF16>( {.scale = 0.1, .zero_point = 0}, {{{3, 2}, {1, 2, 3, 4, 5, 6}}, {{1, 2}, {7, 8}}}, 0, {{4, 2}, {1, 2, 3, 4, 5, 6, 7, 8}}); test<ElementType::kSI32, ElementType::kF16>( {.scale = 0.1, .zero_point = 0}, {{{3, 2}, {1, 2, 3, 4, 5, 6}}, {{1, 2}, {7, 8}}}, 0, {{4, 2}, {1, 2, 3, 4, 5, 6, 7, 8}}); test<ElementType::kSI32, ElementType::kF32>( {.scale = 0.1, .zero_point = 0}, {{{3, 2}, {1, 2, 3, 4, 5, 6}}, {{1, 2}, {7, 8}}}, 0, {{4, 2}, {1, 2, 3, 4, 5, 6, 7, 8}}); test<ElementType::kSI8, ElementType::kBF16>( {.scale = 0.1, .zero_point = 0}, {{{2, 3}, {1, 2, 3, 4, 5, 6}}, {{2, 1}, {7, 8}}}, 1, {{2, 4}, {1, 2, 3, 7, 4, 5, 6, 8}}); test<ElementType::kSI8, ElementType::kF16>( {.scale = 0.1, .zero_point = 0}, {{{2, 3}, {1, 2, 3, 4, 5, 6}}, {{2, 1}, {7, 8}}}, 1, {{2, 4}, {1, 2, 3, 7, 4, 5, 6, 8}}); test<ElementType::kSI8, ElementType::kF32>( {.scale = 0.1, .zero_point = 0}, {{{2, 3}, {1, 2, 3, 4, 5, 6}}, {{2, 1}, {7, 
8}}}, 1, {{2, 4}, {1, 2, 3, 7, 4, 5, 6, 8}}); test<ElementType::kSI16, ElementType::kBF16>( {.scale = 0.1, .zero_point = 0}, {{{2, 3}, {1, 2, 3, 4, 5, 6}}, {{2, 1}, {7, 8}}}, 1, {{2, 4}, {1, 2, 3, 7, 4, 5, 6, 8}}); test<ElementType::kSI16, ElementType::kF16>( {.scale = 0.1, .zero_point = 0}, {{{2, 3}, {1, 2, 3, 4, 5, 6}}, {{2, 1}, {7, 8}}}, 1, {{2, 4}, {1, 2, 3, 7, 4, 5, 6, 8}}); test<ElementType::kSI16, ElementType::kF32>( {.scale = 0.1, .zero_point = 0}, {{{2, 3}, {1, 2, 3, 4, 5, 6}}, {{2, 1}, {7, 8}}}, 1, {{2, 4}, {1, 2, 3, 7, 4, 5, 6, 8}}); test<ElementType::kSI32, ElementType::kBF16>( {.scale = 0.1, .zero_point = 0}, {{{2, 3}, {1, 2, 3, 4, 5, 6}}, {{2, 1}, {7, 8}}}, 1, {{2, 4}, {1, 2, 3, 7, 4, 5, 6, 8}}); test<ElementType::kSI32, ElementType::kF16>( {.scale = 0.1, .zero_point = 0}, {{{2, 3}, {1, 2, 3, 4, 5, 6}}, {{2, 1}, {7, 8}}}, 1, {{2, 4}, {1, 2, 3, 7, 4, 5, 6, 8}}); test<ElementType::kSI32, ElementType::kF32>( {.scale = 0.1, .zero_point = 0}, {{{2, 3}, {1, 2, 3, 4, 5, 6}}, {{2, 1}, {7, 8}}}, 1, {{2, 4}, {1, 2, 3, 7, 4, 5, 6, 8}}); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/legacy/src/concatenate.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/tensorflow/lite/experimental/shlo/legacy/test/concatenate_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
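The concatenate kernel above walks each input in turn and shifts its index along the concatenation dimension by the extents already consumed (dimension_offset). Restricted to two-dimensional row-major tensors concatenated along dimension 0, the same bookkeeping looks like this; a standalone sketch, not the SHLO API:

#include <cstddef>
#include <cstdint>
#include <vector>

// inputs[i] has shape {rows[i], cols}; the result has shape {sum(rows), cols}.
// Input i lands at row offset rows[0] + ... + rows[i-1], mirroring the
// dimension_offset accumulation in the kernel.
std::vector<int> ConcatenateAlongDim0(
    const std::vector<std::vector<int>>& inputs,
    const std::vector<int64_t>& rows, int64_t cols) {
  int64_t total_rows = 0;
  for (int64_t r : rows) total_rows += r;
  std::vector<int> result(total_rows * cols);
  int64_t row_offset = 0;
  for (size_t i = 0; i < inputs.size(); ++i) {
    for (int64_t r = 0; r < rows[i]; ++r) {
      for (int64_t c = 0; c < cols; ++c) {
        result[(row_offset + r) * cols + c] = inputs[i][r * cols + c];
      }
    }
    row_offset += rows[i];
  }
  return result;
}

With inputs {1,2,3,4,5,6} of shape {3,2} and {7,8} of shape {1,2}, this reproduces the {4,2} result {1,...,8} checked in the unquantized test above.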
a820757e-063a-4b9d-98ba-efa13d24715e
cpp
tensorflow/tensorflow
reduction
third_party/xla/xla/service/gpu/fusions/legacy/reduction.cc
third_party/xla/xla/service/gpu/fusions/legacy/reduction_test.cc
#include "xla/service/gpu/fusions/legacy/reduction.h" #include <cstdint> #include <functional> #include <memory> #include <optional> #include <string> #include <tuple> #include <utility> #include <vector> #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/container/inlined_vector.h" #include "absl/container/node_hash_map.h" #include "absl/log/check.h" #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/Twine.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/GlobalVariable.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/Type.h" #include "llvm/IR/Value.h" #include "llvm/Support/AtomicOrdering.h" #include "llvm/Support/Casting.h" #include "mlir/Support/LLVM.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/layout_util.h" #include "xla/service/buffer_assignment.h" #include "xla/service/gpu/elemental_ir_emitter.h" #include "xla/service/gpu/fusions/fusion_emitter.h" #include "xla/service/gpu/fusions/legacy/tiling_util.h" #include "xla/service/gpu/fusions/reduction_base.h" #include "xla/service/gpu/fusions/thunk_util.h" #include "xla/service/gpu/hlo_fusion_analysis.h" #include "xla/service/gpu/hlo_traversal.h" #include "xla/service/gpu/ir_emission_utils.h" #include "xla/service/gpu/ir_emitter_context.h" #include "xla/service/gpu/ir_emitter_nested.h" #include "xla/service/gpu/kernel_arguments.h" #include "xla/service/gpu/kernel_reuse_cache.h" #include "xla/service/gpu/launch_dimensions.h" #include "xla/service/gpu/model/indexing_map.h" #include "xla/service/gpu/parallel_loop_emitter.h" #include "xla/service/gpu/reduction_utils.h" #include "xla/service/gpu/runtime/kernel_thunk.h" #include "xla/service/gpu/runtime/thunk.h" #include "xla/service/gpu/target_util.h" #include "xla/service/llvm_ir/fused_ir_emitter.h" #include "xla/service/llvm_ir/ir_array.h" #include "xla/service/llvm_ir/kernel_support_library.h" #include "xla/service/llvm_ir/llvm_loop.h" #include "xla/service/llvm_ir/llvm_util.h" #include "xla/service/llvm_ir/loop_emitter.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/status_macros.h" #include "xla/stream_executor/device_description.h" #include "xla/util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" #include "tsl/platform/status.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { using TypedPointer = std::pair<llvm::Value* const, llvm::Type* const>; using ReductionOutputMap = ConstHloInstructionMap<absl::Span<llvm_ir::IrArray const>>; using ExtraOutputGensMap = ConstHloInstructionMap<llvm_ir::ElementGenerator>; int GetNumOutputs(const Shape& shape) { if (shape.IsTuple()) { return shape.tuple_shapes_size(); } return 1; } const Shape& OutputShape(const Shape& output_shape, int output_index) { CHECK(output_index == 0 || output_shape.IsTuple()); return output_shape.IsTuple() ? 
output_shape.tuple_shapes(output_index) : output_shape; } llvm::Type* GetIndexType(const HloFusionInstruction& fusion, const Tiling& tiling, llvm::IRBuilder<>* builder) { return GetIndexTypeForKernel( &fusion, tiling.GetNumThreadsPerBlock() * tiling.GetNumBlocks(), builder); } llvm::Value* CastSharedToGlobal(llvm::IRBuilder<>* builder, llvm::Value* input, llvm::Type* element_type, llvm::Twine name) { return builder->CreateAddrSpaceCast( input, llvm::PointerType::get(element_type, 0), name); } class ReductionEmitter { public: ReductionEmitter(const HloFusionAnalysis& analysis, const ReductionInfo& reduction_codegen_info, IrEmitterContext& ir_emitter_context, const HloFusionInstruction& fusion, llvm::IRBuilder<>* builder) : builder_(builder), elemental_emitter_(ir_emitter_context, builder_), analysis_(analysis), reduction_codegen_info_(reduction_codegen_info), ir_emitter_context_(ir_emitter_context), fusion_(fusion), index_ty_(GetIndexType(fusion, reduction_codegen_info.GetTiling(), elemental_emitter_.builder())) { for (auto hero : analysis.fusion_heroes()) { if (hero.opcode() == HloOpcode::kReduce) { for (int i = 0; i < hero.instruction().operand_count() / 2; ++i) { CHECK(LayoutUtil::IsMonotonicWithDim0Major( hero.instruction().operand(i)->shape().layout())) << "reduction-layout-normalizer must run before code generation"; } } } } absl::StatusOr<FusionEmissionResult> EmitInitializers(); absl::Status EmitKernel(const LaunchDimensions& launch_dims, std::vector<llvm_ir::IrArray> inputs, std::vector<llvm_ir::IrArray> outputs); private: friend class ReductionGroupEmitter; absl::StatusOr<std::unique_ptr<Thunk>> BuildKernelThunkForFusion( const LaunchDimensions& launch_dimensions, absl::string_view discriminator, std::function<absl::Status(std::vector<llvm_ir::IrArray>, std::vector<llvm_ir::IrArray>)> kernel_builder_fn); absl::StatusOr<std::unique_ptr<Thunk>> BuildFusedInitializerThunk( const HloInstruction* fusion_root, BufferAllocation::Slice dest_slice, int output_index); absl::Status EmitIRForReduction( absl::Span<const HloInstruction* const> instr_index_group, FusedIrEmitter& fused_emitter, const ReductionOutputMap& result_ir_arrays, const Shape& input_shape); void MaybeEmitFenceForAMDGPU(); void EmitSyncThreads(); int ReducedDimensionSize() const { return reduction_codegen_info_.GetTiling().GetShape()[2]; } llvm::IRBuilder<>* builder_; GpuElementalIrEmitter elemental_emitter_; const HloFusionAnalysis& analysis_; const ReductionInfo& reduction_codegen_info_; IrEmitterContext& ir_emitter_context_; const HloFusionInstruction& fusion_; llvm::Type* index_ty_; }; class ReductionEmitter; class ReductionGroupEmitter { public: struct ReductionCalculationState { std::optional<llvm_ir::SharedMemoryTile> shared_cache; llvm::Value* initial_value; llvm::AllocaInst* partial_result_address; llvm::AllocaInst* input_address; llvm_ir::ElementGenerator input_gen; }; ReductionGroupEmitter( ReductionEmitter& reduction_emitter, absl::Span<const HloReduceInstruction* const> reduce_instr_index_group, const ReductionOutputMap& result_ir_arrays, FusedIrEmitter& fused_emitter); const ReductionCalculationState& GetCalculationStateFor( const HloInstruction* instruction, int operand_idx) const { const ReductionOpState& op_state = state_.at(instruction); CHECK_LT(operand_idx, op_state.size()); return op_state[operand_idx]; } void SetCalculationStateFor( const ReductionCalculationState& calculation_state, const HloInstruction* instruction, int operand_idx) { ReductionOpState& op_state = state_[instruction]; 
CHECK_EQ(operand_idx, op_state.size()); op_state.push_back(calculation_state); } void EmitReductionOutputForRowReduction( const TilingKernelInfo& tiling_kernel_info, const HloReduceInstruction* reduction, const std::vector<const HloInstruction*>& roots) const; void EmitReductionOutputForColumnReduction( const TilingKernelInfo& tiling_kernel_info, const HloReduceInstruction* reduction, const std::vector<const HloInstruction*>& roots) const; void EmitFullWarpShuffleDownLoopForReduce( const HloComputation* reducer, absl::Span<TypedPointer const> partial_result_addresses, int threads_per_block, int num_results_per_warp) const; void WriteReductionOutput(const TilingKernelInfo& tiling_kernel_info, const HloReduceInstruction* reduction, const std::vector<const HloInstruction*>& roots, absl::Span<TypedPointer const> values) const; llvm_ir::IrArray::Index GetOutputIndexForReduction( const TilingKernelInfo& tiling_kernel_info, const HloReduceInstruction* reduction, const HloInstruction* root, int output_idx) const; void GenerateElementForReducer(const HloReduceInstruction* reduction, const llvm_ir::IrArray::Index& index) const; absl::Status EmitExtraOutputsForReduce( const Shape& reduction_operand_shape, const llvm_ir::IrArray::Index& index, const ExtraOutputGensMap& extra_output_gens); private: ReductionEmitter& reduction_emitter_; const ReductionOutputMap& result_ir_arrays_; using ReductionOpState = absl::InlinedVector<ReductionCalculationState, 2>; absl::flat_hash_map<const HloInstruction*, ReductionOpState> state_; }; ReductionGroupEmitter::ReductionGroupEmitter( ReductionEmitter& reduction_emitter, absl::Span<const HloReduceInstruction* const> reduce_instr_index_group, const ReductionOutputMap& result_ir_arrays, FusedIrEmitter& fused_emitter) : reduction_emitter_(reduction_emitter), result_ir_arrays_(result_ir_arrays) { const ReductionInfo& reduction_info = reduction_emitter_.reduction_codegen_info_; VLOG(10) << "Emit prologue for reduction: " << reduction_emitter_.fusion_.ToString(); auto* builder = reduction_emitter_.builder_; for (const HloReduceInstruction* reduce_hlo : reduce_instr_index_group) { for (int op_result_idx = 0; op_result_idx < GetNumOutputs(reduce_hlo->shape()); op_result_idx++) { Shape result_shape = OutputShape(reduce_hlo->shape(), op_result_idx); llvm::Type* element_type = llvm_ir::PrimitiveTypeToIrType( result_shape.element_type(), builder->GetInsertBlock()->getModule()); llvm::AllocaInst* reduction_input_address = llvm_ir::EmitAllocaAtFunctionEntry( element_type, "reduction_input_address", builder); llvm::AllocaInst* result_address = llvm_ir::EmitAllocaAtFunctionEntry( element_type, "partial_reduction_result", builder); const HloInstruction* init_value = reduce_hlo->init_values()[op_result_idx]; llvm::Value* init_ir_value = (*fused_emitter.GetGenerator( *init_value))(llvm_ir::IrArray::Index(builder->getInt32Ty())) .value(); builder->CreateStore(init_ir_value, result_address); const Tiling& tiling = reduction_info.GetTiling(); auto shared_cache = [&]() -> std::optional<llvm_ir::SharedMemoryTile> { auto* module = reduction_emitter.ir_emitter_context_.llvm_module(); if (reduction_info.IsRowReduction()) { if (RowReductionGetRowsPerWarp( reduction_emitter_.ReducedDimensionSize()) > 1) { return std::nullopt; } auto block_size = tiling.GetThreadsPerBlock(); CHECK_EQ(block_size[ReductionDimensions::kRowMinorReducedDimension] % WarpSize(), 0); return llvm_ir::AllocateSharedMemoryTile( module, element_type, {block_size[ReductionDimensions::kRowKeptDimension], 
block_size[ReductionDimensions::kRowMinorReducedDimension] / WarpSize()}, "shared_cache"); } const auto& num_threads = tiling.GetThreadsPerBlock(); int n = num_threads[ReductionDimensions::kColReducedDimension]; CHECK_EQ(n, num_threads[ReductionDimensions::kColMinorKeptDimension]); return llvm_ir::AllocateSharedMemoryTile(module, element_type, {n, n + 1}, "shared_cache"); }(); llvm_ir::ElementGenerator input_gen = *fused_emitter.GetGenerator(*reduce_hlo->inputs()[op_result_idx]); SetCalculationStateFor({shared_cache, init_ir_value, result_address, reduction_input_address, input_gen}, reduce_hlo, op_result_idx); } } } void ReductionEmitter::MaybeEmitFenceForAMDGPU() { auto* module = builder_->GetInsertBlock()->getModule(); if (IsAMDGPU(module) && ir_emitter_context_.rocm_compute_capability().fence_before_barrier()) { builder_->CreateFence( llvm::AtomicOrdering::SequentiallyConsistent, builder_->getContext().getOrInsertSyncScopeID("workgroup")); } } void ReductionEmitter::EmitSyncThreads() { MaybeEmitFenceForAMDGPU(); EmitCallToTargetIntrinsic(TargetIntrinsicID::kBarrierId, {}, {}, builder_); } absl::StatusOr<std::unique_ptr<Thunk>> ReductionEmitter::BuildKernelThunkForFusion( const LaunchDimensions& launch_dimensions, absl::string_view discriminator, std::function<absl::Status(std::vector<llvm_ir::IrArray>, std::vector<llvm_ir::IrArray>)> kernel_builder_fn) { const HloComputation* fused_computation = fusion_.fused_instructions_computation(); std::string suggested_kernel_name = std::string(fusion_.name()); TF_ASSIGN_OR_RETURN(auto kernel_arguments, KernelArguments::Create( ir_emitter_context_.buffer_assignment(), &fusion_)); auto [status_or_entry, cached] = ir_emitter_context_.kernel_cache().GetWithStatus( fused_computation, kernel_arguments.args(), discriminator, [&]() -> absl::StatusOr<KernelReuseCache::Entry> { llvm::Function* kernel; std::vector<llvm_ir::IrArray> input_arrays; std::vector<llvm_ir::IrArray> output_arrays; TF_ASSIGN_OR_RETURN( std::tie(kernel, input_arrays, output_arrays), BuildKernelPrototype(ir_emitter_context_, suggested_kernel_name, kernel_arguments.args(), fusion_.operand_count(), launch_dimensions, builder_)); TF_RETURN_IF_ERROR(kernel_builder_fn(input_arrays, output_arrays)); return {{kernel->getName().str(), launch_dimensions, std::nullopt, 0}}; }); TF_ASSIGN_OR_RETURN(const KernelReuseCache::Entry* entry, status_or_entry); if (cached) { VLOG(3) << "Reuse: " << suggested_kernel_name << " -> " << entry->kernel_name; } return std::make_unique<KernelThunk>( &fusion_, entry->kernel_name, kernel_arguments.args(), launch_dimensions, entry->cluster_dim, entry->shmem_bytes); } absl::Status ReductionGroupEmitter::EmitExtraOutputsForReduce( const Shape& reduction_operand_shape, const llvm_ir::IrArray::Index& index, const ExtraOutputGensMap& extra_output_gens) { if (extra_output_gens.empty()) { return absl::OkStatus(); } auto* builder = reduction_emitter_.builder_; std::vector<std::pair<const HloInstruction*, llvm::Value*>> extra_output_ir_values; extra_output_ir_values.reserve(extra_output_gens.size()); auto get_index = [&](const HloInstruction* instr) { const Shape& s = instr->shape(); return ShapeUtil::EqualIgnoringElementType(reduction_operand_shape, s) ? 
index : index.SourceIndexOfBitcast(reduction_operand_shape, s, builder); }; for (const auto& [instr, generator] : extra_output_gens) { TF_ASSIGN_OR_RETURN(llvm::Value* const extra_output_ir_value, generator(get_index(instr))); extra_output_ir_values.emplace_back(instr, extra_output_ir_value); } for (const auto& [instr, generator] : extra_output_ir_values) { absl::Span<llvm_ir::IrArray const> result_ir = result_ir_arrays_.at(instr); CHECK_EQ(result_ir.size(), 1); result_ir[0].EmitWriteArrayElement(get_index(instr), generator, builder); } return absl::OkStatus(); } absl::StatusOr<std::unique_ptr<Thunk>> ReductionEmitter::BuildFusedInitializerThunk(const HloInstruction* fusion_root, BufferAllocation::Slice dest_slice, int output_index) { const HloReduceInstruction* reduce = DynCast<HloReduceInstruction>(fusion_root); TF_RET_CHECK(reduce); const HloInstruction* init_value = reduce->init_values()[0]; TF_ASSIGN_OR_RETURN( std::optional<std::unique_ptr<Thunk>> constant_init_thunk, BuildConstantInitializerThunk(ir_emitter_context_, fusion_root, init_value, dest_slice)); if (constant_init_thunk) { return *std::move(constant_init_thunk); } const Shape& dest_shape = fusion_root->shape(); LaunchDimensions launch_dimensions = CalculateLaunchDimensions( dest_shape, ir_emitter_context_.gpu_device_info()); const HloComputation* fused_computation = fusion_.fused_instructions_computation(); auto builder_fn = [&](std::vector<llvm_ir::IrArray> inputs, std::vector<llvm_ir::IrArray> outputs) -> absl::Status { FusedIrEmitter fused_emitter(elemental_emitter_); for (int i = 0; i < fused_computation->num_parameters(); i++) { fused_emitter.BindGenerator( *fused_computation->parameter_instruction(i), [builder = builder_, input = inputs[i]](llvm_ir::IrArray::Index index) { return input.EmitReadArrayElement(index, builder); }); } HloInstruction* instr = fused_computation->root_instruction(); if (instr->opcode() == HloOpcode::kTuple) { instr = instr->mutable_operand(output_index); } else { CHECK_EQ(0, output_index); } TF_RET_CHECK(instr->shape().IsArray()); TF_ASSIGN_OR_RETURN(auto generator, fused_emitter.GetGenerator(*instr->operand(1))); TF_RETURN_IF_ERROR(ParallelLoopEmitter(generator, {outputs[output_index]}, launch_dimensions, builder_) .EmitLoop(fusion_.name())); return absl::OkStatus(); }; return BuildKernelThunkForFusion(launch_dimensions, absl::StrCat("init_", output_index), builder_fn); } void ReductionGroupEmitter::EmitFullWarpShuffleDownLoopForReduce( const HloComputation* reducer, absl::Span<TypedPointer const> partial_result_addresses, int threads_per_block, int num_results_per_warp) const { CHECK_EQ(threads_per_block % 32, 0); CHECK_EQ(WarpSize() % num_results_per_warp, 0); auto* builder = reduction_emitter_.builder_; for (int distance = 16 / num_results_per_warp; distance >= 1; distance /= 2) { absl::InlinedVector<llvm::Value*, 2> reduction_params; for (auto acc : partial_result_addresses) { reduction_params.push_back(acc.first); } for (auto [partial_result_address, element_type] : partial_result_addresses) { int bit_width = llvm_ir::GetSizeInBits(element_type); llvm::Value* result_from_other_lane = llvm_ir::EmitAllocaAtFunctionEntry( element_type, "result_from_other_lane", builder); reduction_params.push_back(result_from_other_lane); llvm::Type* shuffled_value_type = element_type->isStructTy() ? 
builder->getIntNTy(bit_width) : element_type; llvm::Value* partial_result = builder->CreateLoad(shuffled_value_type, partial_result_address, "partial_reduction_result"); builder->CreateStore( EmitFullWarpShuffleDown( partial_result, builder->getInt32(distance), builder, reduction_emitter_.ir_emitter_context_.gpu_device_info()), result_from_other_lane); } absl::StatusOr<std::vector<llvm::Value*>> returned_scalars = CallNestedComputationWithScalarAddrs( builder, reduction_emitter_.ir_emitter_context_, *reducer, reduction_params); TF_CHECK_OK(returned_scalars.status()); for (int i = 0; i < returned_scalars->size(); i++) { builder->CreateStore(returned_scalars->at(i), partial_result_addresses[i].first); } } } llvm_ir::IrArray::Index ReductionGroupEmitter::GetOutputIndexForReduction( const TilingKernelInfo& tiling_kernel_info, const HloReduceInstruction* reduction, const HloInstruction* root, int output_idx) const { auto* builder = reduction_emitter_.builder_; auto* index_ty = reduction_emitter_.index_ty_; auto projected_index = [&]() -> llvm_ir::IrArray::Index { const auto& reduction_info = reduction_emitter_.reduction_codegen_info_; const auto& offset = tiling_kernel_info.tile_origin; const auto& shape = reduction_info.GetTiling().GetXlaShape(); const auto& thread_ids = tiling_kernel_info.thread_id_info.thread_ids; if (reduction_info.IsRowReduction()) { constexpr int kDim = ReductionDimensions::kRowKeptDimension; return {{builder->CreateAdd(offset[kDim], thread_ids[kDim])}, {shape.dimensions(kDim)}, index_ty}; } auto* major_idx = offset[ReductionDimensions::kColMajorKeptDimension]; auto* minor_idx = builder->CreateAdd( offset[ReductionDimensions::kColMinorKeptDimension], thread_ids[ReductionDimensions::kColReducedDimension]); return {{major_idx, minor_idx}, ShapeUtil::DeleteDimension( ReductionDimensions::kColReducedDimension, shape), index_ty}; }(); auto physical_shape = ShapeUtil::DeleteDimensions( reduction->dimensions(), reduction->operand(output_idx)->shape()); auto physical_index = projected_index.SourceIndexOfBitcast(physical_shape, builder); return llvm_ir::IrArray::Index(physical_index.multidim(), OutputShape(reduction->shape(), output_idx), index_ty) .SourceIndexOfBitcast(OutputShape(root->shape(), output_idx), builder); } void ReductionGroupEmitter::WriteReductionOutput( const TilingKernelInfo& tiling_kernel_info, const HloReduceInstruction* reduction, const std::vector<const HloInstruction*>& roots, const absl::Span<TypedPointer const> values) const { auto* builder = reduction_emitter_.builder_; const auto& reduction_info = reduction_emitter_.reduction_codegen_info_; const HloComputation* reducer = reduction->to_apply(); for (const auto& [oidx, typed_ptr] : llvm::enumerate(values)) { auto [output_ptr, type] = typed_ptr; for (auto root : roots) { llvm_ir::IrArray::Index output_index = GetOutputIndexForReduction(tiling_kernel_info, reduction, root, oidx); llvm::Value* output_address = result_ir_arrays_.at(root)[oidx].EmitArrayElementAddress( output_index, builder, "output_element_address"); if (reduction_info.IsRaceFree()) { FusedIrEmitter fused_emitter(reduction_emitter_.elemental_emitter_); llvm::Value* loaded = builder->CreateLoad(type, output_ptr, "output"); fused_emitter.BindGenerator( *reduction, [&](const llvm_ir::IrArray::Index& index) { return loaded; }); llvm_ir::ElementGenerator gen = *fused_emitter.GetGenerator(*root); llvm::Value* generated = *gen(output_index); builder->CreateStore(generated, output_address); } else { CHECK_EQ(values.size(), 1); CHECK_EQ(roots.size(), 
1); CHECK_EQ(reduction, root) << "output fusion is not allowed for racing reductions"; TF_CHECK_OK(EmitAtomicOperationForNestedComputation( builder, reduction_emitter_.ir_emitter_context_, *reducer, output_address, output_ptr, type)); } } } } void ReductionGroupEmitter::EmitReductionOutputForRowReduction( const TilingKernelInfo& tiling_kernel_info, const HloReduceInstruction* reduction, const std::vector<const HloInstruction*>& roots) const { const HloComputation* reducer = reduction->to_apply(); const auto& thread_id_info = tiling_kernel_info.thread_id_info; const auto& thread_ids = thread_id_info.thread_ids; auto* thread_id_x = thread_ids[ReductionDimensions::kRowMinorReducedDimension]; auto constant = [&](uint64_t c) -> llvm::Constant* { return llvm::ConstantInt::get(reduction_emitter_.index_ty_, c); }; auto* builder = reduction_emitter_.builder_; auto is_zero = [&](llvm::Value* value) { return builder->CreateICmpEQ(value, constant(0)); }; int num_outputs = reducer->num_parameters() / 2; absl::InlinedVector<TypedPointer, 2> current_outputs; for (int output_idx = 0; output_idx < num_outputs; output_idx++) { const auto& state = GetCalculationStateFor(reduction, output_idx); current_outputs.push_back( {state.partial_result_address, state.partial_result_address->getAllocatedType()}); } const auto& reduction_info = reduction_emitter_.reduction_codegen_info_; const Tiling& tiling = reduction_info.GetTiling(); int num_rows_per_warp = RowReductionGetRowsPerWarp(reduction_emitter_.ReducedDimensionSize()); EmitFullWarpShuffleDownLoopForReduce(reducer, absl::MakeSpan(current_outputs), tiling.GetNumThreadsPerBlock(), num_rows_per_warp); KernelSupportLibrary ksl(builder); llvm::Value* warp_id = builder->CreateUDiv(thread_id_x, constant(WarpSize())); auto emit_write_output = [&](llvm::Value* write_condition, const absl::Span<TypedPointer const> values) { ksl.If("reduction_write_output", write_condition, [&] { WriteReductionOutput(tiling_kernel_info, reduction, roots, values); }); }; llvm::Value* is_in_bounds_y = builder->CreateICmpULT( thread_ids[ReductionDimensions::kRowKeptDimension], tiling_kernel_info .output_tile_bounds[ReductionDimensions::kRowKeptDimension]); ksl.If("thread_in_bounds", is_in_bounds_y, [&] { if (num_rows_per_warp > 1) { llvm::Value* is_writing_thread = is_zero(builder->CreateAnd( thread_id_x, constant(reduction_emitter_.ReducedDimensionSize() - 1))); emit_write_output(is_writing_thread, current_outputs); return; } ksl.If("intra_warp_reduce_write", is_zero(thread_id_info.lane_id), [&] { for (int oidx = 0; oidx < num_outputs; oidx++) { auto& state = GetCalculationStateFor(reduction, oidx); state.shared_cache->Store( builder->CreateLoad(current_outputs[oidx].second, current_outputs[oidx].first), {thread_id_info.thread_ids[ReductionDimensions::kRowKeptDimension], warp_id}, builder); } }); reduction_emitter_.EmitSyncThreads(); ksl.If("inter_warp_reduce", is_zero(warp_id), [&] { absl::InlinedVector<TypedPointer, 2> selected_values; for (int oidx = 0; oidx < num_outputs; oidx++) { auto& state = GetCalculationStateFor(reduction, oidx); llvm::Value* block_accum_addr = state.shared_cache->Address( {thread_id_info.thread_ids[ReductionDimensions::kRowKeptDimension], thread_id_info.lane_id}, builder); llvm::Type* element_type = state.partial_result_address->getAllocatedType(); llvm::Value* initial_value_addr = CastSharedToGlobal(builder, llvm_ir::EmitAllocaAtFunctionEntry( element_type, "initial_value_addr", builder), element_type, ""); builder->CreateStore(state.initial_value, 
initial_value_addr); llvm::Value* warp_exists = builder->CreateICmpULT( thread_id_x, constant(tiling.GetThreadsPerBlock() [ReductionDimensions::kRowMinorReducedDimension] / WarpSize())); llvm::Value* selected_value = builder->CreateSelect( warp_exists, block_accum_addr, initial_value_addr); selected_values.push_back({selected_value, element_type}); } if (tiling.GetThreadsPerBlock() [ReductionDimensions::kRowMinorReducedDimension] > WarpSize()) { EmitFullWarpShuffleDownLoopForReduce( reducer, absl::MakeSpan(selected_values), tiling.GetNumThreadsPerBlock(), 1); } emit_write_output(is_zero(thread_id_x), selected_values); }); }); } void ReductionGroupEmitter::EmitReductionOutputForColumnReduction( const TilingKernelInfo& tiling_kernel_info, const HloReduceInstruction* reduction, const std::vector<const HloInstruction*>& roots) const { auto* builder = reduction_emitter_.builder_; KernelSupportLibrary ksl(builder); const HloComputation* reducer = reduction->to_apply(); const auto& thread_id_info = tiling_kernel_info.thread_id_info; const auto& thread_ids = thread_id_info.thread_ids; auto constant = [&](uint64_t c) -> llvm::Constant* { return llvm::ConstantInt::get(reduction_emitter_.index_ty_, c); }; auto is_zero = [&](llvm::Value* value) { return builder->CreateICmpEQ(value, constant(0)); }; const auto& reduction_info = reduction_emitter_.reduction_codegen_info_; const Tiling& tiling = reduction_info.GetTiling(); int num_outputs = reducer->num_parameters() / 2; auto* kept_index = thread_ids[ReductionDimensions::kColMinorKeptDimension]; auto* reduced_index = thread_ids[ReductionDimensions::kColReducedDimension]; for (int output_idx = 0; output_idx < num_outputs; output_idx++) { const auto& state = GetCalculationStateFor(reduction, output_idx); auto* current_output_value = builder->CreateLoad(state.partial_result_address->getAllocatedType(), state.partial_result_address); state.shared_cache->Store(current_output_value, {kept_index, reduced_index}, builder); } reduction_emitter_.EmitSyncThreads(); absl::InlinedVector<TypedPointer, 2> shmem_transposed_addrs; for (int output_idx = 0; output_idx < num_outputs; output_idx++) { const auto& state = GetCalculationStateFor(reduction, output_idx); auto* shmem_transposed_addr = state.shared_cache->Address({reduced_index, kept_index}, builder); shmem_transposed_addrs.push_back( {shmem_transposed_addr, state.shared_cache->GetElementType()}); } EmitFullWarpShuffleDownLoopForReduce(reducer, absl::MakeSpan(shmem_transposed_addrs), tiling.GetNumThreadsPerBlock(), 1); llvm::Value* has_output = builder->CreateAnd( builder->CreateICmpULT( reduced_index, tiling_kernel_info .output_tile_bounds[ReductionDimensions::kColMinorKeptDimension]), builder->CreateICmpULT( kept_index, tiling_kernel_info .output_tile_bounds[ReductionDimensions::kColReducedDimension])); ksl.If("reduction_write_output", builder->CreateAnd(has_output, is_zero(thread_id_info.lane_id)), [&] { WriteReductionOutput(tiling_kernel_info, reduction, roots, shmem_transposed_addrs); }); } void ReductionGroupEmitter::GenerateElementForReducer( const HloReduceInstruction* reduction, const llvm_ir::IrArray::Index& index) const { HloComputation* reducer = reduction->to_apply(); auto* builder = reduction_emitter_.builder_; CHECK_EQ(reducer->num_parameters() % 2, 0); absl::InlinedVector<llvm::Value*, 2> reduction_accumulators; absl::InlinedVector<llvm::Value*, 2> reduction_input_value; for (int red_idx = 0; red_idx < reducer->num_parameters() / 2; red_idx++) { const auto& state = GetCalculationStateFor(reduction, 
red_idx); llvm::AllocaInst* input_address = state.input_address; auto input_index = index.SourceIndexOfBitcast(reduction->operand(0)->shape(), builder); llvm::Value* const input_ir_value = *state.input_gen(input_index); builder->CreateStore(input_ir_value, input_address); reduction_accumulators.push_back(state.partial_result_address); reduction_input_value.push_back(input_address); } absl::InlinedVector<llvm::Value*, 4> reduction_params; for (llvm::Value* acc : reduction_accumulators) { reduction_params.push_back(acc); } for (llvm::Value* value : reduction_input_value) { reduction_params.push_back(value); } absl::StatusOr<std::vector<llvm::Value*>> returned_scalars = CallNestedComputationWithScalarAddrs( builder, reduction_emitter_.ir_emitter_context_, *reducer, reduction_params); TF_CHECK_OK(returned_scalars.status()); for (int i = 0; i < returned_scalars->size(); i++) { builder->CreateStore(returned_scalars->at(i), reduction_accumulators[i]); } } absl::Status ReductionEmitter::EmitIRForReduction( absl::Span<const HloInstruction* const> instr_index_group, FusedIrEmitter& fused_emitter, const ReductionOutputMap& result_ir_arrays, const Shape& input_shape) { ExtraOutputGensMap extra_output_gens; absl::flat_hash_map<const HloReduceInstruction*, std::vector<const HloInstruction*>> heroes_to_roots; std::vector<const HloReduceInstruction*> heroes; for (const HloInstruction* hlo : instr_index_group) { auto& hero = FindNonTrivialHero(*hlo); if (IsRealReductionHero(*hlo, hero)) { auto reduction = Cast<HloReduceInstruction>(&hero); if (heroes_to_roots.find(reduction) == heroes_to_roots.end()) { heroes.push_back(reduction); } heroes_to_roots[reduction].push_back(hlo); } else { extra_output_gens[hlo] = *fused_emitter.GetGenerator(*hlo); } } CHECK(!heroes.empty()) << " expect at least one reduce instructions."; const Tiling& tiling = reduction_codegen_info_.GetTiling(); CHECK_EQ(tiling.GetNumThreadsPerBlock() % WarpSize(), 0); ReductionGroupEmitter group_emitter(*this, heroes, result_ir_arrays, fused_emitter); TF_ASSIGN_OR_RETURN( TilingKernelInfo tiling_kernel_info, EmitTilingKernel( builder_, tiling, index_ty_, [&](const TilingThreadIdInfo& thread_id_info, const llvm_ir::IrArray::Index& tile_index, absl::Span<llvm::Value* const> tile_dimensions) { auto emit_element = [&](absl::Span<llvm::Value* const> index_in_tile) { auto index = tile_index.AddOffset(index_in_tile, builder_); for (const HloReduceInstruction* reduce : heroes) { group_emitter.GenerateElementForReducer(reduce, index); } TF_CHECK_OK(group_emitter.EmitExtraOutputsForReduce( ShapeUtil::MakeShape( F32, reduction_codegen_info_.GetTiling().GetShape()), index, extra_output_gens)); }; EmitTile(builder_, reduction_codegen_info_.GetTiling(), thread_id_info, tile_dimensions, emit_element); })); KernelSupportLibrary ksl(builder_); for (auto reduce : heroes) { if (reduction_codegen_info_.IsRowReduction()) { group_emitter.EmitReductionOutputForRowReduction( tiling_kernel_info, reduce, heroes_to_roots[reduce]); } else { group_emitter.EmitReductionOutputForColumnReduction( tiling_kernel_info, reduce, heroes_to_roots[reduce]); } } return absl::OkStatus(); } absl::StatusOr<FusionEmissionResult> ReductionEmitter::EmitInitializers() { FusionEmissionResult result; if (reduction_codegen_info_.IsRaceFree()) { return result; } std::vector<BufferAllocation::Slice> slices; TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus( fusion_.shape(), [&](const Shape& subshape, ShapeIndex index) { if (!ShapeUtil::IsLeafIndex(fusion_.shape(), index)) { return 
absl::OkStatus(); } TF_ASSIGN_OR_RETURN( BufferAllocation::Slice slice, ir_emitter_context_.buffer_assignment().GetUniqueSlice(&fusion_, index)); slices.push_back(slice); return absl::OkStatus(); })); absl::Span<HloInstructionAdaptor const> fusion_roots = analysis_.fusion_roots(); for (int i = 0; i < fusion_roots.size(); ++i) { const HloInstruction* fusion_root = &fusion_roots[i].instruction(); if (IsReductionFromOrToContiguousDimensions(*fusion_root)) { TF_ASSIGN_OR_RETURN( result.thunks.emplace_back(), BuildFusedInitializerThunk(fusion_root, slices[i], i)); } } return result; } absl::Status ReductionEmitter::EmitKernel( const LaunchDimensions& launch_dims, std::vector<llvm_ir::IrArray> inputs, std::vector<llvm_ir::IrArray> outputs) { const HloComputation* fused_computation = fusion_.fused_instructions_computation(); FusedIrEmitter fused_emitter(elemental_emitter_); for (int i = 0; i < fused_computation->num_parameters(); i++) { HloInstruction* fused_operand = fused_computation->parameter_instruction(i); fused_emitter.BindGenerator( *fused_operand, [builder = builder_, input = inputs[i], fused_operand](const llvm_ir::IrArray::Index& index) { return input.EmitReadArrayElement(index, builder, fused_operand->name()); }); } ReductionOutputMap result_ir_arrays; int ir_arrays_idx = 0; for (const HloInstructionAdaptor& root : analysis_.fusion_roots()) { int get_num_results = GetNumOutputs(root.shape()); result_ir_arrays[&root.instruction()] = absl::MakeSpan(outputs).subspan(ir_arrays_idx, get_num_results); ir_arrays_idx += get_num_results; } KernelSupportLibrary ksl(builder_, llvm_ir::UnrollMode::kDefaultUnroll); const auto& instr_index_groups = reduction_codegen_info_.GetGroups().grouped_roots; Shape reduce_operand_shape = reduction_codegen_info_.GetReduceOperandShape(); llvm::Value* block_id_y = gpu::EmitCallToTargetIntrinsic( gpu::TargetIntrinsicID::kBlockIdy, {}, {}, builder_); llvm_ir::AddRangeMetadata(0, instr_index_groups.size(), llvm::cast<llvm::Instruction>(block_id_y), builder_->GetInsertBlock()->getModule()); block_id_y = builder_->CreateZExtOrTrunc(block_id_y, builder_->getInt32Ty()); block_id_y->setName("block.id.y"); for (int i = 0; i < instr_index_groups.size(); ++i) { TF_RETURN_IF_ERROR(ksl.IfWithStatus( absl::StrCat("reduce-group-", i), builder_->CreateICmpEQ(block_id_y, builder_->getInt32(i)), [&] { return EmitIRForReduction(instr_index_groups[i], fused_emitter, result_ir_arrays, reduce_operand_shape); })); } return absl::OkStatus(); } } absl::StatusOr<FusionEmissionResult> ReductionFusion::EmitInitializers( IrEmitterContext& ir_emitter_context, const HloFusionInstruction& fusion) const { llvm::IRBuilder<> builder(ir_emitter_context.llvm_module()->getContext()); return ReductionEmitter(analysis_, reduction_info_, ir_emitter_context, fusion, &builder) .EmitInitializers(); } absl::Status ReductionFusion::EmitKernel(IrEmitterContext& ir_emitter_context, const HloFusionInstruction& fusion, const LaunchDimensions& launch_dims, std::vector<llvm_ir::IrArray> inputs, std::vector<llvm_ir::IrArray> outputs, llvm::IRBuilder<>* builder) const { return ReductionEmitter(analysis_, reduction_info_, ir_emitter_context, fusion, builder) .EmitKernel(launch_dims, inputs, outputs); } int ReductionInfo::GetRowsPerWarp() const { if (!is_row_reduction_) return 1; return RowReductionGetRowsPerWarp( tiling_.GetShape()[ReductionDimensions::kRowMinorReducedDimension]); } LaunchDimensions ReductionInfo::launch_dimensions() const { size_t blocks_y = groups_.grouped_roots.size(); return 
{se::BlockDim(tiling_.GetNumBlocks(), static_cast<int64_t>(blocks_y), 1), se::ThreadDim(tiling_.GetNumThreadsPerBlock(), 1, 1)}; } ReductionInfo ReductionInfo::Create(const HloFusionAnalysis& analysis) { auto* hero_reduction = analysis.FindHeroReduction(); CHECK_NE(hero_reduction, nullptr); Shape input_shape = hero_reduction->operand(0)->shape(); ReductionDimensions reduction_dimensions = GetReductionKindAndContiguousComponents(*hero_reduction); auto shape = reduction_dimensions.dimensions; VLOG(10) << "is_row_reduction " << reduction_dimensions.is_row_reduction << " " << shape[0] << " " << shape[1] << " " << shape[2]; Vector3 reduction_tiling = GetReductionTiling(reduction_dimensions); int64_t num_threads_y = reduction_dimensions.is_row_reduction ? 1 : WarpSize(); int64_t rows_per_warp = reduction_dimensions.is_row_reduction ? RowReductionGetRowsPerWarp( shape[ReductionDimensions::kRowMinorReducedDimension]) : 1; int64_t num_threads_x = [&] { if (reduction_dimensions.is_row_reduction) { if (rows_per_warp > 1) { return shape[ReductionDimensions::kRowMinorReducedDimension]; } int64_t max_block_size = MinThreadsXRowReduction(hero_reduction->GetModule()->config()); return std::min( max_block_size, RoundUpTo( CeilOfRatio(shape[ReductionDimensions::kRowMinorReducedDimension], reduction_tiling [ReductionDimensions::kRowMinorReducedDimension]), WarpSize())); } return WarpSize(); }(); constexpr int64_t kThreadsPerBlockTarget = 256; if (reduction_dimensions.is_row_reduction && num_threads_x * 2 <= kThreadsPerBlockTarget) { int64_t kept_size = reduction_dimensions.dimensions[ReductionDimensions::kRowKeptDimension]; if (kept_size * num_threads_x <= kThreadsPerBlockTarget) { num_threads_y = kept_size; while ((num_threads_x * num_threads_y) % 32) ++num_threads_y; } else { num_threads_y = kThreadsPerBlockTarget / num_threads_x; } } int vector_size = GetVectorSize(analysis, reduction_dimensions, num_threads_x, reduction_tiling); absl::InlinedVector<int64_t, 4> num_threads{1, num_threads_y, num_threads_x}; absl::InlinedVector<int64_t, 4> tiled_shape{shape[0], shape[1], shape[2] / vector_size}; absl::InlinedVector<int64_t, 4> tile_per_thread{ reduction_tiling[0], reduction_tiling[1], std::max<int64_t>(reduction_tiling[2] / vector_size, 1)}; if (rows_per_warp > 1) { tile_per_thread[2] = 1; } if (vector_size != 1) { num_threads.push_back(1); tiled_shape.push_back(vector_size); tile_per_thread.push_back(vector_size); } Tiling tiling(tiled_shape, tile_per_thread, num_threads, {false, false, true, false}); bool reduction_is_race_free = ReductionIsRaceFree( hero_reduction->GetModule()->config(), reduction_dimensions); return ReductionInfo(analysis, tiling, reduction_dimensions.is_row_reduction, reduction_is_race_free, GroupDisjointReductions(analysis, false), hero_reduction); } std::optional<IndexingMap> ReductionInfo::ComputeThreadIdToOutputIndexing( int64_t root_index, mlir::MLIRContext* ctx) const { if (!groups_.is_reduction_root[root_index]) { auto map = ComposeIndexingMaps( GetIndexingMapForTiling(tiling_, ctx), GetBitcastMap(tiling_.GetXlaShape(), analysis_.fusion_root(root_index).shape(), ctx)); AddGroupIdConstraint(map, root_index, groups_); return map; } const auto& hero = analysis_.fusion_hero(root_index).instruction(); auto block_offsets = GetBlockOffsetsForTiling(tiling_, ctx); auto thread_ids = DelinearizeInBoundsIndex(mlir::getAffineDimExpr(0, ctx), tiling_.GetThreadsPerBlock()); auto physical_shape = ShapeUtil::DeleteDimensions(hero.dimensions(), hero.operand(0)->shape()); 
std::vector<IndexingMap::Variable> dimension_ranges = DimVarsFromGPUGrid( {tiling_.GetNumThreadsPerBlock(), 1, 1, tiling_.GetNumBlocks(), static_cast<int64_t>(groups_.grouped_roots.size()), 1}); constexpr int kRowKept = ReductionDimensions::kRowKeptDimension; constexpr int kRowMinorReduced = ReductionDimensions::kRowMinorReducedDimension; constexpr int kColMajorKept = ReductionDimensions::kColMajorKeptDimension; constexpr int kColMinorKept = ReductionDimensions::kColMinorKeptDimension; constexpr int kColReduced = ReductionDimensions::kColReducedDimension; auto map = [&]() { if (is_row_reduction_) { IndexingMap linear_index( mlir::AffineMap::get( 6, 0, block_offsets.getResult(kRowKept) + thread_ids[kRowKept], ctx), dimension_ranges, {}, {}); int rows_per_warp = GetRowsPerWarp(); if (rows_per_warp > 1) { linear_index.AddConstraint( thread_ids[kRowMinorReduced] % (WarpSize() / rows_per_warp), {0, 0}); } else { linear_index.AddConstraint(thread_ids[kRowMinorReduced], {0, 0}); } return ComposeIndexingMaps( linear_index, GetBitcastMap(ShapeUtil::MakeShape( PRED, {tiling_.GetShape()[kRowKept]}), physical_shape, ctx)); } mlir::SmallVector<mlir::AffineExpr> projected_dims{ block_offsets.getResult(kColMajorKept), block_offsets.getResult(kColMinorKept) + thread_ids[kColReduced]}; std::vector<IndexingMap::Variable> range_vars; if (thread_ids.size() == 4) { int vector_size = tiling_.GetThreadTileSize().back(); range_vars.push_back({0, vector_size - 1}); projected_dims.push_back(mlir::getAffineSymbolExpr(0, ctx)); } IndexingMap projected_index( mlir::AffineMap::get(6, range_vars.size(), projected_dims, ctx), dimension_ranges, range_vars, {}); projected_index.AddConstraint( mlir::getAffineDimExpr( KernelFusionInterface::kIndexingMapThreadIdxDims[0], ctx) % WarpSize(), {0, 0}); if (!is_row_reduction_) { projected_index.AddConstraint( projected_index.GetAffineMap().getResult(1), {0, tiling_.GetShape()[ReductionDimensions::kColMinorKeptDimension] - 1}); } return ComposeIndexingMaps( projected_index, GetBitcastMap(ShapeUtil::DeleteDimension( ReductionDimensions::kColReducedDimension, tiling_.GetXlaShape()), physical_shape, ctx)); }(); AddGroupIdConstraint(map, root_index, groups_); map.Simplify(); return map; } std::optional<IndexingMap> ReductionInfo::ComputeThreadIdToInputIndexing( int64_t root_index, int64_t hero_operand_index, mlir::MLIRContext* ctx) const { const auto& hero = analysis_.fusion_hero(root_index).instruction(); if (groups_.is_reduction_root[root_index] && hero_operand_index >= hero.operand_count() / 2) { return std::nullopt; } if (!groups_.is_reduction_root[root_index]) { return ComposeIndexingMaps( *ComputeThreadIdToOutputIndexing(root_index, ctx), *ComputeOutputToInputIndexing( &analysis_.fusion_root(root_index).instruction(), 0, ctx) .indexing_maps[hero_operand_index] .begin()); } auto map = ComposeIndexingMaps( GetIndexingMapForTiling(tiling_, ctx), GetBitcastMap(tiling_.GetXlaShape(), hero.operand(hero_operand_index)->shape(), ctx)); AddGroupIdConstraint(map, root_index, groups_); map.Simplify(); return map; } } }
#include "xla/service/gpu/fusions/legacy/reduction.h" #include <memory> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/status/statusor.h" #include "mlir/IR/MLIRContext.h" #include "xla/service/gpu/gpu_device_info_for_tests.h" #include "xla/service/gpu/hlo_fusion_analysis.h" #include "xla/service/gpu/model/indexing_map_serialization.h" #include "xla/service/gpu/model/indexing_test_utils.h" #include "xla/stream_executor/device_description.h" #include "xla/tests/hlo_test_base.h" namespace xla { namespace gpu { namespace { using ::testing::ElementsAre; using ::testing::SizeIs; class ReductionTest : public HloTestBase { protected: stream_executor::DeviceDescription device_info_ = TestGpuDeviceInfo::RTXA6000DeviceInfo(); mlir::MLIRContext mlir_context_; }; TEST_F(ReductionTest, ThreadIndexingRowReduction) { auto module = ParseAndReturnVerifiedModule(R"( HloModule module add { p0 = f32[] parameter(0) p1 = f32[] parameter(1) ROOT add = f32[] add(p0, p1) } fusion { %input = f32[100,64,512] parameter(0) %c0 = f32[] constant(0) ROOT reduce = f32[100,64] reduce(%input, %c0), dimensions={2}, to_apply=add } ENTRY entry { %input = f32[100,64,512] parameter(0) ROOT %fusion = f32[100,64] fusion(%input), kind=kInput, calls=fusion })") .value(); auto* root = module->entry_computation()->root_instruction(); auto analysis = HloFusionAnalysis::Create(*root, device_info_); ReductionFusion fusion(analysis); EXPECT_THAT( ToString(*fusion.ComputeThreadIdToInputIndexing(0, 0, &mlir_context_)), MatchIndexingString(R"( (th_x, th_y, th_z, bl_x, bl_y, bl_z)[s0, s1, s2, s3] -> ( bl_x floordiv 8, (bl_x mod 8) * 8 + th_x floordiv 32, (th_x mod 32) * 2 + s2 * 64 + s3 ), domain: th_x in [0, 255], th_y in [0, 0], th_z in [0, 0], bl_x in [0, 799], bl_y in [0, 0], bl_z in [0, 0], s0 in [0, 0], s1 in [0, 0], s2 in [0, 7], s3 in [0, 1] )")); EXPECT_THAT( ToString(*fusion.ComputeThreadIdToOutputIndexing(0, &mlir_context_)), MatchIndexingString(R"( (th_x, th_y, th_z, bl_x, bl_y, bl_z) -> ( bl_x floordiv 8, (bl_x mod 8) * 8 + th_x floordiv 32 ), domain: th_x in [0, 224], th_y in [0, 0], th_z in [0, 0], bl_x in [0, 799], bl_y in [0, 0], bl_z in [0, 0], th_x mod 32 in [0, 0] )")); } TEST_F(ReductionTest, TwoGroups) { auto module = ParseAndReturnVerifiedModule(R"( add { p0 = f32[] parameter(0) p1 = f32[] parameter(1) ROOT add = f32[] add(p0, p1) } fusion { %p0 = f32[2] parameter(0) %p1 = f32[2] parameter(1) %c0 = f32[] constant(-inf) %r0 = f32[] reduce(%p0, %c0), dimensions={0}, to_apply=add %c1 = f32[] constant(inf) %r1 = f32[] reduce(%p1, %c1), dimensions={0}, to_apply=add ROOT %tuple = (f32[], f32[]) tuple(%r0, %r1) } ENTRY entry { %p0 = f32[2] parameter(0) %p1 = f32[2] parameter(1) ROOT %fusion = (f32[], f32[]) fusion(%p0, %p1), kind=kInput, calls=fusion })") .value(); auto* root = module->entry_computation()->root_instruction(); auto analysis = HloFusionAnalysis::Create(*root, device_info_); ReductionFusion fusion(analysis); EXPECT_THAT(fusion.reduction_info().GetGroups().grouped_roots, ElementsAre(ElementsAre(&analysis.fusion_root(0).instruction()), ElementsAre(&analysis.fusion_root(1).instruction()))); } TEST_F(ReductionTest, OneGroup) { auto module = ParseAndReturnVerifiedModule(R"( %add { %p0 = c128[] parameter(0) %p1 = c128[] parameter(1) ROOT %add.35 = c128[] add(c128[] %p0, c128[] %p1) } %fusion { %p0 = c128[1,2] parameter(0) %c0 = c128[] constant((0, 0)) %reduce = c128[] reduce(%p0, %c0), dimensions={0,1}, to_apply=%add %real = f64[] real(c128[] %reduce) %imag = f64[] imag(c128[] %reduce) %negate = f64[] 
negate(f64[] %imag) ROOT %tuple.29 = (f64[], f64[]) tuple(f64[] %real, f64[] %negate) } ENTRY entry { %p0 = c128[1,2] parameter(0) ROOT %fusion = (f64[], f64[]) fusion(%p0), kind=kInput, calls=fusion })") .value(); auto* root = module->entry_computation()->root_instruction(); auto analysis = HloFusionAnalysis::Create(*root, device_info_); ReductionFusion fusion(analysis); EXPECT_THAT(fusion.reduction_info().GetGroups().grouped_roots, SizeIs(2)); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/legacy/reduction.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/legacy/reduction_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
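The row-reduction path in `ReductionInfo::Create` (in the reduction.cc Code field above) sizes `num_threads_x` as `min(MinThreadsXRowReduction(config), RoundUpTo(CeilOfRatio(minor_reduced_dim, tile), WarpSize()))` whenever each warp handles a single row. A minimal standalone sketch of that arithmetic, assuming a warp size of 32, a hypothetical 512-thread cap in place of `MinThreadsXRowReduction`, and a hypothetical per-thread tile size:

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>

// Illustrative sketch only; kWarpSize and kMaxBlockSize stand in for
// WarpSize() and MinThreadsXRowReduction(config) from the source above.
constexpr int64_t kWarpSize = 32;
constexpr int64_t kMaxBlockSize = 512;  // assumed cap

int64_t CeilOfRatio(int64_t a, int64_t b) { return (a + b - 1) / b; }
int64_t RoundUpTo(int64_t a, int64_t b) { return CeilOfRatio(a, b) * b; }

// minor_reduced: size of the minor-most reduced dimension.
// tile: elements each thread reduces sequentially (reduction_tiling[2]).
int64_t NumThreadsX(int64_t minor_reduced, int64_t tile) {
  return std::min(kMaxBlockSize,
                  RoundUpTo(CeilOfRatio(minor_reduced, tile), kWarpSize));
}

int main() {
  std::cout << NumThreadsX(4096, 16) << "\n";  // 256: rounded ratio below the cap
  std::cout << NumThreadsX(100, 16) << "\n";   // 32: rounded up to one full warp
  return 0;
}
```

The special cases handled in the source (several rows fitting in one warp, and column reductions falling back to `WarpSize()` threads) are intentionally omitted from this sketch.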
472804a5-8f92-4078-b679-b52c4d682391
cpp
tensorflow/tensorflow
mlir_fusion_emitter
third_party/xla/xla/service/gpu/fusions/mlir/mlir_fusion_emitter.cc
third_party/xla/xla/service/gpu/fusions/mlir/mlir_fusion_emitter_test.cc
#include "xla/service/gpu/fusions/mlir/mlir_fusion_emitter.h" #include <cstdint> #include <functional> #include <iterator> #include <memory> #include <optional> #include <string> #include <utility> #include <variant> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "llvm/ADT/SmallVector.h" #include "llvm/IR/Function.h" #include "llvm/IR/IRBuilder.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicsNVPTX.h" #include "llvm/Linker/Linker.h" #include "llvm/Support/Casting.h" #include "mlir/Conversion/AffineToStandard/AffineToStandard.h" #include "mlir/Conversion/ComplexToStandard/ComplexToStandard.h" #include "mlir/Conversion/ReconcileUnrealizedCasts/ReconcileUnrealizedCasts.h" #include "mlir/Conversion/SCFToControlFlow/SCFToControlFlow.h" #include "mlir/Dialect/Affine/IR/AffineOps.h" #include "mlir/Dialect/Arith/IR/Arith.h" #include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h" #include "mlir/Dialect/ControlFlow/IR/ControlFlow.h" #include "mlir/Dialect/DLTI/DLTI.h" #include "mlir/Dialect/Func/Extensions/InlinerExtension.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/GPU/IR/GPUDialect.h" #include "mlir/Dialect/LLVMIR/LLVMDialect.h" #include "mlir/Dialect/LLVMIR/NVVMDialect.h" #include "mlir/Dialect/LLVMIR/ROCDLDialect.h" #include "mlir/Dialect/LLVMIR/Transforms/InlinerInterfaceImpl.h" #include "mlir/Dialect/Math/IR/Math.h" #include "mlir/Dialect/MemRef/Transforms/Passes.h" #include "mlir/Dialect/SCF/IR/SCF.h" #include "mlir/Dialect/Tensor/IR/Tensor.h" #include "mlir/Dialect/Vector/IR/VectorOps.h" #include "mlir/IR/Attributes.h" #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/DialectRegistry.h" #include "mlir/IR/ImplicitLocOpBuilder.h" #include "mlir/IR/Location.h" #include "mlir/IR/MLIRContext.h" #include "mlir/IR/OwningOpRef.h" #include "mlir/IR/Types.h" #include "mlir/Interfaces/DataLayoutInterfaces.h" #include "mlir/Pass/PassManager.h" #include "mlir/Support/LLVM.h" #include "mlir/Target/LLVMIR/Dialect/Builtin/BuiltinToLLVMIRTranslation.h" #include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h" #include "mlir/Target/LLVMIR/Dialect/NVVM/NVVMToLLVMIRTranslation.h" #include "mlir/Target/LLVMIR/Dialect/ROCDL/ROCDLToLLVMIRTranslation.h" #include "mlir/Target/LLVMIR/Export.h" #include "mlir/Transforms/Passes.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/mlir/tools/mlir_replay/public/compiler_trace.pb.h" #include "xla/mlir/tools/mlir_replay/public/compiler_trace_instrumentation.h" #include "xla/mlir_hlo/mhlo/IR/hlo_ops.h" #include "xla/mlir_hlo/mhlo/transforms/passes.h" #include "xla/service/buffer_assignment.h" #include "xla/service/dump.h" #include "xla/service/gpu/fusions/fusion_emitter.h" #include "xla/service/gpu/fusions/ir/xla_gpu_ops.h" #include "xla/service/gpu/fusions/mlir/computation_partitioner.h" #include "xla/service/gpu/fusions/mlir/elemental_hlo_to_mlir.h" #include "xla/service/gpu/fusions/mlir/type_util.h" #include "xla/service/gpu/fusions/transforms/passes.h" #include "xla/service/gpu/ir_emitter_context.h" #include "xla/service/gpu/kernel_arguments.h" #include "xla/service/gpu/kernel_reuse_cache.h" #include "xla/service/gpu/launch_dimensions.h" #include 
"xla/service/gpu/runtime/kernel_thunk.h" #include "xla/service/gpu/target_util.h" #include "xla/service/llvm_ir/llvm_util.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/status_macros.h" #include "xla/stream_executor/device_description.h" #include "xla/tsl/framework/mlir/status_scoped_diagnostic_handler.h" #include "xla/util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { using llvm::SmallVector; using mlir::Value; using mlir::ValueRange; using mlir::func::FuncOp; void AddRanges(llvm::Function* func, const LaunchDimensions& launch_dims, llvm::Module* module) { for (auto& block : *func) { for (auto& instr : block) { if (auto* call = llvm::dyn_cast<llvm::CallInst>(&instr)) { if (auto* callee = call->getCalledFunction()) { switch (callee->getIntrinsicID()) { case llvm::Intrinsic::nvvm_read_ptx_sreg_tid_x: llvm_ir::AddRangeMetadata( 0, launch_dims.thread_counts_per_block().x, call, module); break; case llvm::Intrinsic::nvvm_read_ptx_sreg_tid_y: llvm_ir::AddRangeMetadata( 0, launch_dims.thread_counts_per_block().y, call, module); break; case llvm::Intrinsic::nvvm_read_ptx_sreg_tid_z: llvm_ir::AddRangeMetadata( 0, launch_dims.thread_counts_per_block().z, call, module); break; case llvm::Intrinsic::nvvm_read_ptx_sreg_ctaid_x: llvm_ir::AddRangeMetadata(0, launch_dims.block_counts().x, call, module); break; case llvm::Intrinsic::nvvm_read_ptx_sreg_ctaid_y: llvm_ir::AddRangeMetadata(0, launch_dims.block_counts().y, call, module); break; case llvm::Intrinsic::nvvm_read_ptx_sreg_ctaid_z: llvm_ir::AddRangeMetadata(0, launch_dims.block_counts().z, call, module); break; } } } } } } bool Needs64Bits(const Shape& shape) { return shape.IsArray() ? !IsInt32(ShapeUtil::ElementsIn(shape)) : absl::c_any_of(shape.tuple_shapes(), Needs64Bits); } bool Is64BitIndex(const HloInstruction* instr, int operand) { const auto& shape = instr->operand(operand)->shape(); return shape.element_type() == PrimitiveType::S64 || shape.element_type() == PrimitiveType::U64; } bool Needs64BitIndices(const HloComputation* computation) { for (auto* instr : computation->instructions()) { switch (instr->opcode()) { case HloOpcode::kDynamicSlice: case HloOpcode::kDynamicUpdateSlice: for (int i = 1; i < instr->operand_count(); ++i) { if (Is64BitIndex(instr, i)) return true; } break; case HloOpcode::kGather: case HloOpcode::kScatter: CHECK(instr->shape().IsArray()) << "Variadic scatter is unsupported."; if (Is64BitIndex(instr, 1)) return true; break; default: break; } if (Needs64Bits(instr->shape()) || absl::c_any_of(instr->called_computations(), Needs64BitIndices)) { return true; } } return false; } } Value MlirFusionEmitterBase::EmitBlockId(mlir::ImplicitLocOpBuilder& builder, int dim) const { const auto& counts = launch_dimensions().block_counts(); int64_t count = dim == 0 ? counts.x : dim == 1 ? counts.y : counts.z; auto block_id = builder.create<mlir::gpu::BlockIdOp>( static_cast<mlir::gpu::Dimension>(dim)); block_id->setAttr("xla.range", builder.getIndexArrayAttr({0, count - 1})); return block_id; } Value MlirFusionEmitterBase::EmitThreadId(mlir::ImplicitLocOpBuilder& builder, int dim) const { const auto& counts = launch_dimensions().thread_counts_per_block(); int64_t count = dim == 0 ? counts.x : dim == 1 ? 
counts.y : counts.z; auto thread_id = builder.create<mlir::gpu::ThreadIdOp>( static_cast<mlir::gpu::Dimension>(dim)); thread_id->setAttr("xla.range", builder.getIndexArrayAttr({0, count - 1})); return thread_id; } llvm::SmallVector<Value> MlirFusionEmitterBase::EmitThreadAndBlockIds( mlir::ImplicitLocOpBuilder& builder) const { auto& b = builder; return {EmitThreadId(b, 0), EmitThreadId(b, 1), EmitThreadId(b, 2), EmitBlockId(b, 0), EmitBlockId(b, 1), EmitBlockId(b, 2)}; } absl::StatusOr<FusionEmissionResult> MlirFusionEmitterBase::Emit( IrEmitterContext& ir_emitter_context, const HloFusionInstruction& fusion) const { VLOG(4) << "Fusion: " << fusion.fused_instructions_computation()->ToString(); TF_ASSIGN_OR_RETURN( auto args, KernelArguments::Create(ir_emitter_context.buffer_assignment(), &fusion)); auto launch_dims = launch_dimensions(); auto [status_or_entry, cached] = ir_emitter_context.kernel_cache().GetWithStatus( fusion.fused_instructions_computation(), args.args(), "", [&]() -> absl::StatusOr<KernelReuseCache::Entry> { std::string kernel_name = ir_emitter_context.name_uniquer()->GetUniqueName( llvm_ir::SanitizeFunctionName(std::string(fusion.name()))); if (ir_emitter_context.emit_kernels()) { TF_ASSIGN_OR_RETURN( auto module, CreateLLVMModule( *ir_emitter_context.mlir_context(), ir_emitter_context.llvm_module()->getContext(), ir_emitter_context.gpu_device_info(), fusion, kernel_name, &ir_emitter_context.buffer_assignment())); auto* kernel_func = module->getFunction(kernel_name); AddRanges(kernel_func, launch_dims, module.get()); auto* target = ir_emitter_context.llvm_module(); module->setDataLayout(target->getDataLayout()); module->setTargetTriple(target->getTargetTriple()); llvm::IRBuilder<> builder(module->getContext()); AnnotateFunctionAsGpuKernel(module.get(), kernel_func, &builder); TF_RETURN_IF_ERROR(AnnotateKernelLaunchDimensions( ir_emitter_context.gpu_device_info(), launch_dims, kernel_name, module.get())); CHECK(!llvm::Linker::linkModules( *target, std::move(module), llvm::Linker::Flags::OverrideFromSrc)); } else { VLOG(3) << "Skipped kernel compilation."; } return KernelReuseCache::Entry{kernel_name, launch_dims, std::nullopt, 0}; }); TF_ASSIGN_OR_RETURN(const KernelReuseCache::Entry* entry, status_or_entry); if (cached) { VLOG(3) << "Reuse: " << fusion.name() << " -> " << entry->kernel_name; } FusionEmissionResult result; result.thunks.emplace_back(std::make_unique<KernelThunk>( &fusion, entry->kernel_name, args.args(), launch_dims, entry->cluster_dim, entry->shmem_bytes)); return result; } absl::StatusOr<std::unique_ptr<llvm::Module>> MlirFusionEmitterBase::CreateLLVMModule( mlir::MLIRContext& mlir_context, llvm::LLVMContext& llvm_context, const se::DeviceDescription& device, const HloFusionInstruction& fusion, const std::string& entry_function_name, const BufferAssignment* buffer_assignment) const { HloModule* hlo_module = fusion.GetModule(); std::unique_ptr<mlir::interpreter::MlirCompilationTrace> trace = nullptr; if (DumpingEnabledForHloModule(*hlo_module) && DumpingEnabledForHloPass("mlir-fusion-emitter", hlo_module->config().debug_options())) { trace = std::make_unique<mlir::interpreter::MlirCompilationTrace>(); } TF_ASSIGN_OR_RETURN( auto module, CreateMLIRModule(mlir_context, fusion, entry_function_name, buffer_assignment)); mlir::PassManager pm(&mlir_context); AddXlaGpuOpsOptimizationPasses(pm); AddLoopTransformationPasses(pm); AddLoweringPasses(pm, device); auto pipeline_status = RunPassPipeline(module.get(), pm, trace.get()); if (trace) { 
DumpPerModuleProtobufToFile( *hlo_module, *trace, hlo_module->config().debug_options(), absl::StrCat(entry_function_name, ".mlir-trace")); } TF_RETURN_IF_ERROR(pipeline_status); auto llvm_module = mlir::translateModuleToLLVMIR(module.get(), llvm_context); TF_RET_CHECK(llvm_module != nullptr) << "Failed to translate module to LLVM IR."; return llvm_module; } absl::StatusOr<mlir::OwningOpRef<mlir::ModuleOp>> MlirFusionEmitterBase::CreateMLIRModule( mlir::MLIRContext& context, const HloFusionInstruction& fusion, const std::string& entry_function_name, const BufferAssignment* buffer_assignment, mlir::interpreter::MlirCompilationTrace* trace) const { context.loadDialect<mlir::DLTIDialect, mlir::NVVM::NVVMDialect, mlir::ROCDL::ROCDLDialect, mlir::affine::AffineDialect, mlir::arith::ArithDialect, mlir::cf::ControlFlowDialect, mlir::func::FuncDialect, mlir::gpu::GPUDialect, mlir::math::MathDialect, mlir::mhlo::MhloDialect, mlir::scf::SCFDialect, mlir::tensor::TensorDialect, mlir::vector::VectorDialect, xla::gpu::XlaGpuDialect>(); mlir::DialectRegistry registry; mlir::LLVM::registerInlinerInterface(registry); mlir::func::registerInlinerExtension(registry); mlir::registerBuiltinDialectTranslation(registry); mlir::registerLLVMDialectTranslation(registry); mlir::registerNVVMDialectTranslation(registry); mlir::registerROCDLDialectTranslation(registry); context.appendDialectRegistry(registry); mlir::OpBuilder builder(&context); auto loc = mlir::NameLoc::get(builder.getStringAttr(fusion.name())); mlir::OwningOpRef<mlir::ModuleOp> module = llvm_ir::CreateMlirModuleOp(loc); SmallVector<mlir::Type> param_types; std::optional<KernelArguments> args; if (buffer_assignment != nullptr) { TF_ASSIGN_OR_RETURN(args, KernelArguments::Create(*buffer_assignment, &fusion)); } int next_slice_index = 0; absl::flat_hash_map<BufferAllocation::Slice, std::optional<int>> slice_indices; auto get_arg_attrs = [&](int index) -> absl::StatusOr<mlir::Attribute> { if (!args) { return builder.getDictionaryAttr({builder.getNamedAttr( "xla.slice_index", builder.getIndexAttr(next_slice_index++))}); } const auto& arg = args->args()[index]; SmallVector<mlir::NamedAttribute> attrs; attrs.push_back(builder.getNamedAttr( "xla.slice_index", builder.getIndexAttr(arg.llvm_arg_index()))); attrs.push_back( builder.getNamedAttr(mlir::LLVM::LLVMDialect::getAlignAttrName(), builder.getIndexAttr(arg.alignment()))); attrs.push_back(builder.getNamedAttr( mlir::LLVM::LLVMDialect::getDereferenceableAttrName(), builder.getIndexAttr(arg.slice().size()))); if (!arg.written()) { attrs.push_back( builder.getNamedAttr("xla.invariant", builder.getUnitAttr())); } return builder.getDictionaryAttr(attrs); }; SmallVector<mlir::Attribute> arg_attrs; int arg_index = 0; for (auto* param : fusion.operands()) { param_types.push_back( mlir_converter::TensorShapeToMlirType(param->shape(), builder)); TF_ASSIGN_OR_RETURN(arg_attrs.emplace_back(), get_arg_attrs(arg_index++)); } auto result_types = mlir_converter::ShapeToMlirTypes(fusion.shape(), builder); param_types.append(result_types.begin(), result_types.end()); TF_RETURN_IF_ERROR(ShapeUtil::ForEachSubshapeWithStatus( fusion.shape(), [&](const auto& shape, const ShapeIndex& index) { if (shape.IsArray()) { TF_ASSIGN_OR_RETURN(arg_attrs.emplace_back(), get_arg_attrs(arg_index++)); } return absl::OkStatus(); })); builder.setInsertionPointToStart(module->getBody()); auto entry_func = builder.create<FuncOp>( loc, entry_function_name, mlir::FunctionType::get(&context, param_types, result_types), mlir::StringAttr{}, 
mlir::ArrayAttr::get(&context, arg_attrs), mlir::ArrayAttr{}); entry_func->setAttr("xla.entry", mlir::UnitAttr::get(&context)); TF_RETURN_IF_ERROR(EmitMlir(module.get(), entry_func, fusion)); return module; } absl::Status MlirFusionEmitterBase::EmitMlir( mlir::ModuleOp module, FuncOp entry_function, const HloFusionInstruction& fusion) const { std::vector<mlir_converter::EpilogueSpecification> epilogues = GetEpilogues(fusion, module->getContext()); mlir_converter::PartitionedComputations computations( fusion.fused_instructions_computation(), module->getContext(), epilogues); auto subgraph_to_mlir_fn = computations.DeclareFunctions(module); for (const auto& epilogue : epilogues) { for (auto* custom : epilogue.heroes) { if (custom->user_count() == 0) { subgraph_to_mlir_fn.extract(&computations.FindSubgraph(custom)) .mapped() .erase(); } } } auto* root = fusion.fused_instructions_computation()->root_instruction(); if (root->opcode() == HloOpcode::kTuple && !epilogues.empty()) { subgraph_to_mlir_fn.extract(&computations.FindSubgraph(root)) .mapped() .erase(); } auto call_targets = computations.CreateCallTargetProvider(subgraph_to_mlir_fn); for (const auto& comp : computations.partitioned_computations()) { for (const auto& subgraph : comp.subgraphs()) { if (subgraph_to_mlir_fn.contains(&subgraph)) { TF_RETURN_IF_ERROR(mlir_converter::SubgraphToMlirFunction( comp, subgraph, subgraph_to_mlir_fn[&subgraph], call_targets)); } } } for (const auto& epilogue : computations.epilogues()) { if (epilogue.roots.empty()) continue; TF_RETURN_IF_ERROR(mlir_converter::SubgraphToMlirFunction( computations.FindPartitionedComputation( fusion.fused_instructions_computation()), epilogue, subgraph_to_mlir_fn[&epilogue], call_targets)); } int index_bitwidth = Needs64BitIndices(fusion.fused_instructions_computation()) ? 64 : 32; mlir::OpBuilder b(module->getContext()); auto index_layout = mlir::DataLayoutEntryAttr::get( b.getIndexType(), b.getI32IntegerAttr(index_bitwidth)); module->setAttr( mlir::DLTIDialect::kDataLayoutAttrName, mlir::DataLayoutSpecAttr::get(module->getContext(), {index_layout})); return EmitEntryFunction(computations, call_targets, entry_function, fusion); } absl::flat_hash_map<const HloInstruction*, ValueRange> MlirFusionEmitterBase::EmitEpilogue( int epilogue_index, const mlir_converter::PartitionedComputations& computations, FuncOp entry_fn, const absl::flat_hash_map<const HloInstruction*, llvm::SmallVector<Value>>& injected, ValueRange output_indices, mlir::ImplicitLocOpBuilder& builder) const { const auto& epilogue = computations.epilogues().at(epilogue_index); if (epilogue.roots.empty()) { return {}; } auto epilogue_fn = mlir::cast<FuncOp>( entry_fn->getParentOfType<mlir::ModuleOp>().lookupSymbol(epilogue.name)); SmallVector<Value> operands = ValueRange(entry_fn.getArguments().take_front( computations.fusion()->num_parameters())); absl::c_copy(output_indices, std::back_inserter(operands)); int injected_offset = operands.size(); operands.resize(injected_offset + epilogue.num_injected_values); for (auto [injected_instruction, start] : epilogue.injected_value_starts) { absl::c_copy(injected.at(injected_instruction), operands.begin() + injected_offset + start); } ValueRange results = builder.create<PureCallOp>(epilogue_fn, operands).getResults(); absl::flat_hash_map<const HloInstruction*, ValueRange> results_per_root; for (auto* root : epilogue.roots) { int arity = root->shape().IsTuple() ? 
root->shape().tuple_shapes().size() : 1; results_per_root[root] = results.take_front(arity); results = results.drop_front(arity); } CHECK_EQ(results.size(), 0); return results_per_root; } absl::Status MlirFusionEmitterBase::RunPassPipeline( mlir::ModuleOp module, mlir::PassManager& pm, mlir::interpreter::MlirCompilationTrace* trace) const { if (VLOG_IS_ON(5)) { module.getContext()->disableMultithreading(); pm.enableIRPrinting(); } if (trace) { module.getContext()->disableMultithreading(); pm.addInstrumentation( std::make_unique<mlir::interpreter::MlirCompilerTraceInstrumentation>( *trace)); } tsl::StatusScopedDiagnosticHandler diagnostic_handler(module.getContext()); (void)pm.run(module); return diagnostic_handler.consumeStatus(); } void AddXlaGpuOpsOptimizationPasses(mlir::OpPassManager& pm) { pm.addNestedPass<FuncOp>(CreateSimplifyArithPass()); pm.addPass(mlir::createCanonicalizerPass()); pm.addPass(mlir::createCSEPass()); pm.addPass(CreateEraseDeadFunctionsPass()); pm.addPass(mlir::createCSEPass()); } void AddLoopTransformationPasses(mlir::OpPassManager& pm) { pm.addNestedPass<FuncOp>(CreateLowerXlaGpuToScfPass()); pm.addPass(mlir::createInlinerPass({}, [&](mlir::OpPassManager& pm) { pm.addPass(mlir::createCSEPass()); })); pm.addPass(mlir::createCanonicalizerPass()); pm.addPass(mlir::createCSEPass()); pm.addNestedPass<FuncOp>(CreateFuseLoopsPass()); pm.addNestedPass<FuncOp>(CreatePeelLoopsPass()); pm.addNestedPass<FuncOp>(CreateLowerXlaGpuLoopsToScfPass()); pm.addPass(mlir::mhlo::createConvertToSignlessPass()); pm.addPass(CreatePropagateSliceIndicesPass()); pm.addPass(CreateFlattenTensorsPass()); pm.addPass(mlir::createLoopInvariantCodeMotionPass()); pm.addNestedPass<FuncOp>(CreateUnswitchLoopsPass()); pm.addPass(mlir::createLoopInvariantCodeMotionPass()); pm.addNestedPass<FuncOp>(CreateVectorizeLoadsAndStoresPass()); pm.addNestedPass<FuncOp>(CreateOptimizeLoopsPass()); } void AddLoweringPasses(mlir::OpPassManager& pm, const se::DeviceDescription& device) { bool is_amd = std::holds_alternative<se::RocmComputeCapability>( device.gpu_compute_capability()); pm.addNestedPass<FuncOp>(CreateConvertPureCallOpsPass()); pm.addPass(CreateLowerTensorsPass( is_amd, is_amd ? device.rocm_compute_capability().gcn_arch_name() : device.cuda_compute_capability().ToString())); pm.addPass(mlir::createConvertComplexToStandardPass()); pm.addPass(CreateMergePointersToSameSlicePass()); pm.addPass(mlir::createCanonicalizerPass()); pm.addPass(mlir::createCSEPass()); pm.addNestedPass<FuncOp>(CreateSimplifyArithPass()); pm.addPass(CreateSimplifyAffinePass()); pm.addPass(mlir::createLowerAffinePass()); pm.addPass(mlir::createLoopInvariantCodeMotionPass()); pm.addPass(mlir::createSymbolDCEPass()); pm.addPass(mlir::createCSEPass()); auto maybe_convert_fp8 = MaybeCreateConvertFloatNvidiaPass(device); if (maybe_convert_fp8.has_value()) { pm.addPass(std::move(*maybe_convert_fp8)); } pm.addPass(CreateExpandFloatOpsPass()); pm.addPass(mlir::createLowerAffinePass()); pm.addPass(mlir::createConvertSCFToCFPass()); pm.addPass(CreateLowerToLLVMPass(is_amd)); pm.addPass(mlir::createReconcileUnrealizedCastsPass()); } } }
#include "xla/service/gpu/fusions/mlir/mlir_fusion_emitter.h" #include <cstdint> #include <optional> #include <string> #include <gtest/gtest.h> #include "absl/status/status.h" #include "absl/strings/str_replace.h" #include "absl/strings/string_view.h" #include "llvm/IR/LLVMContext.h" #include "llvm/Support/raw_ostream.h" #include "mlir/Dialect/Affine/IR/AffineOps.h" #include "mlir/Dialect/Arith/IR/Arith.h" #include "mlir/Dialect/Bufferization/IR/BufferizableOpInterface.h" #include "mlir/Dialect/Complex/IR/Complex.h" #include "mlir/Dialect/Func/Extensions/InlinerExtension.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/GPU/IR/GPUDialect.h" #include "mlir/Dialect/LLVMIR/NVVMDialect.h" #include "mlir/Dialect/LLVMIR/ROCDLDialect.h" #include "mlir/Dialect/Math/IR/Math.h" #include "mlir/Dialect/SCF/IR/SCF.h" #include "mlir/Dialect/Tensor/IR/Tensor.h" #include "mlir/IR/ImplicitLocOpBuilder.h" #include "mlir/IR/MLIRContext.h" #include "mlir/IR/ValueRange.h" #include "mlir/Pass/PassManager.h" #include "mlir/Target/LLVMIR/Dialect/Builtin/BuiltinToLLVMIRTranslation.h" #include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h" #include "mlir/Target/LLVMIR/Dialect/NVVM/NVVMToLLVMIRTranslation.h" #include "mlir/Target/LLVMIR/Dialect/ROCDL/ROCDLToLLVMIRTranslation.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/mlir_hlo/mhlo/IR/hlo_ops.h" #include "xla/service/gpu/fusions/mlir/computation_partitioner.h" #include "xla/service/gpu/gpu_device_info_for_tests.h" #include "xla/service/gpu/launch_dimensions.h" #include "xla/service/gpu/model/indexing_map.h" #include "xla/stream_executor/device_description.h" #include "xla/tests/filecheck.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { class DummyCopyFusionEmitter : public MlirFusionEmitterBase { public: LaunchDimensions launch_dimensions() const final { return {1, 100}; } std::optional<IndexingMap> ComputeThreadIdToOutputIndexing( int64_t, mlir::MLIRContext*) const final { return std::nullopt; } std::optional<IndexingMap> ComputeThreadIdToInputIndexing( int64_t, int64_t, mlir::MLIRContext*) const final { return std::nullopt; } protected: absl::Status EmitEntryFunction( const mlir_converter::PartitionedComputations& computations, const mlir_converter::CallTargetProvider& call_targets, mlir::func::FuncOp entry_function, const HloFusionInstruction& fusion) const override { mlir::ImplicitLocOpBuilder b(entry_function.getLoc(), entry_function); b.setInsertionPointToStart(entry_function.addEntryBlock()); auto thread_id = EmitThreadId(b, 0); auto value = b.create<mlir::tensor::ExtractOp>( entry_function.getArgument(0), mlir::ValueRange{thread_id}); auto result = b.create<mlir::tensor::InsertOp>( value, entry_function.getArgument(1), mlir::ValueRange{thread_id}); b.create<mlir::func::ReturnOp>(result->getResults()); return absl::OkStatus(); } }; class MlirFusionEmitterTest : public HloTestBase { protected: MlirFusionEmitterTest() { context_.loadDialect<mlir::tensor::TensorDialect, mlir::func::FuncDialect, mlir::affine::AffineDialect, mlir::arith::ArithDialect, mlir::complex::ComplexDialect, mlir::math::MathDialect, mlir::scf::SCFDialect, mlir::mhlo::MhloDialect, mlir::gpu::GPUDialect, mlir::NVVM::NVVMDialect, mlir::ROCDL::ROCDLDialect>(); mlir::DialectRegistry registry; mlir::func::registerInlinerExtension(registry); mlir::registerBuiltinDialectTranslation(registry); mlir::registerLLVMDialectTranslation(registry); 
mlir::registerNVVMDialectTranslation(registry); mlir::registerROCDLDialectTranslation(registry); context_.appendDialectRegistry(registry); } mlir::MLIRContext context_; stream_executor::DeviceDescription device_info_ = TestGpuDeviceInfo::CudaOrRocmDeviceInfo(); }; constexpr absl::string_view kModule = R"( fused_computation { ROOT %p0 = f32[100] parameter(0) } ENTRY main { %p0 = f32[100] parameter(0) ROOT fusion = f32[100] fusion(%p0), kind=kLoop, calls=fused_computation })"; TEST_F(MlirFusionEmitterTest, CreateMlirModule) { auto module = ParseAndReturnVerifiedModule(kModule).value(); DummyCopyFusionEmitter emitter; TF_ASSERT_OK_AND_ASSIGN( auto mlir_module, emitter.CreateMLIRModule( context_, *Cast<HloFusionInstruction>( module->entry_computation()->root_instruction()), "fusion", nullptr)); std::string out; llvm::raw_string_ostream stream(out); stream << *mlir_module; TF_ASSERT_OK_AND_ASSIGN(auto filecheck_result, RunFileCheck(out, R"( )")); EXPECT_TRUE(filecheck_result); } TEST_F(MlirFusionEmitterTest, CreateLLVMModule) { llvm::LLVMContext llvm_context; auto module = ParseAndReturnVerifiedModule(kModule).value(); DummyCopyFusionEmitter emitter; TF_ASSERT_OK_AND_ASSIGN( auto llvm_module, emitter.CreateLLVMModule( context_, llvm_context, device_info_, *Cast<HloFusionInstruction>( module->entry_computation()->root_instruction()), "fusion", nullptr)); std::string out; llvm::raw_string_ostream stream(out); stream << *llvm_module; TF_ASSERT_OK_AND_ASSIGN( auto filecheck_result, RunFileCheck( out, absl::StrReplaceAll( R"( )", {{"TIDX", device_info_.cuda_compute_capability().major == -1 ? "@llvm.amdgcn.workitem.id.x" : "@llvm.nvvm.read.ptx.sreg.tid.x"}}))); EXPECT_TRUE(filecheck_result); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/mlir/mlir_fusion_emitter.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/mlir/mlir_fusion_emitter_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
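
As an illustrative aside (not part of the dataset record above; the helper names below are hypothetical), here is a minimal C++ sketch of how a pass-pipeline builder in the style of AddXlaGpuOpsOptimizationPasses composes upstream MLIR passes on an mlir::OpPassManager and then runs them on a module. It uses only upstream MLIR APIs, not the XLA-specific passes referenced in the record.

```cpp
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/Passes.h"

namespace example {

// Hypothetical helper (illustrative only): a generic cleanup pipeline that
// interleaves canonicalization and common-subexpression elimination, the same
// upstream passes used by the pipeline-builder functions in the record above.
void AddCleanupPasses(mlir::OpPassManager& pm) {
  pm.addPass(mlir::createCanonicalizerPass());  // fold and canonicalize ops
  pm.addPass(mlir::createCSEPass());            // remove redundant computations
}

// Hypothetical driver: builds a PassManager, appends the pipeline, and runs it
// on a module, returning failure if any pass signals an error.
mlir::LogicalResult RunCleanup(mlir::ModuleOp module) {
  mlir::PassManager pm(module.getContext());
  AddCleanupPasses(pm);
  return pm.run(module);
}

}  // namespace example
```

Keeping the pipeline builder separate from the PassManager that runs it mirrors the structure in the record, where RunPassPipeline can attach instrumentation (IR printing, interpreter tracing) to the PassManager before the passes execute.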
3a215bb7-022f-4f4a-a01d-521a81880e38
cpp
tensorflow/tensorflow
elemental_hlo_to_mlir
third_party/xla/xla/service/gpu/fusions/mlir/elemental_hlo_to_mlir.cc
third_party/xla/xla/service/gpu/fusions/mlir/elemental_hlo_to_mlir_test.cc
#include "xla/service/gpu/fusions/mlir/elemental_hlo_to_mlir.h" #include <cstddef> #include <cstdint> #include <functional> #include <iterator> #include <queue> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/container/node_hash_map.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" #include "llvm/ADT/APFloat.h" #include "llvm/ADT/APInt.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/MathExtras.h" #include "mlir/Dialect/Affine/IR/AffineOps.h" #include "mlir/Dialect/Affine/LoopUtils.h" #include "mlir/Dialect/Arith/IR/Arith.h" #include "mlir/Dialect/Complex/IR/Complex.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/SCF/IR/SCF.h" #include "mlir/Dialect/Tensor/IR/Tensor.h" #include "mlir/Dialect/Vector/IR/VectorOps.h" #include "mlir/IR/AffineExpr.h" #include "mlir/IR/Attributes.h" #include "mlir/IR/BuiltinAttributeInterfaces.h" #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/BuiltinTypeInterfaces.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/IRMapping.h" #include "mlir/IR/ImplicitLocOpBuilder.h" #include "mlir/IR/TypeRange.h" #include "mlir/IR/Types.h" #include "mlir/IR/Value.h" #include "mlir/IR/ValueRange.h" #include "mlir/Interfaces/DataLayoutInterfaces.h" #include "mlir/Support/LLVM.h" #include "xla/comparison_util.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/translate/hlo_to_mhlo/hlo_utils.h" #include "xla/mlir_hlo/mhlo/IR/hlo_ops.h" #include "xla/mlir_hlo/mhlo/transforms/map_mhlo_to_scalar_op.h" #include "xla/primitive_util.h" #include "xla/service/algorithm_util.h" #include "xla/service/gpu/fusions/ir/xla_gpu_ops.h" #include "xla/service/gpu/fusions/mlir/computation_partitioner.h" #include "xla/service/gpu/fusions/mlir/type_util.h" #include "xla/service/gpu/model/indexing_analysis.h" #include "xla/service/gpu/model/indexing_map.h" #include "xla/shape_util.h" #include "xla/status_macros.h" #include "xla/xla_data.pb.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace mlir_converter { namespace { using llvm::SmallVector; using llvm::SmallVectorImpl; using mlir::Block; using mlir::FloatType; using mlir::ImplicitLocOpBuilder; using mlir::IntegerType; using mlir::IRMapping; using mlir::Location; using mlir::MLIRContext; using mlir::OpBuilder; using mlir::Value; using mlir::ValueRange; using mlir::arith::AndIOp; using mlir::arith::CmpIOp; using mlir::arith::CmpIPredicate; using mlir::arith::ConstantIndexOp; using mlir::arith::ConstantOp; using mlir::scf::IfOp; using mlir::scf::YieldOp; namespace arith = ::mlir::arith; namespace mhlo = ::mlir::mhlo; namespace scf = ::mlir::scf; static auto& kUnsupportedOps = *new llvm::DenseSet<HloOpcode>{HloOpcode::kAddDependency, HloOpcode::kAfterAll, HloOpcode::kAllGather, HloOpcode::kAllGatherDone, HloOpcode::kAllGatherStart, HloOpcode::kAllReduce, HloOpcode::kAllReduceDone, HloOpcode::kAllReduceStart, HloOpcode::kAllToAll, HloOpcode::kAsyncDone, HloOpcode::kAsyncStart, HloOpcode::kAsyncUpdate, HloOpcode::kBatchNormGrad, HloOpcode::kBatchNormInference, HloOpcode::kBatchNormTraining, 
HloOpcode::kCholesky, HloOpcode::kCollectivePermute, HloOpcode::kCollectivePermuteDone, HloOpcode::kCollectivePermuteStart, HloOpcode::kCopyDone, HloOpcode::kCopyStart, HloOpcode::kCustomCall, HloOpcode::kDomain, HloOpcode::kDynamicReshape, HloOpcode::kFft, HloOpcode::kFusion, HloOpcode::kGetDimensionSize, HloOpcode::kOptimizationBarrier, HloOpcode::kInfeed, HloOpcode::kOutfeed, HloOpcode::kPartitionId, HloOpcode::kRecv, HloOpcode::kRecvDone, HloOpcode::kReduceScatter, HloOpcode::kReplicaId, HloOpcode::kRng, HloOpcode::kRngBitGenerator, HloOpcode::kRngGetAndUpdateState, HloOpcode::kScatter, HloOpcode::kSelectAndScatter, HloOpcode::kSend, HloOpcode::kSendDone, HloOpcode::kSetDimensionSize, HloOpcode::kSort, HloOpcode::kTopK, HloOpcode::kTriangularSolve, HloOpcode::kWhile, HloOpcode::kConditional, HloOpcode::kStochasticConvert, HloOpcode::kCall}; bool IsUnsupportedGather(const HloInstruction* instr) { if (instr->opcode() != HloOpcode::kGather) return false; auto* gather = Cast<HloGatherInstruction>(instr); const auto& dims = gather->gather_dimension_numbers(); int indices_rank = gather->operand(1)->shape().rank(); if (dims.index_vector_dim() != 1 || !dims.collapsed_slice_dims().empty() || indices_rank == 0 || indices_rank > 2) { return true; } for (auto [index, val] : llvm::enumerate(dims.start_index_map())) { if (index != val) return true; } for (auto [index, val] : llvm::enumerate(dims.offset_dims())) { if (index + 1 != val) return true; } return false; } absl::StatusOr<Value> GetSingleOperandValue( const OperandProvider& operand_provider, const HloInstruction* instr, int operand_index, ValueRange indices) { TF_ASSIGN_OR_RETURN(auto operand, operand_provider(instr, operand_index, indices)); TF_RET_CHECK(operand.size() == 1) << "Expected operand to be a single value."; return operand.front(); } absl::StatusOr<SmallVector<Value, 1>> EmitReduce( const HloInstruction* instr, ValueRange indices, const OperandProvider& operand_provider, const CallTargetProvider& call_target_provider, ImplicitLocOpBuilder& b) { auto* mlir_context = b.getContext(); HloInstructionIndexing indexing = ComputeOutputToInputIndexing(instr, 0, mlir_context); const auto& indexing_map = *indexing.indexing_maps[0].begin(); SmallVector<Value, 1> init_values; for (int i = instr->operand_count() / 2; i < instr->operand_count(); ++i) { TF_ASSIGN_OR_RETURN(init_values.emplace_back(), GetSingleOperandValue(operand_provider, instr, i, {})); } auto body = [&](ValueRange iter_args, ValueRange dim_values, ValueRange symbol_values) -> absl::StatusOr<SmallVector<Value>> { auto indices = ApplyIndexing(indexing_map, dim_values, symbol_values, b); SmallVector<Value, 2> args{iter_args}; for (int i = 0; i < instr->operand_count() / 2; ++i) { TF_ASSIGN_OR_RETURN( args.emplace_back(), GetSingleOperandValue(operand_provider, instr, i, indices)); } auto reducer = call_target_provider( instr->called_computations().front()->root_instruction()); return b.create<mlir::func::CallOp>(reducer, args).getResults(); }; return EmitLoopNestWithStatus(b, indices, init_values, indexing_map, body); } absl::StatusOr<SmallVector<Value, 1>> EmitReduceWindow( const HloInstruction* instr, ValueRange indices, const OperandProvider& operand_provider, const CallTargetProvider& call_target_provider, ImplicitLocOpBuilder& b) { MLIRContext* mlir_context = b.getContext(); HloInstructionIndexing indexing = ComputeOutputToInputIndexing(instr, 0, mlir_context); auto indexing_map = *indexing.indexing_maps[0].begin(); indexing_map.RescaleSymbols(); auto reduce_window = 
DynCast<HloReduceWindowInstruction>(instr); CHECK(reduce_window != nullptr); SmallVector<Value, 1> init_values; for (auto [index, init_value] : llvm::enumerate(reduce_window->init_values())) { TF_ASSIGN_OR_RETURN( init_values.emplace_back(), GetSingleOperandValue(operand_provider, instr, reduce_window->input_count() + index, {})); } auto body = [&](ValueRange iter_args, ValueRange dim_values, ValueRange symbol_values) -> absl::StatusOr<SmallVector<Value>> { auto indices = ApplyIndexing(indexing_map, dim_values, symbol_values, b); SmallVector<Value, 2> args{iter_args}; for (auto [index, input] : llvm::enumerate(reduce_window->inputs())) { TF_ASSIGN_OR_RETURN( args.emplace_back(), GetSingleOperandValue(operand_provider, instr, index, indices)); } auto reducer = call_target_provider( instr->called_computations().front()->root_instruction()); return b.create<mlir::func::CallOp>(reducer, args).getResults(); }; return EmitLoopNestWithStatus(b, indices, init_values, indexing_map, body); } absl::StatusOr<SmallVector<Value, 1>> EmitConcat( const HloInstruction* instr, ValueRange indices, const OperandProvider& operand_provider, ImplicitLocOpBuilder& b) { auto result_element_type = PrimitiveTypeToMlirType(instr->shape().element_type(), b); int concat_dim = Cast<HloConcatenateInstruction>(instr)->concatenate_dimension(); SmallVector<Value, 3> operand_indices = indices; SmallVector<int64_t, 3> offsets{0}; for (auto* operand : instr->operands()) { offsets.push_back(offsets.back() + operand->shape().dimensions(concat_dim)); } std::function<absl::StatusOr<SmallVector<Value, 1>>(int64_t, int64_t)> generate_concat; generate_concat = [&](int64_t begin, int64_t end) -> absl::StatusOr<SmallVector<Value, 1>> { if (begin == end - 1) { operand_indices[concat_dim] = b.create<arith::SubIOp>( indices[concat_dim], b.create<ConstantIndexOp>(offsets[begin])); TF_ASSIGN_OR_RETURN(auto operand, operand_provider(instr, begin, operand_indices)); return operand; } int64_t mid = (begin + end) / 2; auto if_op = b.create<IfOp>( mlir::TypeRange{result_element_type}, b.create<CmpIOp>(CmpIPredicate::ult, indices[concat_dim], b.create<ConstantIndexOp>(offsets[mid])), true, true); b.setInsertionPointToStart(if_op.getBody(0)); TF_ASSIGN_OR_RETURN(auto left_val, generate_concat(begin, mid)); b.create<YieldOp>(left_val); b.setInsertionPointToStart(if_op.getBody(1)); TF_ASSIGN_OR_RETURN(auto right_val, generate_concat(mid, end)); b.create<YieldOp>(right_val); b.setInsertionPointAfter(if_op); return if_op.getResults(); }; return generate_concat(0, instr->operand_count()); } absl::StatusOr<SmallVector<Value, 1>> EmitDynamicSlice( const HloInstruction* instr, ValueRange indices, const OperandProvider& operand_provider, ImplicitLocOpBuilder& b) { SmallVector<Value, 3> input_indices(indices); const auto& input_shape = instr->operand(0)->shape(); for (int i = 0; i < input_shape.rank(); ++i) { TF_ASSIGN_OR_RETURN( auto offset, GetSingleOperandValue(operand_provider, instr, i + 1, {})); offset = ClampIndex(offset, primitive_util::IsUnsignedIntegralType( instr->operand(i + 1)->shape().element_type()), input_shape.dimensions(i) - instr->shape().dimensions(i), b); input_indices[i] = b.create<arith::AddIOp>(input_indices[i], offset); } return operand_provider(instr, 0, input_indices); } absl::StatusOr<SmallVector<Value, 1>> EmitDynamicUpdateSlice( const HloInstruction* instr, ValueRange indices, const OperandProvider& operand_provider, ImplicitLocOpBuilder& b) { auto result_element_type = PrimitiveTypeToMlirType(instr->shape().element_type(), b); 
Value is_in_bounds = b.create<ConstantOp>(b.getIntegerAttr(b.getI1Type(), 1)); mlir::SmallVector<Value, 3> update_indices; const auto& updates_shape = instr->operand(1)->shape(); for (int i = 0; i < instr->shape().rank(); ++i) { int64_t update_size = updates_shape.dimensions(i); TF_ASSIGN_OR_RETURN( auto start_index, GetSingleOperandValue(operand_provider, instr, i + 2, {})); start_index = ClampIndex(start_index, primitive_util::IsUnsignedIntegralType( instr->operand(i + 2)->shape().element_type()), instr->shape().dimensions(i) - update_size, b); auto end_index = b.create<arith::AddIOp>( start_index, b.create<ConstantOp>(b.getIndexAttr(update_size))); is_in_bounds = b.create<AndIOp>( is_in_bounds, b.create<CmpIOp>(CmpIPredicate::sge, indices[i], start_index)); is_in_bounds = b.create<AndIOp>( is_in_bounds, b.create<CmpIOp>(CmpIPredicate::slt, indices[i], end_index)); update_indices.push_back(b.create<arith::SubIOp>(indices[i], start_index)); } auto if_op = b.create<IfOp>(mlir::TypeRange{result_element_type}, is_in_bounds, true, true); b.setInsertionPointToStart(if_op.getBody(0)); TF_ASSIGN_OR_RETURN( auto updated_value, GetSingleOperandValue(operand_provider, instr, 1, update_indices)); b.create<YieldOp>(updated_value); b.setInsertionPointToStart(if_op.getBody(1)); TF_ASSIGN_OR_RETURN( auto original_value, GetSingleOperandValue(operand_provider, instr, 0, indices)); b.create<YieldOp>(original_value); b.setInsertionPointAfter(if_op); return if_op.getResults(); } absl::StatusOr<SmallVector<Value, 1>> EmitGather( const HloInstruction* instr, ValueRange indices, const OperandProvider& operand_provider, ImplicitLocOpBuilder& b) { auto row = indices[0]; auto zero = b.create<ConstantIndexOp>(0); SmallVector<Value, 3> operand_indices(instr->operand(0)->shape().rank(), zero); const auto& indices_shape = instr->operand(1)->shape(); int num_indices = indices_shape.rank() == 1 ? 1 : indices_shape.dimensions(1); for (int i = 0; i < num_indices; ++i) { auto i_val = i == 0 ? zero : b.create<ConstantIndexOp>(i); int64_t slice_size = instr->gather_slice_sizes()[i]; int64_t input_size = instr->operand(0)->shape().dimensions()[i]; TF_ASSIGN_OR_RETURN( auto input_index, operand_provider(instr, 1, indices_shape.rank() == 1 ? 
ValueRange{row} : ValueRange{row, i_val})); TF_RET_CHECK(input_index.size() == 1) << "Expected operand to be a single value."; operand_indices[i] = ClampIndex(input_index.front(), primitive_util::IsUnsignedIntegralType( instr->operand(1)->shape().element_type()), input_size - slice_size, b); } for (int i = 0; i < operand_indices.size(); ++i) { operand_indices[i] = b.createOrFold<arith::AddIOp>(operand_indices[i], indices[i + 1]); } return operand_provider(instr, 0, operand_indices); } SmallVector<SmallVector<Value, 3>, 2> GetInputIndices( const HloInstructionIndexing& indexing, ValueRange output_indices, ImplicitLocOpBuilder& b) { SmallVector<SmallVector<Value, 3>, 2> indices; for (auto& maps : indexing.indexing_maps) { CHECK_EQ(maps.size(), 1); CHECK(!maps.begin()->IsUndefined()); indices.push_back(ApplyIndexing(*maps.begin(), output_indices, {}, b)); } return indices; } absl::StatusOr<SmallVector<Value, 1>> EmitPad( const HloInstruction* instr, ValueRange indices, const OperandProvider& operand_provider, ImplicitLocOpBuilder& b) { auto result_element_type = PrimitiveTypeToMlirType(instr->shape().element_type(), b); auto indexing = ComputeOutputToInputIndexing(instr, 0, b.getContext()); const auto& indexing_map = *indexing.indexing_maps[0].begin(); Value is_in_bounds = CheckConstraints(indexing_map, indices, {}, b); auto if_op = b.create<IfOp>(mlir::TypeRange{result_element_type}, is_in_bounds, true, true); b.setInsertionPointToStart(if_op.getBody(0)); TF_ASSIGN_OR_RETURN(auto input_value, GetSingleOperandValue( operand_provider, instr, 0, GetInputIndices(indexing, indices, b)[0 ])); b.create<YieldOp>(input_value); b.setInsertionPointToStart(if_op.getBody(1)); TF_ASSIGN_OR_RETURN(auto padding_value, GetSingleOperandValue(operand_provider, instr, 1, {})); b.create<YieldOp>(padding_value); b.setInsertionPointAfter(if_op); return {{if_op.getResult(0)}}; } absl::StatusOr<Value> EmitFloatCast(Value value, mlir::Type target_type, ImplicitLocOpBuilder& b) { if (value.getType().getIntOrFloatBitWidth() < target_type.getIntOrFloatBitWidth()) { return b.create<arith::ExtFOp>(target_type, value); } if (value.getType().getIntOrFloatBitWidth() > target_type.getIntOrFloatBitWidth()) { return b.create<arith::TruncFOp>(target_type, value); } return value; } absl::StatusOr<Value> EmitMulAdd(Value lhs, Value rhs, Value accumulator, PrimitiveType result_element_type, mlir::Type accumulator_type, ImplicitLocOpBuilder& b) { if (primitive_util::IsFloatingPointType(result_element_type)) { if (result_element_type == PrimitiveType::BF16) { lhs = b.create<arith::ExtFOp>(b.getF32Type(), lhs); rhs = b.create<arith::ExtFOp>(b.getF32Type(), rhs); } TF_ASSIGN_OR_RETURN( Value casted, EmitFloatCast(b.create<arith::MulFOp>(lhs, rhs), accumulator_type, b)); return b.create<arith::AddFOp>(accumulator, casted); } if (result_element_type == PrimitiveType::PRED) { return b.create<arith::OrIOp>(accumulator, b.create<arith::AndIOp>(lhs, rhs)); } return b.create<arith::AddIOp>(accumulator, b.create<arith::MulIOp>(lhs, rhs)); } absl::StatusOr<SmallVector<Value, 1>> EmitDotLoop( const HloInstruction* instr, ValueRange indices, const OperandProvider& operand_provider, ImplicitLocOpBuilder& b) { auto result_element_type = PrimitiveTypeToMlirType(instr->shape().element_type(), b); HloInstructionIndexing indexing = ComputeOutputToInputIndexing(instr, 0, b.getContext()); const IndexingMap& lhs_indexing_map = *indexing.indexing_maps.at(0).begin(); const IndexingMap& rhs_indexing_map = *indexing.indexing_maps.at(1).begin(); const 
mlir::Type accumulator_type = result_element_type.isBF16() ? b.getF32Type() : result_element_type; Value accum_init_value = b.create<ConstantOp>(b.getZeroAttr(accumulator_type)).getResult(); size_t rhs_symbol_count = rhs_indexing_map.GetSymbolCount(); auto body = [&](ValueRange iter_args, ValueRange dim_values, ValueRange symbol_values) -> absl::StatusOr<SmallVector<Value>> { auto lhs_indices = ApplyIndexing(lhs_indexing_map, dim_values, symbol_values, b); auto rhs_indices = ApplyIndexing(rhs_indexing_map, dim_values, symbol_values.take_front(rhs_symbol_count), b); TF_ASSIGN_OR_RETURN(Value lhs_value, GetSingleOperandValue( operand_provider, instr, 0, lhs_indices)); TF_ASSIGN_OR_RETURN(Value rhs_value, GetSingleOperandValue( operand_provider, instr, 1, rhs_indices)); Value accum = iter_args[0]; TF_ASSIGN_OR_RETURN( accum, EmitMulAdd(lhs_value, rhs_value, accum, instr->shape().element_type(), accumulator_type, b)); return {{accum}}; }; TF_ASSIGN_OR_RETURN(ValueRange results, EmitLoopNestWithStatus(b, indices, {accum_init_value}, lhs_indexing_map, body)); TF_RET_CHECK(results.size() == 1); if (result_element_type.isBF16()) { return {{b.create<arith::TruncFOp>(b.getBF16Type(), results.front())}}; } return {{results.front()}}; } absl::StatusOr<SmallVector<Value, 1>> EmitDot( const HloInstruction* instr, ValueRange indices, const OperandProvider& operand_provider, ImplicitLocOpBuilder& b) { VLOG(10) << "EmitDot: " << instr->ToString(); if (!algorithm_util::IsSupportedByElementalIrEmitter( instr->precision_config().algorithm())) { return absl::InvalidArgumentError( absl::StrFormat("Algorithm not supported by the ElementalIrEmitter: %s", PrecisionConfig::Algorithm_Name( instr->precision_config().algorithm()))); } auto* dot = DynCast<HloDotInstruction>(instr); TF_RET_CHECK(dot != nullptr); if (dot->sparse_operands()) { return absl::UnimplementedError( "Sparse dot is supported by Triton emitter only."); } return EmitDotLoop(instr, indices, operand_provider, b); } absl::StatusOr<SmallVector<Value, 1>> EmitConvolution( const HloInstruction* instr, ValueRange indices, const OperandProvider& operand_provider, ImplicitLocOpBuilder& b) { VLOG(10) << "EmitConvolution: " << instr->ToString(); return EmitDotLoop(instr, indices, operand_provider, b); } absl::StatusOr<SmallVector<Value, 1>> EmitParameter(const HloInstruction* instr, mlir::func::FuncOp this_fn, ValueRange indices, ImplicitLocOpBuilder& b) { Value value = this_fn.getArgument(instr->parameter_number()); if (mlir::isa<mlir::TensorType>(value.getType())) { value = b.create<mlir::tensor::ExtractOp>(value, indices); } else { TF_RET_CHECK(indices.empty()); } return {{value}}; } template <typename MhloOp, typename... ExtraArgs> SmallVector<Value, 1> MapHloOp(mlir::Type result_type, llvm::ArrayRef<mlir::Type> arg_types, llvm::ArrayRef<Value> args, ImplicitLocOpBuilder& b, ExtraArgs&&... 
extra_args) { Value result = mhlo::MhloOpToStdScalarOp::mapOpOfType<MhloOp>( b.getLoc(), result_type, arg_types, typename MhloOp::Adaptor(args, std::forward<ExtraArgs>(extra_args)...), &b); if (result.getType().isInteger(1)) { result = b.create<mlir::arith::ExtUIOp>(b.getI8Type(), result); } return {result}; } template <typename MhloOp> SmallVector<Value, 1> MapElementwiseOp(llvm::ArrayRef<mlir::Type> arg_types, llvm::ArrayRef<Value> args, ImplicitLocOpBuilder& b) { return MapHloOp<MhloOp>(args.back().getType(), arg_types, args, b); } } Value UnrealizedConversionCast(mlir::Type type, Value value, ImplicitLocOpBuilder& b) { SmallVector<Value> converted; b.createOrFold<mlir::UnrealizedConversionCastOp>(converted, type, value); return converted.front(); } SmallVector<Value, 2> UnrealizedConversionCast(mlir::TypeRange types, ValueRange values, ImplicitLocOpBuilder& b) { SmallVector<Value, 2> converted; for (auto [type, value] : llvm::zip(types, values)) { converted.push_back(UnrealizedConversionCast(type, value, b)); } return converted; } Value ApplyAffineExpr(mlir::AffineExpr expr, ValueRange dims, ValueRange symbols, ImplicitLocOpBuilder& b) { while (!dims.empty() && !expr.isFunctionOfDim(dims.size() - 1)) { dims = dims.drop_back(); } while (!symbols.empty() && !expr.isFunctionOfSymbol(symbols.size() - 1)) { symbols = symbols.drop_back(); } SmallVector<Value> args(dims); absl::c_copy(symbols, std::back_inserter(args)); return b.createOrFold<mlir::affine::AffineApplyOp>(expr, args); } SmallVector<Value, 3> ApplyIndexing(IndexingMap map, ValueRange dims, ValueRange symbols, ImplicitLocOpBuilder& b) { map.ClearConstraints(); SmallVector<Value, 3> results; for (unsigned int i = 0; i < map.GetAffineMap().getNumResults(); ++i) { SmallVector<Value, 1> result; b.createOrFold<ApplyIndexingOp>(result, dims, symbols, map.GetSubMap(i)); results.append(result); } return results; } Value CheckConstraint(Value constrained_value, Interval range, ImplicitLocOpBuilder& b) { auto lb = b.create<ConstantOp>(b.getIndexAttr(range.lower)); if (range.IsPoint()) { return b.create<CmpIOp>(CmpIPredicate::eq, constrained_value, lb); } auto ub = b.create<ConstantOp>(b.getIndexAttr(range.upper)); return b.create<AndIOp>( b.create<CmpIOp>(CmpIPredicate::sge, constrained_value, lb), b.create<CmpIOp>(CmpIPredicate::sle, constrained_value, ub)); } Value CheckConstraints(const IndexingMap& map, ValueRange dims, ValueRange symbols, ImplicitLocOpBuilder& b) { SmallVector<mlir::AffineExpr, 1> expressions; for (auto&& [expression, _] : map.GetConstraints()) { expressions.push_back(expression); } auto input_map = map.GetAffineMap(); IndexingMap constraints_map{ mlir::AffineMap::get(input_map.getNumDims(), input_map.getNumSymbols(), expressions, input_map.getContext()), map.GetDimVars(), map.GetRangeVars(), map.GetRTVars()}; SmallVector<Value, 1> constraints_values = ApplyIndexing(constraints_map, dims, symbols, b); Value ret = b.create<ConstantOp>(b.getIntegerAttr(b.getI1Type(), 1)); for (auto&& [value, expression_and_range] : llvm::zip(constraints_values, map.GetConstraints())) { ret = b.create<AndIOp>( ret, CheckConstraint(value, expression_and_range.second, b)); } for (auto&& [index, bound] : llvm::enumerate(map.GetDimensionBounds())) { ret = b.create<AndIOp>(ret, CheckConstraint(dims[index], bound, b)); } return ret; } namespace { absl::StatusOr<SmallVector<Value, 1>> EmitTuple( const HloInstruction* instr, ValueRange indices, const OperandProvider& operand_provider, ImplicitLocOpBuilder& builder) { const auto* first_shape = 
&instr->shape().tuple_shapes(0); while (first_shape->IsTuple()) { first_shape = &first_shape->tuple_shapes(0); } CHECK_EQ(first_shape->rank(), indices.size()) << "Indices for tuple must be for the first tuple element"; SmallVector<Value, 1> operands; for (int i = 0; i < instr->operand_count(); ++i) { SmallVector<Value> operand_indices; const auto* operand = instr->operand(i); const auto* operand_index_shape = &operand->shape(); while (operand_index_shape->IsTuple()) { operand_index_shape = &operand_index_shape->tuple_shapes(0); } if (i > 0 && !ShapeUtil::EqualIgnoringElementType(*first_shape, *operand_index_shape)) { auto operand_map = GetBitcastMap(*first_shape, *operand_index_shape, builder.getContext()); operand_indices = ApplyIndexing(operand_map, indices, {}, builder); } else { operand_indices = indices; } TF_ASSIGN_OR_RETURN(auto values, operand_provider(instr, i, operand_indices)); operands.append(values); } return operands; } absl::StatusOr<SmallVector<Value, 1>> EmitConstant( const HloInstruction* instr, ValueRange indices, ImplicitLocOpBuilder& builder) { mlir::Type result_element_type = PrimitiveTypeToMlirType(instr->shape().element_type(), builder); TF_ASSIGN_OR_RETURN(auto value_attr, CreateDenseElementsAttrFromLiteral( instr->literal(), builder)); if (primitive_util::IsUnsignedIntegralType(instr->shape().element_type())) { value_attr = value_attr.mapValues(result_element_type, [](const llvm::APInt& i) { return i; }); } else if (instr->shape().element_type() == PrimitiveType::PRED) { value_attr = value_attr.mapValues( result_element_type, [](const llvm::APInt& i) { return i.zext(8); }); } if (ShapeUtil::IsEffectiveScalar(instr->shape())) { if (primitive_util::IsComplexType(instr->shape().element_type())) { return {{builder.create<mlir::complex::ConstantOp>( result_element_type, mlir::cast<mlir::ArrayAttr>( value_attr.getValues<mlir::Attribute>()[0]))}}; } auto val = mlir::cast<mlir::TypedAttr>(value_attr.getValues<mlir::Attribute>()[0]); return {{builder.create<ConstantOp>(val).getResult()}}; } auto constant = builder.create<ConstantOp>(value_attr).getResult(); return {{builder.create<mlir::tensor::ExtractOp>(constant, indices)}}; } absl::StatusOr<SmallVector<Value, 2>> GetOperands( const HloInstruction* instr, ValueRange indices, const OperandProvider& operand_provider, ImplicitLocOpBuilder& builder) { SmallVector<Value, 2> operands; bool is_elementwise = HloInstruction::IsOpElementwise(instr->opcode()) || instr->opcode() == HloOpcode::kMap; if (is_elementwise && instr->shape().IsArray()) { int64_t rank = instr->shape().rank(); is_elementwise &= absl::c_all_of(instr->operands(), [&](const HloInstruction* operand) { return operand->shape().rank() == rank; }); } if (is_elementwise) { for (int64_t operand_number = 0; operand_number < instr->operand_count(); ++operand_number) { TF_ASSIGN_OR_RETURN(operands.emplace_back(), GetSingleOperandValue(operand_provider, instr, operand_number, indices)); } } else { auto input_indices = GetInputIndices( ComputeOutputToInputIndexing(instr, 0, builder.getContext()), indices, builder); for (auto&& [operand_number, operand_indices] : llvm::enumerate(input_indices)) { TF_ASSIGN_OR_RETURN( operands.emplace_back(), GetSingleOperandValue(operand_provider, instr, operand_number, operand_indices)); } } CHECK_NE(operands.size(), 0); for (auto [index, operand] : llvm::enumerate(operands)) { TF_RET_CHECK(operand != nullptr) << "null operand at index " << index << " for " << instr->ToShortString(); } return operands; } absl::StatusOr<SmallVector<Value, 
1>> EmitConvert( const HloInstruction* instr, llvm::ArrayRef<mlir::Type> arg_types, ValueRange operands, ImplicitLocOpBuilder& builder) { auto element_type = instr->shape().element_type(); auto result_type_with_sign = PrimitiveTypeToMlirTypeWithSign(element_type, builder); mlir::Type result_element_type = PrimitiveTypeToMlirType(instr->shape().element_type(), builder); if (element_type == PRED) { if (mlir::isa<FloatType>(operands[0].getType())) { Value i1 = builder.create<mlir::arith::CmpFOp>( mlir::arith::CmpFPredicate::UNE, operands[0], builder.create<ConstantOp>( builder.getFloatAttr(operands[0].getType(), 0.0))); return {{builder.create<mlir::arith::ExtUIOp>(builder.getI8Type(), i1) .getResult()}}; } if (mlir::isa<IntegerType>(operands[0].getType())) { Value i1 = builder.create<mlir::arith::CmpIOp>( mlir::arith::CmpIPredicate::ne, operands[0], builder.create<mlir::arith::ConstantIntOp>(0, operands[0].getType())); return {{builder.create<mlir::arith::ExtUIOp>(builder.getI8Type(), i1) .getResult()}}; } } auto out = mhlo::MhloOpToStdScalarOp::mapConvertOpToStdScalarOp( builder.getLoc(), result_type_with_sign, result_element_type, arg_types, operands, &builder); if (auto int_ty = mlir::dyn_cast<IntegerType>(out.getType())) { auto in = operands[0]; if (auto float_ty = mlir::dyn_cast<FloatType>(in.getType())) { auto cst_int = [&](int64_t x) { return builder.create<arith::ConstantIntOp>(x, int_ty); }; if (primitive_util::IsUnsignedIntegralType(element_type)) { auto cst_float = [&](uint64_t x) { return builder.create<ConstantOp>(builder.getFloatAttr(float_ty, x)); }; int64_t min = 0; int64_t max = llvm::maxUIntN(int_ty.getWidth()); out = builder.create<mlir::arith::SelectOp>( builder.create<mlir::arith::CmpFOp>(mlir::arith::CmpFPredicate::ULE, in, cst_float(min)), cst_int(min), out); out = builder.create<mlir::arith::SelectOp>( builder.create<mlir::arith::CmpFOp>(mlir::arith::CmpFPredicate::OGE, in, cst_float(max)), cst_int(max), out); } else { auto cst_float = [&](int64_t x) { return builder.create<ConstantOp>(builder.getFloatAttr(float_ty, x)); }; int64_t min = llvm::minIntN(int_ty.getWidth()); int64_t max = llvm::maxIntN(int_ty.getWidth()); out = builder.create<mlir::arith::SelectOp>( builder.create<mlir::arith::CmpFOp>(mlir::arith::CmpFPredicate::OLE, in, cst_float(min)), cst_int(min), out); out = builder.create<mlir::arith::SelectOp>( builder.create<mlir::arith::CmpFOp>(mlir::arith::CmpFPredicate::OGE, in, cst_float(max)), cst_int(max), out); out = builder.create<mlir::arith::SelectOp>( builder.create<mlir::arith::CmpFOp>(mlir::arith::CmpFPredicate::UNO, in, in), cst_int(0), out); } } } return {{out}}; } absl::StatusOr<SmallVector<Value, 1>> EmitIota(const HloInstruction* instr, ValueRange indices, ImplicitLocOpBuilder& builder) { auto element_type = instr->shape().element_type(); auto result_type_with_sign = PrimitiveTypeToMlirTypeWithSign(element_type, builder); auto result_element_type = PrimitiveTypeToMlirType(instr->shape().element_type(), builder); auto index = indices[Cast<HloIotaInstruction>(instr)->iota_dimension()]; auto index_type = builder.getIntegerType( mlir::DataLayout::closest(builder.getInsertionBlock()->getParentOp()) .getTypeSizeInBits(index.getType())); index = builder.create<arith::IndexCastUIOp>(index_type, index); return {{mhlo::MhloOpToStdScalarOp::mapConvertOpToStdScalarOp( builder.getLoc(), result_type_with_sign, result_element_type, {index_type}, {index}, &builder)}}; } absl::StatusOr<SmallVector<Value, 1>> EmitCompare( const HloInstruction* instr, 
llvm::ArrayRef<mlir::Type> arg_types, ValueRange operands, ImplicitLocOpBuilder& builder) { auto* context = builder.getContext(); auto direction = mhlo::symbolizeComparisonDirection( ComparisonDirectionToString(instr->comparison_direction())); mhlo::CompareOp::Properties properties; properties.comparison_direction = mhlo::ComparisonDirectionAttr::get(context, direction.value()); auto result_types = llvm::to_vector(mlir::TypeRange{builder.getI1Type()}); auto i1 = mhlo::MhloOpToStdScalarOp::mapOpOfType<mhlo::CompareOp>( builder.getLoc(), result_types, arg_types, mhlo::CompareOp::Adaptor(operands, nullptr, properties), &builder); return {{builder.create<mlir::arith::ExtUIOp>(builder.getI8Type(), i1) .getResult()}}; } absl::StatusOr<SmallVector<Value, 1>> EmitReducePrecision( const HloInstruction* instr, llvm::ArrayRef<mlir::Type> arg_types, llvm::ArrayRef<Value> operands, ImplicitLocOpBuilder& builder) { mhlo::ReducePrecisionOp::Properties properties; properties.exponent_bits = builder.getI32IntegerAttr(instr->exponent_bits()); properties.mantissa_bits = builder.getI32IntegerAttr(instr->mantissa_bits()); return MapHloOp<mhlo::ReducePrecisionOp>(operands.front().getType(), arg_types, operands, builder, nullptr, properties); } absl::StatusOr<SmallVector<Value, 1>> HloToMlir( const HloInstruction* instr, mlir::func::FuncOp this_fn, ValueRange indices, const OperandProvider& operand_provider, const CallTargetProvider& call_target_provider, ImplicitLocOpBuilder& builder) { CHECK(!kUnsupportedOps.contains(instr->opcode())) << instr->ToShortString(); auto element_type = instr->shape().element_type(); switch (instr->opcode()) { case HloOpcode::kConcatenate: return EmitConcat(instr, indices, operand_provider, builder); case HloOpcode::kConstant: return EmitConstant(instr, indices, builder); case HloOpcode::kConvolution: return EmitConvolution(instr, indices, operand_provider, builder); case HloOpcode::kDynamicSlice: return EmitDynamicSlice(instr, indices, operand_provider, builder); case HloOpcode::kDynamicUpdateSlice: return EmitDynamicUpdateSlice(instr, indices, operand_provider, builder); case HloOpcode::kGather: return EmitGather(instr, indices, operand_provider, builder); case HloOpcode::kIota: return EmitIota(instr, indices, builder); case HloOpcode::kPad: return EmitPad(instr, indices, operand_provider, builder); case HloOpcode::kDot: return EmitDot(instr, indices, operand_provider, builder); case HloOpcode::kParameter: return EmitParameter(instr, this_fn, indices, builder); case HloOpcode::kReduce: return EmitReduce(instr, indices, operand_provider, call_target_provider, builder); case HloOpcode::kReduceWindow: return EmitReduceWindow(instr, indices, operand_provider, call_target_provider, builder); case HloOpcode::kTuple: return EmitTuple(instr, indices, operand_provider, builder); case HloOpcode::kGetTupleElement: { TF_ASSIGN_OR_RETURN(auto tuple, operand_provider(instr, 0, indices)); return {{tuple[instr->tuple_index()]}}; } default: break; } SmallVector<mlir::Type, 2> arg_types; arg_types.reserve(instr->operands().size()); for (auto operand : instr->operands()) { auto operand_element_type = PrimitiveTypeToMlirTypeWithSign( operand->shape().element_type(), builder); arg_types.push_back(operand_element_type); } TF_ASSIGN_OR_RETURN(auto operands, GetOperands(instr, indices, operand_provider, builder)); switch (instr->opcode()) { case HloOpcode::kAbs: return { MapHloOp<mhlo::AbsOp>(PrimitiveTypeToMlirType(element_type, builder), arg_types, operands, builder)}; case HloOpcode::kAdd: if 
(element_type == PRED) { return MapElementwiseOp<mhlo::OrOp>(arg_types, operands, builder); } return MapElementwiseOp<mhlo::AddOp>(arg_types, operands, builder); case HloOpcode::kAnd: return MapElementwiseOp<mhlo::AndOp>(arg_types, operands, builder); case HloOpcode::kAtan2: return MapElementwiseOp<mhlo::Atan2Op>(arg_types, operands, builder); case HloOpcode::kCbrt: return MapElementwiseOp<mhlo::CbrtOp>(arg_types, operands, builder); case HloOpcode::kCeil: return MapElementwiseOp<mhlo::CeilOp>(arg_types, operands, builder); case HloOpcode::kClamp: return MapElementwiseOp<mhlo::ClampOp>(arg_types, operands, builder); case HloOpcode::kClz: return MapElementwiseOp<mhlo::ClzOp>(arg_types, operands, builder); case HloOpcode::kCompare: return EmitCompare(instr, arg_types, operands, builder); case HloOpcode::kComplex: return MapHloOp<mhlo::ComplexOp>( PrimitiveTypeToMlirType(element_type, builder), arg_types, operands, builder); case HloOpcode::kCos: return MapElementwiseOp<mhlo::CosineOp>(arg_types, operands, builder); case HloOpcode::kDivide: return MapElementwiseOp<mhlo::DivOp>(arg_types, operands, builder); case HloOpcode::kErf: return MapElementwiseOp<mhlo::ErfOp>(arg_types, operands, builder); case HloOpcode::kExp: return MapElementwiseOp<mhlo::ExpOp>(arg_types, operands, builder); case HloOpcode::kExpm1: return MapElementwiseOp<mhlo::Expm1Op>(arg_types, operands, builder); case HloOpcode::kFloor: return MapElementwiseOp<mhlo::FloorOp>(arg_types, operands, builder); case HloOpcode::kIsFinite: return MapHloOp<mhlo::IsFiniteOp>(builder.getI1Type(), arg_types, operands, builder); case HloOpcode::kImag: return MapHloOp<mhlo::ImagOp>( PrimitiveTypeToMlirType(element_type, builder), arg_types, operands, builder); case HloOpcode::kLog: return MapElementwiseOp<mhlo::LogOp>(arg_types, operands, builder); case HloOpcode::kLog1p: return MapElementwiseOp<mhlo::Log1pOp>(arg_types, operands, builder); case HloOpcode::kLogistic: return MapElementwiseOp<mhlo::LogisticOp>(arg_types, operands, builder); case HloOpcode::kMap: { auto mapper = call_target_provider( instr->called_computations().front()->root_instruction()); return builder.create<PureCallOp>(mapper, operands).getResults(); } case HloOpcode::kMaximum: if (element_type == PRED) { return MapElementwiseOp<mhlo::OrOp>(arg_types, operands, builder); } return MapElementwiseOp<mhlo::MaxOp>(arg_types, operands, builder); case HloOpcode::kMinimum: if (element_type == PRED) { return MapElementwiseOp<mhlo::AndOp>(arg_types, operands, builder); } return MapElementwiseOp<mhlo::MinOp>(arg_types, operands, builder); case HloOpcode::kMultiply: if (element_type == PRED) { return MapElementwiseOp<mhlo::AndOp>(arg_types, operands, builder); } return MapElementwiseOp<mhlo::MulOp>(arg_types, operands, builder); case HloOpcode::kNegate: return MapElementwiseOp<mhlo::NegOp>(arg_types, operands, builder); case HloOpcode::kNot: { if (element_type == PRED) { auto zero = builder.create<mlir::arith::ConstantIntOp>(0, builder.getI8Type()); Value result = builder.create<mlir::arith::ExtUIOp>( builder.getI8Type(), builder.create<mlir::arith::CmpIOp>(mlir::arith::CmpIPredicate::eq, operands[0], zero)); return {{result}}; } return MapElementwiseOp<mhlo::NotOp>(arg_types, operands, builder); } case HloOpcode::kOr: return MapElementwiseOp<mhlo::OrOp>(arg_types, operands, builder); case HloOpcode::kPopulationCount: return MapHloOp<mhlo::PopulationCountOp>( PrimitiveTypeToMlirType(element_type, builder), arg_types, operands, builder); case HloOpcode::kPower: return 
MapElementwiseOp<mhlo::PowOp>(arg_types, operands, builder); case HloOpcode::kReal: return MapHloOp<mhlo::RealOp>( PrimitiveTypeToMlirType(element_type, builder), arg_types, operands, builder); case HloOpcode::kReducePrecision: return EmitReducePrecision(instr, arg_types, operands, builder); case HloOpcode::kRemainder: return MapElementwiseOp<mhlo::RemOp>(arg_types, operands, builder); case HloOpcode::kRoundNearestAfz: return MapElementwiseOp<mhlo::RoundOp>(arg_types, operands, builder); case HloOpcode::kRoundNearestEven: return MapElementwiseOp<mhlo::RoundNearestEvenOp>(arg_types, operands, builder); case HloOpcode::kRsqrt: return MapElementwiseOp<mhlo::RsqrtOp>(arg_types, operands, builder); case HloOpcode::kSelect: { operands[0] = builder.createOrFold<mlir::arith::TruncIOp>( builder.getI1Type(), operands[0]); return MapElementwiseOp<mhlo::SelectOp>(arg_types, operands, builder); } case HloOpcode::kShiftLeft: return MapElementwiseOp<mhlo::ShiftLeftOp>(arg_types, operands, builder); case HloOpcode::kShiftRightArithmetic: return MapElementwiseOp<mhlo::ShiftRightArithmeticOp>(arg_types, operands, builder); case HloOpcode::kShiftRightLogical: return MapElementwiseOp<mhlo::ShiftRightLogicalOp>(arg_types, operands, builder); case HloOpcode::kSign: return MapElementwiseOp<mhlo::SignOp>(arg_types, operands, builder); case HloOpcode::kSin: return MapElementwiseOp<mhlo::SineOp>(arg_types, operands, builder); case HloOpcode::kSqrt: return MapElementwiseOp<mhlo::SqrtOp>(arg_types, operands, builder); case HloOpcode::kSubtract: return MapElementwiseOp<mhlo::SubtractOp>(arg_types, operands, builder); case HloOpcode::kTan: return MapElementwiseOp<mhlo::TanOp>(arg_types, operands, builder); case HloOpcode::kTanh: return MapElementwiseOp<mhlo::TanhOp>(arg_types, operands, builder); case HloOpcode::kXor: return MapElementwiseOp<mhlo::XorOp>(arg_types, operands, builder); case HloOpcode::kBitcastConvert: return MapHloOp<mhlo::BitcastConvertOp>( PrimitiveTypeToMlirType(element_type, builder), arg_types, operands, builder); case HloOpcode::kConvert: return EmitConvert(instr, arg_types, operands, builder); case HloOpcode::kBitcast: case HloOpcode::kCopy: case HloOpcode::kSlice: case HloOpcode::kBroadcast: case HloOpcode::kReshape: case HloOpcode::kReverse: case HloOpcode::kTranspose: return operands; default: break; } return absl::UnimplementedError(absl::StrCat("Unsupported: ", instr->name())); } } ValueRange ProvideParameter(const PartitionedComputation& computation, const HloInstruction* instr, int operand_index, ValueRange indices, const CallTargetProvider& call_target_provider, mlir::func::FuncOp this_fn, ImplicitLocOpBuilder& builder, const PartitionedComputation::Subgraph* caller) { auto* operand = instr->operand(operand_index); if (!caller) { caller = &computation.FindSubgraph(instr); } const auto& injected_value_starts = caller->injected_value_starts; if (auto it = injected_value_starts.find(operand); it != injected_value_starts.end()) { return ValueRange(this_fn.getArguments()) .take_back(caller->num_injected_values) .slice(it->second, 1); } auto callee = call_target_provider(operand); SmallVector<Value> operands( this_fn.getArguments().take_front(instr->parent()->num_parameters())); absl::c_copy(indices, std::back_inserter(operands)); auto results = builder.create<PureCallOp>(callee, operands).getResults(); auto callee_subgraph = computation.FindSubgraph(operand); if (callee_subgraph.roots.size() == 1) { CHECK_EQ(callee_subgraph.roots.front(), operand) << "Expected " << operand->ToString() << " to 
be the root of " << callee_subgraph.ToString(); return results; } int offset = 0; for (auto root : callee_subgraph.roots) { int root_arity = root->shape().IsTuple() ? root->shape().tuple_shapes_size() : 1; if (root == operand) { return results.slice(offset, root_arity); } offset += root_arity; } LOG(FATAL) << "Did not find operand " << operand->name() << " in roots of " << callee_subgraph.ToString(); } SmallVector<Value, 2> ProvideParameterRange( const PartitionedComputation& computation, const HloInstruction* instr, int start, int num, ValueRange indices, const CallTargetProvider& call_target_provider, mlir::func::FuncOp this_fn, ImplicitLocOpBuilder& builder) { SmallVector<Value, 2> scalars; scalars.reserve(num); for (int i = 0; i < num; ++i) { ValueRange parameter_value = ProvideParameter(computation, instr, i + start, indices, call_target_provider, this_fn, builder); scalars.append(parameter_value.begin(), parameter_value.end()); } return scalars; } namespace { class SubgraphConverter { public: SubgraphConverter(const PartitionedComputation& computation, const PartitionedComputation::Subgraph& subgraph, mlir::func::FuncOp this_fn, const CallTargetProvider& call_target_provider, ValueRange parameters, ValueRange indices, ImplicitLocOpBuilder& builder) : computation_(computation), subgraph_(subgraph), this_fn_(this_fn), call_target_provider_(call_target_provider), parameters_(parameters), indices_(indices), builder_(builder), provide_operand_fn_( std::bind(std::mem_fn(&SubgraphConverter::ProvideOperand), this, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3)) {} absl::StatusOr<SmallVector<Value>> Convert(); absl::StatusOr<SmallVector<Value>> ProvideOperand(const HloInstruction* instr, int index, ValueRange operand_indices); absl::StatusOr<SmallVector<Value>> EmitInstruction( const HloInstruction* instr, ValueRange indices); absl::StatusOr<SmallVector<Value>> EmitElementwiseInstruction( const HloInstruction* instr, ValueRange indices); private: const PartitionedComputation& computation_; const PartitionedComputation::Subgraph& subgraph_; mlir::func::FuncOp this_fn_; const CallTargetProvider& call_target_provider_; ValueRange parameters_; ValueRange indices_; ImplicitLocOpBuilder& builder_; absl::node_hash_map<std::pair<const HloInstruction*, std::vector<void*>>, SmallVector<Value>> cached_instructions_; OperandProvider provide_operand_fn_; }; absl::StatusOr<SmallVector<Value>> SubgraphConverter::Convert() { SmallVector<Value> results; TF_RET_CHECK(subgraph_.roots.size() == subgraph_.root_indexing.size()) << "roots and root_indexing must have the same size in " << subgraph_.ToString(); for (const auto [root, indexing] : llvm::zip(subgraph_.roots, subgraph_.root_indexing)) { if (auto it = subgraph_.injected_value_starts.find(root); it != subgraph_.injected_value_starts.end()) { auto injected = this_fn_.getArguments().take_back(subgraph_.num_injected_values); int arity = root->shape().IsTuple() ? 
root->shape().tuple_shapes_size() : 1; absl::c_copy(injected.slice(it->second, arity), std::back_inserter(results)); continue; } int num_dims = indexing.GetAffineMap().getNumDims(); auto root_indices = ApplyIndexing(indexing, indices_.take_front(num_dims), indices_.drop_front(num_dims), builder_); TF_ASSIGN_OR_RETURN(auto root_results, EmitInstruction(root, root_indices)); results.append(root_results.begin(), root_results.end()); } return results; } absl::StatusOr<SmallVector<Value>> SubgraphConverter::ProvideOperand( const HloInstruction* instr, int index, ValueRange operand_indices) { auto* operand = instr->operand(index); if (subgraph_.instructions.contains(operand)) { return EmitInstruction(operand, operand_indices); } return ProvideParameter(computation_, instr, index, operand_indices, call_target_provider_, this_fn_, builder_, &subgraph_); } absl::StatusOr<SmallVector<Value>> SubgraphConverter::EmitInstruction( const HloInstruction* instr, ValueRange indices) { std::vector<void*> indices_ptrs; indices_ptrs.reserve(indices.size()); for (auto index : indices) { indices_ptrs.push_back(index.getAsOpaquePointer()); } auto& entry = cached_instructions_[std::make_pair(instr, indices_ptrs)]; if (!entry.empty()) { auto* entry_block = entry.front().getParentBlock(); auto* insertion_block = builder_.getInsertionBlock(); while (insertion_block != nullptr) { if (insertion_block == entry_block) return entry; if (insertion_block->getParentOp()) { insertion_block = insertion_block->getParentOp()->getBlock(); } else { insertion_block = nullptr; VLOG(2) << "Failed dominance check while looking up cache for " << instr->ToShortString() << ". This is a bug in the computation partitioner."; } } } if (HloInstruction::IsOpElementwise(instr->opcode())) { return EmitElementwiseInstruction(instr, indices); } TF_ASSIGN_OR_RETURN(entry, HloToMlir(instr, this_fn_, indices, provide_operand_fn_, call_target_provider_, builder_)); CHECK(!absl::c_linear_search(entry, nullptr)) << "Failed to lower " << instr->name(); return entry; } absl::StatusOr<SmallVector<Value>> SubgraphConverter::EmitElementwiseInstruction(const HloInstruction* root, ValueRange indices) { std::vector<void*> indices_ptrs; indices_ptrs.reserve(indices.size()); for (auto index : indices) { indices_ptrs.push_back(index.getAsOpaquePointer()); } std::queue<const HloInstruction*> worklist; absl::flat_hash_set<const HloInstruction*> visited; worklist.push(root); SmallVector<const HloInstruction*> pre_order; while (!worklist.empty()) { const HloInstruction* instr = worklist.front(); worklist.pop(); pre_order.push_back(instr); if (HloInstruction::IsOpElementwise(instr->opcode())) { for (int i = instr->operand_count() - 1; i >= 0; --i) { auto* operand = instr->operand(i); if (subgraph_.instructions.contains(operand) && !cached_instructions_.contains({operand, indices_ptrs}) && visited.insert(operand).second) { worklist.push(operand); } } } } for (auto* instr : llvm::reverse(pre_order)) { auto& entry = cached_instructions_[{instr, indices_ptrs}]; TF_ASSIGN_OR_RETURN(entry, HloToMlir(instr, this_fn_, indices, provide_operand_fn_, call_target_provider_, builder_)); } return cached_instructions_[{root, indices_ptrs}]; } absl::StatusOr<SmallVector<Value>> SubgraphToMlir( const PartitionedComputation& computation, const PartitionedComputation::Subgraph& subgraph, mlir::func::FuncOp this_fn, const CallTargetProvider& call_target_provider, ValueRange parameters, ValueRange indices, ImplicitLocOpBuilder& builder) { return SubgraphConverter(computation, subgraph, 
this_fn, call_target_provider, parameters, indices, builder) .Convert(); } } void GetLoopBoundsFromIndexingMap(ImplicitLocOpBuilder& b, const IndexingMap& indexing_map, SmallVectorImpl<Value>* lbs, SmallVectorImpl<Value>* ubs, SmallVectorImpl<Value>* steps) { Value c1 = b.create<ConstantIndexOp>(1); for (const Interval& bound : indexing_map.GetSymbolBounds()) { lbs->push_back(b.create<ConstantIndexOp>(bound.lower)); ubs->push_back(b.create<ConstantIndexOp>(bound.upper + 1)); steps->push_back(c1); } } absl::Status SubgraphToMlirFunction( const PartitionedComputation& computation, const PartitionedComputation::Subgraph& subgraph, mlir::func::FuncOp& func, const CallTargetProvider& call_target_provider) { TF_RET_CHECK(func != nullptr); ImplicitLocOpBuilder builder(func.getLoc(), func->getContext()); builder.setInsertionPointToStart(func.addEntryBlock()); auto parameters = func.getArguments().take_front( computation.computation().num_parameters()); auto indices_and_injected_values = func.getArguments().drop_front( computation.computation().num_parameters()); auto indices = indices_and_injected_values.drop_back(subgraph.num_injected_values); TF_ASSIGN_OR_RETURN( auto results, SubgraphToMlir(computation, subgraph, func, call_target_provider, parameters, indices, builder)); CHECK_EQ(results.size(), func.getResultTypes().size()); for (auto& result : results) { if (result.getType().isInteger(1)) { result = builder.create<mlir::arith::ExtUIOp>(builder.getI8Type(), result); } } builder.create<mlir::func::ReturnOp>(results); return absl::OkStatus(); } namespace { ValueRange EmitLoopNestImpl( ImplicitLocOpBuilder& b, ValueRange dim_values, ValueRange iter_args_inits, const IndexingMap& indexing_map, mlir::function_ref<SmallVector<Value>(ValueRange , ValueRange , ValueRange )> create_body, bool vectorize) { SmallVector<Value, 4> lbs, ubs, steps; GetLoopBoundsFromIndexingMap(b, indexing_map, &lbs, &ubs, &steps); SmallVector<Value, 4> vector_inits; if (vectorize) { CHECK_EQ(indexing_map.GetSymbolBounds().back().lower, 0); int vector_size = indexing_map.GetSymbolBounds().back().upper + 1; vector_inits = iter_args_inits; for (auto& init : vector_inits) { if (!mlir::isa<mlir::ShapedType>(init.getType())) { auto vector_ty = mlir::VectorType::get({vector_size}, init.getType()); init = b.create<mlir::vector::SplatOp>(vector_ty, init); } } iter_args_inits = vector_inits; } auto bb = [&](OpBuilder& nested_builder, Location loc, ValueRange symbol_values, ValueRange iter_args) -> scf::ValueVector { ImplicitLocOpBuilder nested_b(loc, nested_builder); auto is_in_bounds = mlir_converter::CheckConstraints( indexing_map, dim_values, symbol_values, nested_b); auto if_op = nested_b.create<scf::IfOp>( is_in_bounds, [&](OpBuilder& then_builder, Location then_loc) -> void { OpBuilder::InsertionGuard g(b); b.setInsertionPointToStart(then_builder.getInsertionBlock()); SmallVector<Value, 4> results; if (vectorize) { SmallVector<Value, 4> vector_args; vector_args = iter_args; for (auto& init : vector_args) { if (mlir::isa<mlir::VectorType>(init.getType())) { init = b.create<mlir::vector::ExtractOp>(init, symbol_values.back()); } } results = create_body(vector_args, dim_values, symbol_values); for (auto [index, init] : llvm::enumerate(iter_args)) { if (mlir::isa<mlir::VectorType>(init.getType())) { results[index] = b.create<mlir::vector::InsertOp>( results[index], iter_args[index], symbol_values.back()); } } } else { results = create_body(iter_args, dim_values, symbol_values); } b.create<scf::YieldOp>(results); }, [&](OpBuilder& 
else_b, Location else_loc) { OpBuilder::InsertionGuard g(b); b.setInsertionPointToStart(else_b.getInsertionBlock()); b.create<scf::YieldOp>(iter_args); }); return if_op.getResults(); }; scf::LoopNest loop_nest = scf::buildLoopNest(b, b.getLoc(), lbs, ubs, steps, iter_args_inits, bb); if (loop_nest.results.empty()) { return {}; } ValueRange result_range = loop_nest.results.front().getDefiningOp()->getResults(); CHECK_EQ(result_range.size(), loop_nest.results.size()) << "buildLoopNest did not return the results of the root loop?"; return result_range; } } ValueRange EmitXlaLoopOp(ImplicitLocOpBuilder& b, ValueRange dim_values, ValueRange iter_args_inits, const IndexingMap& indexing_map, mlir::function_ref<SmallVector<Value>( ValueRange , ValueRange , ValueRange )> create_body, bool vectorize) { SmallVector<Value, 4> vector_inits; if (vectorize) { CHECK_EQ(indexing_map.GetSymbolBounds().back().lower, 0); int vector_size = indexing_map.GetSymbolBounds().back().upper + 1; vector_inits = iter_args_inits; for (auto& init : vector_inits) { if (!mlir::isa<mlir::ShapedType>(init.getType())) { auto vector_ty = mlir::VectorType::get({vector_size}, init.getType()); init = b.create<mlir::vector::SplatOp>(vector_ty, init); } } iter_args_inits = vector_inits; } auto bb = [&](OpBuilder& nested_builder, Location loc, ValueRange ivs, ValueRange map_results, ValueRange iter_args) { SmallVector<Value, 4> results; if (vectorize) { SmallVector<Value, 4> vector_args; vector_args = iter_args; for (auto& init : vector_args) { if (mlir::isa<mlir::VectorType>(init.getType())) { init = nested_builder.create<mlir::vector::ExtractOp>(loc, init, ivs.back()); } } results = create_body(ivs, map_results, vector_args); for (auto [index, init] : llvm::enumerate(iter_args)) { if (mlir::isa<mlir::VectorType>(init.getType())) { results[index] = nested_builder.create<mlir::vector::InsertOp>( loc, results[index], iter_args[index], ivs.back()); } } } else { results = create_body(ivs, map_results, iter_args); } nested_builder.create<xla::gpu::YieldOp>(loc, results); }; return b.create<LoopOp>(indexing_map, dim_values, iter_args_inits, bb) .getResults(); } ValueRange EmitLoopNest(ImplicitLocOpBuilder& b, ValueRange dim_values, ValueRange iter_args_inits, const IndexingMap& indexing_map, mlir::function_ref<SmallVector<Value>( ValueRange , ValueRange , ValueRange )> create_body, bool vectorize) { int64_t cumulative_loop_size = 1; int last_peelable_symbol = indexing_map.GetSymbolCount() - 1 - (vectorize ? 
1 : 0); for (int sym_index = last_peelable_symbol; sym_index >= 0 && cumulative_loop_size < 64; --sym_index) { auto& bound = indexing_map.GetSymbolBound(sym_index); cumulative_loop_size *= bound.GetLoopTripCount(); if (!indexing_map.IsSymbolConstrained(sym_index)) continue; IndexingMap peeled_map = indexing_map; if (bound.upper == bound.lower) continue; --peeled_map.GetMutableSymbolBound(sym_index).upper; peeled_map.Simplify(); if (peeled_map.IsSymbolConstrained(sym_index)) continue; auto first_results = EmitLoopNestImpl(b, dim_values, iter_args_inits, peeled_map, create_body, vectorize); IndexingMap remainder = indexing_map; remainder.GetMutableSymbolBound(sym_index).lower = bound.upper; remainder.Simplify(); VLOG(5) << "Peeled indexing map " << indexing_map << "\n into " << peeled_map << "\nand remainder\n" << remainder; return EmitLoopNestImpl(b, dim_values, first_results, remainder, create_body, vectorize); } return EmitLoopNestImpl(b, dim_values, iter_args_inits, indexing_map, create_body, vectorize); } absl::StatusOr<ValueRange> EmitLoopNestWithStatus( ImplicitLocOpBuilder& b, ValueRange dim_values, ValueRange iter_args_inits, const IndexingMap& indexing_map, mlir::function_ref<absl::StatusOr<SmallVector<Value>>( ValueRange , ValueRange , ValueRange )> create_body) { absl::Status status = absl::OkStatus(); auto result = EmitLoopNest( b, dim_values, iter_args_inits, indexing_map, [&](ValueRange iter_args, ValueRange dim_values, ValueRange symbol_values) -> SmallVector<Value> { auto body_result = create_body(iter_args, dim_values, symbol_values); if (!body_result.ok()) { status = std::move(body_result.status()); return ValueRange{}; } return std::move(body_result.value()); }); if (!status.ok()) { return status; } return result; } Value ClampIndex(Value index, bool is_unsigned, int64_t high, ImplicitLocOpBuilder& b) { auto zero = b.create<ConstantOp>(b.getIndexAttr(0)); if (high <= 0) { return zero; } if (is_unsigned) { if (index.getType() != b.getIndexType()) { index = b.create<arith::IndexCastUIOp>(b.getIndexType(), index); } index = b.create<arith::MinUIOp>( index, b.create<ConstantOp>(b.getIndexAttr(high))); } else { if (index.getType() != b.getIndexType()) { index = b.create<arith::IndexCastOp>(b.getIndexType(), index); } index = b.create<arith::MinSIOp>( index, b.create<ConstantOp>(b.getIndexAttr(high))); index = b.create<arith::MaxSIOp>(index, zero); } return index; } SmallVector<Value, 2> InlineBlock(OpBuilder& builder, Block& src_block, ValueRange mapped_args) { IRMapping mapping; for (auto [from, to] : llvm::zip(src_block.getArguments(), mapped_args)) { mapping.map(from, to); } for (auto& op : src_block.without_terminator()) { builder.clone(op, mapping); } auto* terminator = src_block.getTerminator(); SmallVector<Value, 2> mapped_results; mapped_results.reserve(terminator->getResults().size()); for (Value result : src_block.getTerminator()->getOperands()) { mapped_results.push_back(mapping.lookup(result)); } return mapped_results; } } } }
#include "xla/service/gpu/fusions/mlir/elemental_hlo_to_mlir.h" #include <functional> #include <string> #include <vector> #include <gtest/gtest.h> #include "absl/status/status.h" #include "llvm/Support/raw_ostream.h" #include "mlir/AsmParser/AsmParser.h" #include "mlir/Dialect/Affine/IR/AffineOps.h" #include "mlir/Dialect/Arith/IR/Arith.h" #include "mlir/Dialect/DLTI/DLTI.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/LLVMIR/LLVMDialect.h" #include "mlir/Dialect/Math/IR/Math.h" #include "mlir/Dialect/SCF/IR/SCF.h" #include "mlir/Dialect/Tensor/IR/Tensor.h" #include "mlir/IR/AffineExpr.h" #include "mlir/IR/ImplicitLocOpBuilder.h" #include "mlir/IR/Location.h" #include "mlir/IR/MLIRContext.h" #include "mlir/Pass/PassManager.h" #include "mlir/Transforms/Passes.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/mlir_hlo/mhlo/IR/hlo_ops.h" #include "xla/service/gpu/fusions/ir/xla_gpu_ops.h" #include "xla/service/gpu/fusions/mlir/computation_partitioner.h" #include "xla/service/gpu/model/indexing_map.h" #include "xla/service/hlo_parser.h" #include "xla/service/llvm_ir/llvm_util.h" #include "xla/status_macros.h" #include "xla/tests/filecheck.h" #include "xla/tests/hlo_test_base.h" #include "xla/tsl/lib/core/status_test_util.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace mlir_converter { namespace { class ElementalHloToMlirTest : public HloTestBase { public: ElementalHloToMlirTest() { context_.loadDialect<mlir::tensor::TensorDialect, mlir::func::FuncDialect, mlir::affine::AffineDialect, mlir::arith::ArithDialect, mlir::math::MathDialect, mlir::scf::SCFDialect, mlir::mhlo::MhloDialect, mlir::LLVM::LLVMDialect, mlir::DLTIDialect, xla::gpu::XlaGpuDialect>(); } absl::Status Run(const std::string& hlo, const std::string& filecheck_str, std::function<EpilogueSpecification(HloComputation* entry)> epilogue_spec_fn = nullptr) { auto hlo_module = ParseAndReturnVerifiedModule(hlo).value(); mlir::ImplicitLocOpBuilder builder(mlir::UnknownLoc::get(&context_), &context_); auto module = llvm_ir::CreateMlirModuleOp(builder.getLoc()); (*module)->setAttr( mlir::DLTIDialect::kDataLayoutAttrName, mlir::parseAttribute("#dlti.dl_spec<#dlti.dl_entry<index,32:i32>>", builder.getContext())); builder.setInsertionPointToStart(module->getBody()); auto* entry_computation = hlo_module->entry_computation(); std::vector<EpilogueSpecification> epilogue_spec; if (epilogue_spec_fn) { epilogue_spec.push_back(epilogue_spec_fn(entry_computation)); } PartitionedComputations partitioned_computations(entry_computation, &context_, epilogue_spec); auto fns = partitioned_computations.DeclareFunctions(module.get()); auto entry_func = fns[&partitioned_computations .FindPartitionedComputation(entry_computation) .GetRootSubgraph()]; auto& entry_pc = partitioned_computations.FindPartitionedComputation(entry_computation); auto call_targets = partitioned_computations.CreateCallTargetProvider(fns); TF_RETURN_IF_ERROR(SubgraphToMlirFunction( entry_pc, entry_pc.GetRootSubgraph(), entry_func, call_targets)); if (!partitioned_computations.epilogues().empty()) { const auto& epilogue = partitioned_computations.epilogues().front(); TF_RETURN_IF_ERROR(SubgraphToMlirFunction(entry_pc, epilogue, fns[&epilogue], call_targets)); } mlir::PassManager pm(&context_); pm.addPass(mlir::createCanonicalizerPass()); pm.addPass(mlir::createCSEPass()); TF_RET_CHECK(pm.run(module.get()).succeeded()); std::string out; llvm::raw_string_ostream stream(out); stream << module.get(); 
TF_ASSIGN_OR_RETURN(auto filecheck_result, RunFileCheck(out, filecheck_str)); TF_RET_CHECK(filecheck_result); return absl::OkStatus(); } mlir::MLIRContext context_; }; TEST_F(ElementalHloToMlirTest, Reduce) { TF_EXPECT_OK(Run(R"( add { p0 = f32[] parameter(0) p1 = f32[] parameter(1) ROOT sum = f32[] add(p0, p1) } ENTRY main { p0 = f32[10,20,30,40] parameter(0) p1 = f32[] parameter(1) ROOT r = f32[10,30] reduce(p0, p1), dimensions={1,3}, to_apply=add })", R"( )")); } TEST_F(ElementalHloToMlirTest, ReduceUnsigned) { TF_EXPECT_OK(Run(R"( add { p0 = u32[] parameter(0) p1 = u32[] parameter(1) ROOT sum = u32[] add(p0, p1) } ENTRY main { p0 = u32[10,20,30,40] parameter(0) p1 = u32[] parameter(1) ROOT r = u32[10,30] reduce(p0, p1), dimensions={1,3}, to_apply=add })", R"( )")); } TEST_F(ElementalHloToMlirTest, ReduceWindow) { TF_EXPECT_OK(Run(R"( add { p0 = f32[] parameter(0) p1 = f32[] parameter(1) ROOT sum = f32[] add(p0, p1) } ENTRY main { p0 = f32[42,12,8] parameter(0) p1 = f32[] parameter(1) ROOT r = f32[42,3,8] reduce-window(p0, p1), window={ size=1x1x7 stride=1x4x1 pad=0_0x0_0x3_3 }, to_apply=add })", R"( )")); } TEST_F(ElementalHloToMlirTest, ReduceWindowWithRescaling) { TF_EXPECT_OK(Run(R"( add { p0 = f32[] parameter(0) p1 = f32[] parameter(1) ROOT sum = f32[] add(p0, p1) } ENTRY main { p0 = f32[42,12,8] parameter(0) p1 = f32[] parameter(1) ROOT r = f32[19,12,8] reduce-window(p0, p1), window={ size=8x1x1 stride=4x1x1 pad=0_0x0_0x0_0 lhs_dilate=2x1x1 }, to_apply=add })", R"( )")); } TEST_F(ElementalHloToMlirTest, Concatenate) { TF_EXPECT_OK(Run(R"( ENTRY main { p0 = f32[10,20,30] parameter(0) p1 = f32[10,15,30] parameter(1) p2 = f32[10,3,30] parameter(2) ROOT r = f32[10,38,30] concatenate(p0, p1, p2), dimensions={1} })", R"( )")); } TEST_F(ElementalHloToMlirTest, ConcatenateMany) { TF_EXPECT_OK(Run(R"( ENTRY main { p0 = f32[10,1,30] parameter(0) p1 = f32[10,2,30] parameter(1) p2 = f32[10,3,30] parameter(2) p3 = f32[10,4,30] parameter(3) p4 = f32[10,5,30] parameter(4) p5 = f32[10,6,30] parameter(5) p6 = f32[10,7,30] parameter(6) ROOT r = f32[10,28,30] concatenate(p0, p1, p2, p3, p4, p5, p6), dimensions={1} })", R"( )")); } TEST_F(ElementalHloToMlirTest, ConcatenateUnsigned) { TF_EXPECT_OK(Run(R"( ENTRY main { p0 = u32[10,20,30] parameter(0) p1 = u32[10,15,30] parameter(1) ROOT r = u32[10,35,30] concatenate(p0, p1), dimensions={1} })", R"( )")); } TEST_F(ElementalHloToMlirTest, Gather) { TF_EXPECT_OK(Run(R"( ENTRY main { operand = f32[33,34] parameter(0) indices = s32[1806,1] parameter(1) ROOT r = f32[1806,7,8] gather(operand, indices), offset_dims={1,2}, collapsed_slice_dims={}, start_index_map={0}, index_vector_dim=1, slice_sizes={7,8} })", R"( )")); } TEST_F(ElementalHloToMlirTest, GatherWithImplicitVectorDim) { TF_EXPECT_OK(Run(R"( ENTRY main { operand = f32[33,34] parameter(0) indices = s32[1806] parameter(1) ROOT r = f32[1806,7,8] gather(operand, indices), offset_dims={1,2}, collapsed_slice_dims={}, start_index_map={0}, index_vector_dim=1, slice_sizes={7,8} })", R"( )")); } TEST_F(ElementalHloToMlirTest, Pad) { TF_EXPECT_OK(Run(R"( ENTRY main { p0 = f32[4, 4] parameter(0) p1 = f32[] parameter(1) ROOT pad = f32[12, 16] pad(p0, p1), padding=1_4_1x4_8_0 })", R"( )")); } TEST_F(ElementalHloToMlirTest, PadUnsigned) { TF_EXPECT_OK(Run(R"( ENTRY main { p0 = u32[4, 4] parameter(0) p1 = u32[] parameter(1) ROOT pad = u32[12, 16] pad(p0, p1), padding=1_4_1x4_8_0 })", R"( )")); } TEST_F(ElementalHloToMlirTest, DotWithF32Type) { TF_EXPECT_OK(Run(R"( ENTRY main { p0 = f32[3, 4] parameter(0) p1 = 
f32[4, 5] parameter(1) ROOT dot = f32[3, 5] dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} })", R"( )")); } TEST_F(ElementalHloToMlirTest, DotWithBF16Type) { TF_EXPECT_OK(Run(R"( ENTRY main { p0 = bf16[3, 4] parameter(0) p1 = bf16[4, 5] parameter(1) ROOT dot = bf16[3, 5] dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} })", R"( )")); } TEST_F(ElementalHloToMlirTest, DotWithS32Type) { TF_EXPECT_OK(Run(R"( ENTRY main { p0 = s32[3, 4] parameter(0) p1 = s32[4, 5] parameter(1) ROOT dot = s32[3, 5] dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} })", R"( )")); } TEST_F(ElementalHloToMlirTest, DotWithU32Type) { TF_EXPECT_OK(Run(R"( ENTRY main { p0 = u32[3, 4] parameter(0) p1 = u32[4, 5] parameter(1) ROOT dot = u32[3, 5] dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} })", R"( )")); } TEST_F(ElementalHloToMlirTest, DotWithPredType) { TF_EXPECT_OK(Run(R"( ENTRY main { p0 = pred[3, 4] parameter(0) p1 = pred[4, 5] parameter(1) ROOT dot = pred[3, 5] dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} })", R"( )")); } TEST_F(ElementalHloToMlirTest, DotWithBatchAnd2ContractingDims) { TF_EXPECT_OK(Run(R"( ENTRY main { p0 = f32[7, 3, 4, 5] parameter(0) p1 = f32[5, 6, 4, 7] parameter(1) ROOT dot = f32[7, 3, 6] dot(p0, p1), lhs_contracting_dims={2, 3}, rhs_contracting_dims={2, 0}, lhs_batch_dims={0}, rhs_batch_dims={3} })", R"( )")); } TEST_F(ElementalHloToMlirTest, ConvolutionSimple) { TF_EXPECT_OK(Run(R"( ENTRY main { p0 = f32[2,8,12,4] parameter(0) p1 = f32[4,3,5,16] parameter(1) ROOT conv = f32[2,6,8,16] convolution(p0, p1), window={size=3x5 pad=0_0x0_0}, dim_labels=b01f_i01o->b01f })", R"( )")); } TEST_F(ElementalHloToMlirTest, ConvolutionWithWindowStrides) { TF_EXPECT_OK(Run(R"( ENTRY main { p0 = f32[2,8,12,4] parameter(0) p1 = f32[4,3,5,16] parameter(1) ROOT conv = f32[2,3,4,16] convolution(p0, p1), window={size=3x5 stride=2x2 pad=0_0x0_0}, dim_labels=b01f_i01o->b01f })", R"( )")); } TEST_F(ElementalHloToMlirTest, ConvolutionWithPadding) { TF_EXPECT_OK(Run(R"( ENTRY main { p0 = f32[2,8,12,4] parameter(0) p1 = f32[4,3,5,16] parameter(1) ROOT conv = f32[2,8,12,16] convolution(p0, p1), window={size=3x5 pad=1_1x2_2}, dim_labels=b01f_i01o->b01f })", R"( )")); } TEST_F(ElementalHloToMlirTest, ConvolutionWithLhsDilation) { TF_EXPECT_OK(Run(R"( ENTRY main { p0 = f32[2,8,12,4] parameter(0) p1 = f32[4,3,5,16] parameter(1) ROOT conv = f32[2,13,19,16] convolution(p0, p1), window={size=3x5 pad=0_0x0_0 lhs_dilate=2x2}, dim_labels=b01f_i01o->b01f })", R"( )")); } TEST_F(ElementalHloToMlirTest, ConvolutionWithRhsDilation) { TF_EXPECT_OK(Run(R"( ENTRY main { p0 = f32[2,8,12,4] parameter(0) p1 = f32[4,3,5,16] parameter(1) ROOT conv = f32[2,4,4,16] convolution(p0, p1), window={size=3x5 pad=0_0x0_0 rhs_dilate=2x2}, dim_labels=b01f_i01o->b01f })", R"( )")); } TEST_F(ElementalHloToMlirTest, ConvolutionWithFeatureGroupCount) { TF_EXPECT_OK(Run(R"( ENTRY main { p0 = f32[2,8,12,4] parameter(0) p1 = f32[2,3,5,16] parameter(1) ROOT conv = f32[2,6,8,16] convolution(p0, p1), window={size=3x5 pad=0_0x0_0}, dim_labels=b01f_i01o->b01f, feature_group_count=2 })", R"( )")); } TEST_F(ElementalHloToMlirTest, ConvolutionWithBatchGroupCount) { TF_EXPECT_OK(Run(R"( ENTRY main { p0 = f32[2,8,12,4] parameter(0) p1 = f32[4,3,5,16] parameter(1) ROOT conv = f32[1,6,8,16] convolution(p0, p1), window={size=3x5 pad=0_0x0_0}, dim_labels=b01f_i01o->b01f, batch_group_count=2 })", R"( )")); } TEST_F(ElementalHloToMlirTest, Transpose) { TF_EXPECT_OK(Run(R"( ENTRY main { p0 = 
f32[4,5,6] parameter(0) ROOT transpose = f32[6,5,4] transpose(p0), dimensions={2,1,0} })", R"( )")); } TEST_F(ElementalHloToMlirTest, Broadcast) { TF_EXPECT_OK(Run(R"( ENTRY main { p0 = f32[4,5] parameter(0) ROOT broadcast = f32[6,4,5] broadcast(p0), dimensions={1,2} })", R"( )")); } TEST_F(ElementalHloToMlirTest, Add) { TF_EXPECT_OK(Run(R"( ENTRY main { p0 = f32[4] parameter(0) p1 = f32[4] parameter(1) ROOT add = f32[4] add(p0, p1) })", R"( )")); } TEST_F(ElementalHloToMlirTest, Complex) { TF_EXPECT_OK(Run(R"( ENTRY main { p0 = f32[4] parameter(0) p1 = f32[4] parameter(1) ROOT add = c64[4] complex(p0, p1) })", R"( )")); } TEST_F(ElementalHloToMlirTest, ComplexAbs) { TF_EXPECT_OK(Run(R"( ENTRY main { p0 = c64[4] parameter(0) ROOT abs = f32[4] abs(p0) })", R"( )")); } TEST_F(ElementalHloToMlirTest, UnsignedDiv) { TF_EXPECT_OK(Run(R"( ENTRY main { p0 = u32[4] parameter(0) p1 = u32[4] parameter(1) ROOT div = u32[4] divide(p0, p1) })", R"( )")); } TEST_F(ElementalHloToMlirTest, ConvertToUnsigned) { TF_EXPECT_OK(Run(R"( ENTRY main { p0 = f32[4] parameter(0) ROOT convert = u32[4] convert(p0) })", R"( )")); } TEST_F(ElementalHloToMlirTest, ConvertS8ToPred) { TF_EXPECT_OK(Run(R"( ENTRY main { p0 = s8[4] parameter(0) ROOT convert = pred[4] convert(p0) })", R"( )")); } TEST_F(ElementalHloToMlirTest, ConvertToUnsigned64Saturation) { TF_EXPECT_OK(Run(R"( ENTRY main { p0 = f32[4] parameter(0) ROOT convert = u64[4] convert(p0) })", R"( )")); } TEST_F(ElementalHloToMlirTest, PopulationCountUnsigned) { TF_EXPECT_OK(Run(R"( ENTRY main{ p0 = u32[10,1,4]{2,1,0} parameter(0) ROOT popcnt = u32[10,1,4]{2,1,0} popcnt(p0) })", R"( )")); } TEST_F(ElementalHloToMlirTest, Epilogue) { TF_EXPECT_OK(Run( R"( ENTRY main { %p0 = f32[2,16,17] parameter(0) %log = f32[2,16,17] log(%p0) %transpose = f32[2,17,16] transpose(%log), dimensions={0,2,1} %p1 = f32[] parameter(1) %bc = f32[2,17,16] broadcast(%p1), dimensions={} ROOT %add = f32[2,17,16] add(%transpose, %bc) })", R"( [this](HloComputation* entry) { EpilogueSpecification epilogue; epilogue.heroes.push_back(entry->GetInstructionWithName("transpose")); epilogue.roots.push_back(entry->GetInstructionWithName("add")); epilogue.index_ranges = {2, 16, 17}; epilogue.root_indexing.push_back( IndexingMap{mlir::AffineMap::getMultiDimIdentityMap(3, &context_) .getSubMap({0, 2, 1}), DimVarsFromTensorSizes({2, 17, 17}), {}, {}}); return epilogue; })); } TEST_F(ElementalHloToMlirTest, ScalarConstant) { TF_EXPECT_OK(Run(R"( ENTRY main { p0 = f32[1,1] parameter(0) c1 = f32[1,1] constant({{1.0}}) ROOT add = f32[1,1] add(p0, c1) })", R"( })")); } TEST_F(ElementalHloToMlirTest, ScalarUnsignedConstant) { TF_EXPECT_OK(Run(R"( ENTRY main { p0 = u32[1,1] parameter(0) c1 = u32[1,1] constant({{1}}) ROOT add = u32[1,1] add(p0, c1) })", R"( })")); } TEST_F(ElementalHloToMlirTest, ScalarComplexConstant) { TF_EXPECT_OK(Run(R"( ENTRY main { p0 = c64[] parameter(0) c1 = c64[] constant((1.0, 0.0)) ROOT add = c64[] add(p0, c1) })", R"( })")); } TEST_F(ElementalHloToMlirTest, TensorConstant) { TF_EXPECT_OK(Run(R"( ENTRY main { p0 = f32[2,1] parameter(0) c1 = f32[2,1] constant({{1.0}, {2.0}}) ROOT add = f32[2,1] add(p0, c1) })", R"( })")); } TEST_F(ElementalHloToMlirTest, TensorConstantPred) { TF_EXPECT_OK(Run( R"( ENTRY main { ROOT c1 = pred[2] constant({1, 0}) })", " } TEST_F(ElementalHloToMlirTest, DynamicSlice) { TF_EXPECT_OK(Run(R"( ENTRY main { in = f32[20,30] parameter(0) i0 = s32[] parameter(1) i1 = s32[] parameter(2) ROOT slice = f32[4,5] dynamic-slice(in, i0, i1), dynamic_slice_sizes={4,5} })", 
R"( )")); } TEST_F(ElementalHloToMlirTest, DynamicSliceUnsignedIndices) { TF_EXPECT_OK(Run(R"( ENTRY main { in = f32[20,30] parameter(0) i0 = u32[] parameter(1) i1 = u32[] parameter(2) ROOT slice = f32[4,5] dynamic-slice(in, i0, i1), dynamic_slice_sizes={4,5} })", R"( )")); } TEST_F(ElementalHloToMlirTest, DynamicUpdateSlice) { TF_EXPECT_OK(Run(R"( ENTRY main { in = f32[20,30] parameter(0) updates = f32[5,6] parameter(1) i0 = s32[] parameter(2) i1 = s32[] parameter(3) ROOT updated = f32[20,30] dynamic-update-slice(in, updates, i0, i1) })", R"( )")); } TEST_F(ElementalHloToMlirTest, DynamicUpdateSliceUnsigned) { TF_EXPECT_OK(Run(R"( ENTRY main { in = u32[20,30] parameter(0) updates = u32[5,6] parameter(1) i0 = s32[] parameter(2) i1 = s32[] parameter(3) ROOT updated = u32[20,30] dynamic-update-slice(in, updates, i0, i1) })", R"( )")); } TEST_F(ElementalHloToMlirTest, IotaUnsigned) { TF_EXPECT_OK(Run(R"( ENTRY main { ROOT iota = u32[10,20] iota(), iota_dimension=0 })", R"( )")); } TEST_F(ElementalHloToMlirTest, IotaComplex) { TF_EXPECT_OK(Run(R"( ENTRY main { ROOT iota = c64[6,4,5] iota(), iota_dimension=1 })", R"( )")); } TEST_F(ElementalHloToMlirTest, MixedIndexingTuple) { TF_EXPECT_OK(Run(R"( ENTRY main { %p0 = f32[10,10] parameter(0) %p1 = f32[100] parameter(1) ROOT tuple = (f32[10,10], f32[100]) tuple(%p0, %p1) })", R"( )")); } TEST_F(ElementalHloToMlirTest, NestedTuple) { TF_EXPECT_OK(Run(R"( ENTRY main { %p0 = f32[10,10] parameter(0) %p1 = f32[100] parameter(1) %t0 = (f32[10,10], f32[100]) tuple(%p0, %p1) %t1 = (f32[100], f32[10,10]) tuple(%p1, %p0) ROOT tuple = ((f32[10,10], f32[100]), f32[100], (f32[100], f32[10,10])) tuple(%t0, %p1, %t1) })", R"( )")); } TEST_F(ElementalHloToMlirTest, ReducePrecision) { TF_EXPECT_OK(Run(R"( ENTRY main { %p0 = f32[5,7] parameter(0) ROOT r = f32[5,7] reduce-precision(%p0), exponent_bits=8, mantissa_bits=23 } )", " } TEST_F(ElementalHloToMlirTest, Map) { TF_EXPECT_OK(Run(R"( mapper { a = f32[] parameter(0) b = f32[] parameter(1) ROOT add = f32[] add(a, b) } ENTRY main { %p0 = f32[5,7] parameter(0) %p1 = f32[5,7] parameter(1) ROOT r = f32[5,7] map(%p0, %p1), dimensions={}, to_apply=mapper })", R"( )")); } TEST_F(ElementalHloToMlirTest, BroadcastSelect) { TF_EXPECT_OK(Run(R"( ENTRY main { p0 = pred[] parameter(0) p1 = f32[5,7] parameter(1) p2 = f32[5,7] parameter(2) ROOT r = f32[5,7] select(p0, p1, p2) })", R"( )")); } } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/mlir/elemental_hlo_to_mlir.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/mlir/elemental_hlo_to_mlir_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
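Editor's note: the DynamicSlice tests above exercise lowering that clamps runtime start indices into the valid range, as done by ClampIndex in the .cc file: a non-positive high bound collapses to zero, unsigned indices are clamped only from above, and signed indices are clamped from both sides. Below is a plain C++ sketch of that clamping rule with no MLIR involved; the function names and the example numbers are illustrative.

// Standalone sketch of the ClampIndex clamping rule used when lowering
// dynamic-slice start indices; plain integers instead of MLIR values.
#include <algorithm>
#include <cstdint>
#include <iostream>

// `high` is the largest valid start index (operand_dim - slice_dim).
int64_t ClampSigned(int64_t index, int64_t high) {
  if (high <= 0) return 0;  // Degenerate dimension.
  return std::max<int64_t>(0, std::min(index, high));
}

uint64_t ClampUnsigned(uint64_t index, int64_t high) {
  if (high <= 0) return 0;
  // Unsigned indices cannot be negative, so only the upper bound applies.
  return std::min<uint64_t>(index, static_cast<uint64_t>(high));
}

int main() {
  // f32[20,30] dynamic-slice with sizes {4,5}: valid starts are [0,16]x[0,25].
  std::cout << ClampSigned(-3, 16) << "\n";    // 0
  std::cout << ClampSigned(99, 16) << "\n";    // 16
  std::cout << ClampUnsigned(99, 25) << "\n";  // 25
}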
8485cc72-be10-4d28-b9e1-e103725c5321
cpp
tensorflow/tensorflow
computation_partitioner
third_party/xla/xla/service/gpu/fusions/mlir/computation_partitioner.cc
third_party/xla/xla/service/gpu/fusions/mlir/computation_partitioner_test.cc
#include "xla/service/gpu/fusions/mlir/computation_partitioner.h" #include <cstdint> #include <functional> #include <iterator> #include <optional> #include <sstream> #include <string> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/container/node_hash_map.h" #include "absl/log/check.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_join.h" #include "absl/types/span.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallVector.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/Dialect/LLVMIR/LLVMAttrs.h" #include "mlir/IR/Attributes.h" #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/ImplicitLocOpBuilder.h" #include "mlir/IR/MLIRContext.h" #include "mlir/IR/Types.h" #include "mlir/IR/Value.h" #include "mlir/Interfaces/DataLayoutInterfaces.h" #include "mlir/Support/LLVM.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/gpu/fusions/fusion_emitter.h" #include "xla/service/gpu/fusions/mlir/type_util.h" #include "xla/service/gpu/hlo_fusion_analysis.h" #include "xla/service/gpu/model/indexing_analysis.h" #include "xla/service/gpu/model/indexing_map.h" #include "xla/service/llvm_ir/llvm_util.h" #include "xla/shape.h" #include "xla/shape_util.h" namespace xla { namespace gpu { namespace mlir_converter { namespace { int Arity(const Shape& shape) { return shape.IsTuple() ? shape.tuple_shapes_size() : 1; } const Shape& TupleShape(const Shape& shape, int index) { return shape.IsTuple() ? shape.tuple_shapes(index) : shape; } } EpilogueSpecification EpilogueSpecification::FromIdentityIndexing( const HloInstruction* hero, const HloInstruction* root, mlir::MLIRContext* mlir_context) { EpilogueSpecification result; absl::c_copy(root->shape().dimensions(), std::back_inserter(result.index_ranges)); result.roots.push_back(root); result.root_indexing.push_back( CreateIdentityMap(root->shape(), mlir_context)); result.heroes.push_back(hero); return result; } EpilogueSpecification EpilogueSpecification::FromOutputIndexing( const HloFusionAnalysis& analysis, const std::vector<const HloInstruction*>& heroes, const std::vector<const HloInstruction*>& roots, const KernelFusionInterface& fusion, mlir::MLIRContext* mlir_context) { EpilogueSpecification result; absl::flat_hash_map<const HloInstruction*, const HloInstruction*> root_to_hero; for (auto [root, hero] : llvm::zip(analysis.fusion_roots(), analysis.fusion_heroes())) { root_to_hero[&root.instruction()] = &hero.instruction(); } absl::flat_hash_map<const HloInstruction*, int> root_to_index; for (auto [index, root] : llvm::enumerate(analysis.fusion_roots())) { root_to_index[&root.instruction()] = root_to_index.size(); } result.root_indexing.reserve(roots.size()); for (auto* root : roots) { auto indexing = fusion.ComputeThreadIdToOutputIndexing(root_to_index[root], mlir_context); if (result.index_ranges.empty()) { result.index_ranges.reserve(indexing->GetDimensionCount() + indexing->GetSymbolCount()); for (const auto& dim : indexing->GetDimensionBounds()) { result.index_ranges.push_back(dim.upper + 1); } for (const auto& sym : indexing->GetSymbolBounds()) { result.index_ranges.push_back(sym.upper + 1); } } auto* hero = root_to_hero[root]; auto epilogue_indexing = ComputeEpilogueInputToOutputIndexing( {*hero, &analysis.fusion()}, {*root, &analysis.fusion()}, 
mlir_context); result.root_indexing.push_back( ComposeIndexingMaps(*indexing, epilogue_indexing)); } result.heroes = heroes; result.roots = roots; return result; } std::string PartitionedComputation::Subgraph::ToString(int indentation) const { std::string indent(indentation, ' '); std::ostringstream ss; ss << indent << "SUBGRAPH " << name << " {\n"; for (auto* instr : (*instructions.begin())->parent()->MakeInstructionPostOrder()) { if (!instructions.contains(instr)) continue; ss << indent << " "; if (absl::c_linear_search(roots, instr)) { ss << "ROOT "; } ss << instr->ToString() << "\n"; } ss << indent << "}"; return ss.str(); } std::string PartitionedComputation::ToString(int indentation) const { std::ostringstream ss; ss << "PartitionedComputation " << computation_->name() << ":"; for (const Subgraph& subgraph : subgraphs_) { ss << "\n" << subgraph.ToString(indentation); } return ss.str(); } std::string PartitionedComputations::ToString() const { std::ostringstream ss; ss << "PartitionedComputations:"; for (const auto& partitioned_computation : partitioned_computations_) { ss << "\n" << partitioned_computation.ToString(); } return ss.str(); } template <typename C, typename F> bool AllIdentical(const C& c, F&& f) { auto begin = std::begin(c); auto end = std::end(c); if (begin == end || begin + 1 == end) { return true; } auto v = f(*begin); ++begin; for (; begin != end; ++begin) { if (f(*begin) != v) { return false; } } return true; } bool IsEvaluatedMoreThanOnce(const HloInstruction* instr) { return absl::c_any_of(instr->users(), [&](const HloInstruction* user) { if (user->opcode() == HloOpcode::kGather && absl::c_linear_search(user->OperandIndices(instr), 1) && instr->shape().rank() >= 2 && instr->shape().dimensions(1) > 1) { return true; } if (user->opcode() == HloOpcode::kConcatenate && user->OperandIndices(instr).size() > 1) { return true; } return false; }); } PartitionedComputation::PartitionedComputation( const HloComputation* computation, mlir::MLIRContext* mlir_context, std::function<bool(const HloInstruction*)> is_subgraph_root) : computation_(computation) { CHECK_NE(computation, nullptr); int next_function_id = 0; int next_indexing_id = 0; auto pre_order = computation->MakeInstructionPostOrder(); absl::c_reverse(pre_order); absl::flat_hash_map<const HloInstruction*, int> instr_indices; for (auto [i, instr] : llvm::enumerate(pre_order)) { instr_indices[instr] = i; } std::vector<std::pair<int, int>> ids(pre_order.size()); auto allocate_new_function = [&](const HloInstruction* instr) { ids[instr_indices[instr]] = {next_function_id++, next_indexing_id++}; }; for (auto [instr_index, instr] : llvm::enumerate(pre_order)) { bool is_root = instr->user_count() == 0 || is_subgraph_root(instr); bool users_have_consistent_indexing = AllIdentical( instr->users(), [&](const HloInstruction* user) { return ids[instr_indices[user]]; }); bool all_users_elementwise = absl::c_all_of(instr->users(), [&](const HloInstruction* user) { return HloInstruction::IsOpElementwise(user->opcode()); }); if (!is_root && users_have_consistent_indexing && all_users_elementwise) { ids[instr_index] = ids[instr_indices[instr->users().front()]]; } else if (is_root || instr->user_count() > 1 || IsEvaluatedMoreThanOnce(instr)) { allocate_new_function(instr); } else { ids[instr_index] = ids[instr_indices[instr->users().front()]]; ids[instr_index].second = next_indexing_id++; } } std::vector<std::vector<const HloInstruction*>> functions(next_function_id); for (auto [id, instr] : llvm::reverse(llvm::zip(ids, pre_order))) { 
functions[id.first].push_back(instr); } subgraphs_.reserve(functions.size()); for (auto&& [function_id, instructions] : llvm::enumerate(functions)) { auto is_different_function = [&, function_id = function_id](auto* user) { return ids[instr_indices[user]].first != function_id; }; std::vector<const HloInstruction*> roots; std::vector<IndexingMap> root_indexing; const xla::Shape* first_root_shape = nullptr; for (auto* instruction : instructions) { if (instruction->user_count() == 0 || absl::c_any_of(instruction->users(), is_different_function)) { roots.push_back(instruction); if (first_root_shape) { CHECK(!instruction->shape().IsTuple()) << "Internal tuples are not supported"; if (ShapeUtil::EqualIgnoringElementType(*first_root_shape, instruction->shape())) { root_indexing.push_back(root_indexing.front()); } else { root_indexing.push_back(GetBitcastMap( *first_root_shape, instruction->shape(), mlir_context)); } } else { first_root_shape = &instruction->shape(); while (first_root_shape->IsTuple()) { first_root_shape = &first_root_shape->tuple_shapes()[0]; } root_indexing.push_back( CreateIdentityMap(*first_root_shape, mlir_context)); } } } std::vector<int64_t> ranges{first_root_shape->dimensions().begin(), first_root_shape->dimensions().end()}; CHECK(!roots.empty()) << "No roots found"; std::string name = llvm_ir::SanitizeFunctionName(absl::StrCat( roots.front()->parent()->name(), "_", absl::StrJoin(roots, "_", [](std::string* out, const auto* root) { absl::StrAppend(out, root->name()); }))); subgraphs_.push_back(Subgraph{ std::move(name), {instructions.begin(), instructions.end()}, std::move(roots), std::move(ranges), std::move(root_indexing)}); } for (const auto& subgraph : subgraphs_) { for (const auto* instruction : subgraph.instructions) { instructions_to_subgraphs_[instruction] = &subgraph; } } } PartitionedComputation::Subgraph PartitionedComputation::Subgraph::ForEpilogue( const EpilogueSpecification& epilogue) { if (epilogue.roots.empty()) return {}; const auto* computation = epilogue.heroes.front()->parent(); PartitionedComputation::Subgraph subgraph; subgraph.name = llvm_ir::SanitizeFunctionName( absl::StrCat(computation->name(), "__epilogue__", absl::StrJoin(epilogue.roots, "_", [](std::string* out, const auto* root) { absl::StrAppend(out, root->name()); }))); subgraph.roots = epilogue.roots; int index = 0; for (auto* hero : epilogue.heroes) { if (subgraph.injected_value_starts.insert({hero, index}).second) { index += Arity(hero->shape()); } } subgraph.num_injected_values = index; absl::flat_hash_set<const HloInstruction*> seen; std::function<void(const HloInstruction*)> visit; visit = [&](const HloInstruction* instruction) { if (subgraph.injected_value_starts.contains(instruction)) return; if (!seen.insert(instruction).second) return; for (auto [index, operand] : llvm::enumerate(instruction->operands())) { visit(operand); } }; visit(computation->root_instruction()); subgraph.instructions = std::move(seen); subgraph.index_ranges = epilogue.index_ranges; subgraph.root_indexing = epilogue.root_indexing; return subgraph; } PartitionedComputations::PartitionedComputations( const HloComputation* fusion, mlir::MLIRContext* mlir_context, std::vector<EpilogueSpecification> epilogues) : fusion_(fusion) { absl::flat_hash_set<const HloComputation*> seen; std::vector<const HloComputation*> computations; std::function<void(const HloComputation*)> visit; visit = [&](const HloComputation* computation) { if (!seen.insert(computation).second) return; computations.push_back(computation); for (auto* 
instr : computation->instructions()) { absl::c_for_each(instr->called_computations(), visit); } }; visit(fusion); absl::flat_hash_set<const HloInstruction*> roots; epilogues_.reserve(epilogues.size()); for (const auto& epilogue : epilogues) { epilogues_.push_back( PartitionedComputation::Subgraph::ForEpilogue(epilogue)); roots.insert(epilogue.heroes.begin(), epilogue.heroes.end()); for (auto* instruction : epilogue.heroes) { roots.insert(instruction->operands().begin(), instruction->operands().end()); } } auto is_root = [&](const HloInstruction* instruction) { return roots.contains(instruction); }; partitioned_computations_.reserve(computations.size()); for (auto* computation : computations) { computation_to_partitioning_[computation] = &partitioned_computations_.emplace_back( PartitionedComputation{computation, mlir_context, is_root}); } } absl::flat_hash_map<const PartitionedComputation::Subgraph*, mlir::func::FuncOp> PartitionedComputations::DeclareFunctions(mlir::ModuleOp module) const { absl::flat_hash_map<const PartitionedComputation::Subgraph*, mlir::func::FuncOp> mapping; mlir::ImplicitLocOpBuilder builder(module.getLoc(), module->getContext()); builder.setInsertionPointToEnd(module.getBody()); auto create_funcs = [&](absl::Span<const PartitionedComputation::Subgraph> subgraphs) { for (const auto& subgraph : subgraphs) { if (subgraph.roots.empty()) continue; auto func_op = CreateSubgraphMlirFunction(subgraph, builder); func_op->setAttr("llvm.linkage", mlir::LLVM::LinkageAttr::get( module->getContext(), mlir::LLVM::Linkage::Internal)); mapping[&subgraph] = func_op; } }; for (const auto& computation : partitioned_computations_) { create_funcs(computation.subgraphs()); } create_funcs(epilogues_); return mapping; } const PartitionedComputation::Subgraph& PartitionedComputations::FindSubgraph( const HloInstruction* instr) const { return FindPartitionedComputation(instr->parent()).FindSubgraph(instr); } CallTargetProvider PartitionedComputations::CreateCallTargetProvider( const absl::flat_hash_map<const PartitionedComputation::Subgraph*, mlir::func::FuncOp>& subgraph_to_func) const { return [&, this](const HloInstruction* instr) { const auto& subgraph = FindSubgraph(instr); CHECK(subgraph_to_func.contains(&subgraph)) << "No function found for subgraph with instruction " << instr->ToString(); return subgraph_to_func.at(&subgraph); }; } mlir::func::FuncOp CreateSubgraphMlirFunction( const PartitionedComputation::Subgraph& subgraph, mlir::ImplicitLocOpBuilder& b) { auto* computation = subgraph.roots.front()->parent(); llvm::SmallVector<mlir::Type> parameter_types; llvm::SmallVector<mlir::Type> result_types; auto element_type = [&](const auto& shape) { return PrimitiveTypeToMlirType(shape.element_type(), b); }; for (auto* root : subgraph.roots) { for (auto ty : ShapeToMlirTypes(root->shape(), b)) { result_types.push_back( mlir::cast<mlir::RankedTensorType>(ty).getElementType()); } } llvm::SmallVector<mlir::DictionaryAttr> arg_attrs; if (computation->IsFusionComputation() || computation->IsEntryComputation()) { for (auto* param : computation->parameter_instructions()) { parameter_types.push_back(TensorShapeToMlirType(param->shape(), b)); arg_attrs.emplace_back(); } for (int64_t size : subgraph.index_ranges) { parameter_types.push_back(b.getIndexType()); arg_attrs.emplace_back(mlir::DictionaryAttr::get( b.getContext(), {b.getNamedAttr("xla.range", b.getIndexArrayAttr({0, size - 1}))})); } int operand_offset = parameter_types.size(); parameter_types.resize(operand_offset + 
subgraph.num_injected_values); arg_attrs.resize(parameter_types.size()); for (auto [value, start] : subgraph.injected_value_starts) { for (int index = 0; index < Arity(value->shape()); ++index) { parameter_types[operand_offset + start + index] = element_type(TupleShape(value->shape(), index)); } } } else { for (auto* param : computation->parameter_instructions()) { parameter_types.push_back(element_type(param->shape())); } } auto ty = b.getFunctionType(parameter_types, result_types); auto func_op = b.create<mlir::func::FuncOp>( subgraph.name, ty, llvm::ArrayRef<mlir::NamedAttribute>{}, arg_attrs); func_op.setPrivate(); return func_op; } } } }
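Editor's note: PartitionedComputation above walks the fusion with users before operands and folds an instruction into its users' function only when every user already landed in the same function; roots, multi-user and multiply-evaluated instructions start new functions. The toy standalone sketch below shows only that core merging rule and deliberately omits the elementwise and indexing-id refinements; the Node struct and integer group ids are illustrative assumptions, not XLA types.

// Toy sketch of the grouping rule in PartitionedComputation: visit users
// before operands, and fold a node into its users' group only when all
// users share one group; otherwise it becomes the root of a new group.
#include <cstddef>
#include <cstdio>
#include <string>
#include <vector>

struct Node {
  std::string name;
  std::vector<int> users;  // Indices of user nodes (already grouped).
};

std::vector<int> AssignGroups(const std::vector<Node>& nodes) {
  // `nodes` must be ordered so that every user precedes its operands,
  // mirroring the reversed post-order walk above.
  std::vector<int> group(nodes.size(), -1);
  int next_group = 0;
  for (std::size_t i = 0; i < nodes.size(); ++i) {
    const Node& n = nodes[i];
    bool same_group = !n.users.empty();
    for (int u : n.users) {
      if (group[u] != group[n.users.front()]) same_group = false;
    }
    group[i] = same_group ? group[n.users.front()] : next_group++;
  }
  return group;
}

int main() {
  // root <- add <- {a, b}; a and b each feed only `add`, so all four nodes
  // collapse into one group. A second, differently grouped user of `a`
  // would split it into its own subgraph.
  std::vector<Node> nodes = {
      {"root", {}}, {"add", {0}}, {"a", {1}}, {"b", {1}}};
  std::vector<int> group = AssignGroups(nodes);
  for (std::size_t i = 0; i < nodes.size(); ++i) {
    std::printf("%s -> group %d\n", nodes[i].name.c_str(), group[i]);
  }
}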
#include "xla/service/gpu/fusions/mlir/computation_partitioner.h" #include <string> #include <utility> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "llvm/Support/raw_ostream.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/AffineExpr.h" #include "mlir/IR/Builders.h" #include "mlir/IR/ImplicitLocOpBuilder.h" #include "mlir/IR/Location.h" #include "mlir/IR/MLIRContext.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/gpu/model/indexing_analysis.h" #include "xla/tests/hlo_test_base.h" namespace xla { namespace gpu { namespace mlir_converter { namespace { using ::testing::ElementsAre; using ::testing::SizeIs; using ::testing::UnorderedElementsAre; class ComputationPartitionerTest : public HloTestBase { protected: ComputationPartitionerTest() { mlir_context_.loadDialect<mlir::func::FuncDialect>(); } mlir::MLIRContext mlir_context_; }; std::string PrintAndErase(mlir::func::FuncOp func) { std::string out; llvm::raw_string_ostream os(out); os << func; func.erase(); return out; } TEST_F(ComputationPartitionerTest, PartitionDiamonds) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module fused_computation { %param = f32[6] parameter(0) %slice0.1 = f32[5] slice(f32[6]{0} %param), slice={[0:5]} %slice0.2 = f32[5] slice(f32[6]{0} %param), slice={[1:6]} %add0 = f32[5] add(f32[5]{0} %slice0.1, f32[5]{0} %slice0.2) %slice1.1 = f32[4] slice(f32[5]{0} %add0), slice={[0:4]} %slice1.2 = f32[4] slice(f32[5]{0} %add0), slice={[1:5]} %add1 = f32[4] add(f32[4]{0} %slice1.1, f32[4]{0} %slice1.2) %slice2.1 = f32[3] slice(f32[4]{0} %add1), slice={[0:3]} %slice2.2 = f32[3] slice(f32[4]{0} %add1), slice={[1:4]} %add2 = f32[3] add(f32[3]{0} %slice2.1, f32[3]{0} %slice2.2) %slice3.1 = f32[2] slice(f32[3]{0} %add2), slice={[0:2]} %slice3.2 = f32[2] slice(f32[3]{0} %add2), slice={[1:3]} ROOT %add3 = f32[2] add(f32[2]{0} %slice3.1, f32[2]{0} %slice3.2) })") .value(); auto* fusion = module->GetComputationWithName("fused_computation"); ASSERT_NE(fusion, nullptr); PartitionedComputation computation(fusion, &mlir_context_); constexpr auto kExpected = R"(PartitionedComputation fused_computation: SUBGRAPH fused_computation_add3 { %slice3.1 = f32[2]{0} slice(f32[3]{0} %add2), slice={[0:2]} %slice3.2 = f32[2]{0} slice(f32[3]{0} %add2), slice={[1:3]} ROOT %add3 = f32[2]{0} add(f32[2]{0} %slice3.1, f32[2]{0} %slice3.2) } SUBGRAPH fused_computation_add2 { %slice2.1 = f32[3]{0} slice(f32[4]{0} %add1), slice={[0:3]} %slice2.2 = f32[3]{0} slice(f32[4]{0} %add1), slice={[1:4]} ROOT %add2 = f32[3]{0} add(f32[3]{0} %slice2.1, f32[3]{0} %slice2.2) } SUBGRAPH fused_computation_add1 { %slice1.1 = f32[4]{0} slice(f32[5]{0} %add0), slice={[0:4]} %slice1.2 = f32[4]{0} slice(f32[5]{0} %add0), slice={[1:5]} ROOT %add1 = f32[4]{0} add(f32[4]{0} %slice1.1, f32[4]{0} %slice1.2) } SUBGRAPH fused_computation_add0 { %slice0.1 = f32[5]{0} slice(f32[6]{0} %param), slice={[0:5]} %slice0.2 = f32[5]{0} slice(f32[6]{0} %param), slice={[1:6]} ROOT %add0 = f32[5]{0} add(f32[5]{0} %slice0.1, f32[5]{0} %slice0.2) } SUBGRAPH fused_computation_param { ROOT %param = f32[6]{0} parameter(0) })"; EXPECT_EQ(computation.ToString(6), kExpected); } TEST_F(ComputationPartitionerTest, SimpleConcatenate) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module fused_computation { %param1 = f32[6] parameter(0) %param2 = f32[3] parameter(1) %neg = f32[6] negate(%param1) %exp = f32[3] exponential(%param2) ROOT %concat = f32[9] concatenate(%neg, %exp), dimensions={0} })") 
.value(); auto* fusion = module->GetComputationWithName("fused_computation"); ASSERT_NE(fusion, nullptr); PartitionedComputation computation(fusion, &mlir_context_); EXPECT_THAT(computation.subgraphs(), SizeIs(1)); } TEST_F(ComputationPartitionerTest, DiamondConcatenate) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module fused_computation { %param1 = f32[6] parameter(0) %param2 = f32[6] parameter(1) %log = f32[6] log(%param1) %add = f32[6] add(%log, %param2) %neg = f32[6] negate(%log) %exp = f32[6] exponential(%add) ROOT %concat = f32[12] concatenate(%neg, %exp), dimensions={0} })") .value(); auto* fusion = module->GetComputationWithName("fused_computation"); ASSERT_NE(fusion, nullptr); PartitionedComputation computation(fusion, &mlir_context_); constexpr auto kExpected = R"(PartitionedComputation fused_computation: SUBGRAPH fused_computation_concat { %neg = f32[6]{0} negate(f32[6]{0} %log) %param2 = f32[6]{0} parameter(1) %add = f32[6]{0} add(f32[6]{0} %log, f32[6]{0} %param2) %exp = f32[6]{0} exponential(f32[6]{0} %add) ROOT %concat = f32[12]{0} concatenate(f32[6]{0} %neg, f32[6]{0} %exp), dimensions={0} } SUBGRAPH fused_computation_log { %param1 = f32[6]{0} parameter(0) ROOT %log = f32[6]{0} log(f32[6]{0} %param1) })"; EXPECT_EQ(computation.ToString(6), kExpected); } TEST_F(ComputationPartitionerTest, TupleRoot) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module fused_computation { %p0 = f32[6] parameter(0) %p1 = f32[6] parameter(1) %add = f32[6] add(p0, p1) %sub = f32[6] subtract(p0, p1) ROOT %root = (f32[6], f32[6]) tuple(%add, %sub) })") .value(); auto* fusion = module->GetComputationWithName("fused_computation"); ASSERT_NE(fusion, nullptr); PartitionedComputation computation(fusion, &mlir_context_); constexpr auto kExpected = R"(PartitionedComputation fused_computation: SUBGRAPH fused_computation_root { %add = f32[6]{0} add(f32[6]{0} %p0, f32[6]{0} %p1) %sub = f32[6]{0} subtract(f32[6]{0} %p0, f32[6]{0} %p1) ROOT %root = (f32[6]{0}, f32[6]{0}) tuple(f32[6]{0} %add, f32[6]{0} %sub) } SUBGRAPH fused_computation_p1 { ROOT %p1 = f32[6]{0} parameter(1) } SUBGRAPH fused_computation_p0 { ROOT %p0 = f32[6]{0} parameter(0) })"; EXPECT_EQ(computation.ToString(6), kExpected); } TEST_F(ComputationPartitionerTest, Epilogue) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module add { p0 = f32[] parameter(0) p1 = f32[] parameter(1) ROOT add = f32[] add(p0, p1) } fused_computation { p0 = f32[4] parameter(0) c0 = f32[] constant(0) reduce = f32[] reduce(p0, c0), dimensions={0}, to_apply=add bitcast = f32[1] bitcast(reduce) abs = f32[1] abs(bitcast) log = f32[1] log(abs) sign = f32[1] sign(bitcast) ROOT tuple = (f32[1], f32[1]) tuple(log, sign) })") .value(); auto* fused_computation = module->GetComputationWithName("fused_computation"); EpilogueSpecification epilogue{ {fused_computation->GetInstructionWithName("reduce")}, {fused_computation->GetInstructionWithName("log"), fused_computation->GetInstructionWithName("sign")}, {1, 42}, {CreateIdentityMap( fused_computation->root_instruction()->shape().tuple_shapes(0), &mlir_context_)}}; PartitionedComputations fusion(fused_computation, &mlir_context_, {epilogue}); mlir::ImplicitLocOpBuilder builder(mlir::UnknownLoc::get(&mlir_context_), &mlir_context_); EXPECT_EQ( PrintAndErase( CreateSubgraphMlirFunction(fusion.epilogues().front(), builder)), "func.func private @fused_computation__epilogue__log_sign(tensor<4xf32>, " "index {xla.range = [0 : index, 0 : index]}, " "index {xla.range = [0 : index, 41 : 
index]}, " "f32) -> (f32, f32)"); } TEST_F(ComputationPartitionerTest, TransposeAsRoot) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module fused_computation { %p0 = f32[64, 32] parameter(0) %p1 = f32[64, 32] parameter(1) %add = f32[64, 32] add(p0, p1) %transpose = f32[32, 64] transpose(%add), dimensions={1, 0} %exp = f32[32, 64] exponential(%transpose) ROOT %root = f32[32, 64] tanh(%exp) })") .value(); auto* fusion = module->GetComputationWithName("fused_computation"); ASSERT_NE(fusion, nullptr); PartitionedComputation computation( fusion, &mlir_context_, [](const HloInstruction* instr) { return instr->opcode() == HloOpcode::kTranspose; }); ASSERT_THAT(computation.subgraphs(), SizeIs(2)); EXPECT_THAT(computation.GetRootSubgraph().roots, SizeIs(1)); EXPECT_THAT(computation.GetRootSubgraph().instructions, SizeIs(2)); } TEST_F(ComputationPartitionerTest, PartiallyMergable) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module fused_computation { %p0 = f32[10,10] parameter(0) %p1 = f32[10,10] parameter(1) %add = f32[10,10] add(%p0, %p1) %transpose = f32[10,10] transpose(%add), dimensions={1,0} ROOT %sub = f32[10,10] subtract(%add, %transpose) })") .value(); auto* fusion = module->GetComputationWithName("fused_computation"); ASSERT_NE(fusion, nullptr); PartitionedComputation computation(fusion, &mlir_context_); auto transpose = fusion->GetInstructionWithName("transpose"); auto sub = fusion->GetInstructionWithName("sub"); ASSERT_THAT(computation.subgraphs(), SizeIs(2)); EXPECT_THAT(computation.GetRootSubgraph().instructions, UnorderedElementsAre(transpose, sub)); } TEST_F(ComputationPartitionerTest, SubgraphSignatures) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module add { %p0 = f32[] parameter(0) %p1 = f32[] parameter(1) ROOT %add = f32[] add(%p0, %p1) } fusion { %p0 = f32[10,10]{0,1} parameter(0) %p1 = f32[10,10]{1,0} parameter(1) %c0 = f32[] constant(2) %bc = f32[10,10]{0,1} bitcast(%p1) %add = f32[10,10] add(%p0, %bc) ROOT %reduce = f32[10] reduce(%add, %c0), dimensions={1}, to_apply=add } ENTRY main { %p0 = f32[10,10] parameter(0) %p1 = f32[10,10] parameter(1) ROOT %fusion = f32[10] fusion(%p0, %p1), kind=kLoop, calls=fusion })") .value(); mlir::MLIRContext context; context.loadDialect<mlir::func::FuncDialect>(); mlir::ImplicitLocOpBuilder builder(mlir::UnknownLoc::get(&context), &context); PartitionedComputation fusion(module->GetComputationWithName("fusion"), &mlir_context_); EXPECT_EQ( PrintAndErase( CreateSubgraphMlirFunction(fusion.GetRootSubgraph(), builder)), "func.func private @fusion_reduce(tensor<10x10xf32, dense<[0, 1]> : " "tensor<2xi64>>, tensor<10x10xf32>, index {xla.range = [0 : index, 9 : " "index]}) -> f32"); PartitionedComputation add(module->GetComputationWithName("add"), &mlir_context_); EXPECT_EQ( PrintAndErase(CreateSubgraphMlirFunction(add.GetRootSubgraph(), builder)), "func.func private @add_add(f32, f32) -> f32"); } } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/mlir/computation_partitioner.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/fusions/mlir/computation_partitioner_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
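Editor's note: Subgraph::ForEpilogue above gives each hero a starting offset among the injected function arguments, advancing by the arity of the hero's shape (one slot per top-level tuple element, or one for a non-tuple shape), and a hero listed twice keeps its first offset. The small standalone sketch below reproduces that offset bookkeeping; the hero names and arities are made-up examples, not taken from the tests above.

// Sketch of the injected-value offset assignment in Subgraph::ForEpilogue:
// each distinct hero gets a start index, advanced by its arity.
#include <iostream>
#include <map>
#include <string>
#include <utility>
#include <vector>

int main() {
  // (hero name, arity) pairs; a hero listed twice keeps its first offset.
  std::vector<std::pair<std::string, int>> heroes = {
      {"reduce", 2},   // e.g. a variadic reduce producing a 2-tuple
      {"transpose", 1},
      {"reduce", 2}};  // duplicate: ignored

  std::map<std::string, int> start;  // hero -> first injected argument index
  int index = 0;
  for (const auto& [name, arity] : heroes) {
    if (start.emplace(name, index).second) {
      index += arity;
    }
  }

  for (const auto& [name, offset] : start) {
    std::cout << name << " starts at injected arg " << offset << "\n";
  }
  std::cout << "num_injected_values = " << index << "\n";
}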
c423b68d-59a1-4b87-b4ec-3a13b8143d37
cpp
tensorflow/tensorflow
command_buffer_thunk
third_party/xla/xla/service/gpu/runtime/command_buffer_thunk.cc
third_party/xla/xla/service/gpu/runtime/command_buffer_thunk_test.cc
#include "xla/service/gpu/runtime/command_buffer_thunk.h" #include <algorithm> #include <cstdint> #include <memory> #include <optional> #include <utility> #include <vector> #include "absl/base/thread_annotations.h" #include "absl/status/status.h" #include "absl/synchronization/mutex.h" #include "xla/service/buffer_assignment.h" #include "xla/service/gpu/buffer_allocations.h" #include "xla/service/gpu/runtime/annotation.h" #include "xla/service/gpu/runtime/command_buffer_cmd.h" #include "xla/service/gpu/runtime/sequential_thunk.h" #include "xla/service/gpu/runtime/thunk.h" #include "xla/stream_executor/command_buffer.h" #include "xla/stream_executor/device_memory.h" #include "xla/stream_executor/stream_executor.h" #include "tsl/platform/env.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" #include "tsl/platform/statusor.h" #include "tsl/profiler/lib/profiler_lock.h" #include "tsl/profiler/lib/traceme.h" #include "tsl/profiler/lib/traceme_encode.h" namespace xla::gpu { using tsl::profiler::TraceMe; using tsl::profiler::TraceMeEncode; CommandBufferThunk::ExecutorCommandBuffer::ExecutorCommandBuffer( std::unique_ptr<se::CommandBuffer> command_buffer) : command_buffer(std::move(command_buffer)) {} CommandBufferThunk::CommandBufferThunk( CommandBufferCmdSequence commands, ThunkInfo thunk_info, std::unique_ptr<SequentialThunk> thunks, bool enable_command_buffers_during_profiling) : Thunk(Thunk::kCommandBuffer, std::move(thunk_info)), commands_(std::move(commands)), thunks_(std::move(thunks)), enable_command_buffers_during_profiling_( enable_command_buffers_during_profiling), state_(std::make_shared<State>()) { EvictCommandBuffers(); TrackCommandBuffers(state_); } bool CommandBufferThunk::ExecutorCommandBuffer::ShouldUpdateCommandBuffer( const CommandBufferCmdSequence& commands, const Thunk::ExecuteParams& params) { if (commands.force_update()) { return true; } bool should_update = false; const BufferAllocations* allocs = params.buffer_allocations; for (BufferAllocation::Index index : commands.allocs_indices()) { se::DeviceMemoryBase alloc = allocs->GetDeviceAddress(index); if (recorded_allocs.size() <= index) { recorded_allocs.resize(index + 1); should_update = true; } if (!recorded_allocs[index].IsSameAs(alloc)) { recorded_allocs[index] = alloc; should_update = true; } } return should_update; } absl::Status CommandBufferThunk::Prepare(const PrepareParams& params, ResourceRequests& resource_requests) { if (commands_.empty()) return absl::OkStatus(); TF_RETURN_IF_ERROR(commands_.Prepare(params, resource_requests)); if (thunks_) { TF_RETURN_IF_ERROR(thunks_->Prepare(params, resource_requests)); } return absl::OkStatus(); } absl::Status CommandBufferThunk::Initialize(const InitializeParams& params) { if (commands_.empty()) return absl::OkStatus(); TF_ASSIGN_OR_RETURN(std::shared_ptr<ExecutorCommandBuffer> cmd_buffer, GetOrCreateCommandBuffer(params.executor)); absl::MutexLock lock(&cmd_buffer->mutex); TF_RETURN_IF_ERROR(commands_.Initialize(params, cmd_buffer->state)); if (thunks_) { TF_RETURN_IF_ERROR(thunks_->Initialize(params)); } Thunk::ExecuteParams execute_params( params.buffer_allocations, params.stream, params.command_buffer_trace_stream, params.collective_params, params.collective_cliques, nullptr, nullptr, nullptr, nullptr, params.ffi_execution_context); if (cmd_buffer->command_buffer->state() == se::CommandBuffer::State::kCreate && cmd_buffer->ShouldUpdateCommandBuffer(commands_, execute_params)) { VLOG(3) << "Initialize command buffer on device #" << 
params.executor->device_ordinal() << " by recoding command buffer cmd sequence" << "; num_commands=" << commands_.size(); TraceMe trace([&] { return TraceMeEncode("command_buffer::initialize", {{"device", params.executor->device_ordinal()}, {"num_commands", commands_.size()}}); }); uint64_t start_micros = tsl::Env::Default()->NowMicros(); CommandBufferCmd::RecordParams record_params = {cmd_buffer->state}; TF_RETURN_IF_ERROR(commands_.Record(execute_params, record_params, cmd_buffer->command_buffer.get())); uint64_t end_micros = tsl::Env::Default()->NowMicros(); VLOG(3) << "Initialized command buffer on device #" << params.executor->device_ordinal() << " in " << (end_micros - start_micros) << " μs; num_commands=" << commands_.size(); cmd_buffer->num_executions = 0; } return absl::OkStatus(); } absl::Status CommandBufferThunk::ExecuteOnStream(const ExecuteParams& params) { if (commands_.empty()) return absl::OkStatus(); if (tsl::profiler::ProfilerLock::HasActiveSession() && thunks_ && !enable_command_buffers_during_profiling_) { VLOG(1) << "Execute command buffer thunk as a regular thunk sequence " "because we detected active profiling session"; TF_RETURN_IF_ERROR(thunks_->ExecuteOnStream(params)); return absl::OkStatus(); } se::StreamExecutor* executor = params.stream->parent(); TF_ASSIGN_OR_RETURN(std::shared_ptr<ExecutorCommandBuffer> cmd_buffer, GetOrCreateCommandBuffer(executor)); absl::MutexLock lock(&cmd_buffer->mutex); if (cmd_buffer->ShouldUpdateCommandBuffer(commands_, params)) { VLOG(3) << "Update command buffer on device #" << executor->device_ordinal() << " by recoding command buffer cmd sequence" << " after " << cmd_buffer->num_executions << " executions since last update" << "; num_commands=" << commands_.size(); TraceMe trace([&] { cmd_buffer->mutex.AssertHeld(); return TraceMeEncode("command_buffer::update", {{"device", executor->device_ordinal()}, {"num_commands", commands_.size()}, {"num_executions", cmd_buffer->num_executions}}); }); uint64_t start_micros = tsl::Env::Default()->NowMicros(); CommandBufferCmd::RecordParams record_params = {cmd_buffer->state}; TF_RETURN_IF_ERROR(commands_.Record(params, record_params, cmd_buffer->command_buffer.get())); uint64_t end_micros = tsl::Env::Default()->NowMicros(); VLOG(3) << "Updated command buffer in " << (end_micros - start_micros) << " μs; num_commands=" << commands_.size(); cmd_buffer->num_executions = 0; } ++cmd_buffer->num_executions; VLOG(3) << "Execute command buffer on device #" << executor->device_ordinal() << "; num_executions=" << cmd_buffer->num_executions; TraceMe trace([&] { cmd_buffer->mutex.AssertHeld(); return TraceMeEncode("command_buffer::execute", {{"device", executor->device_ordinal()}, {"num_commands", commands_.size()}, {"num_executions", cmd_buffer->num_executions}}); }); return cmd_buffer->command_buffer->Submit(params.stream); } absl::StatusOr<std::shared_ptr<CommandBufferThunk::ExecutorCommandBuffer>> CommandBufferThunk::GetOrCreateCommandBuffer(se::StreamExecutor* executor) { absl::MutexLock lock(&state_->mutex); if (auto it = state_->command_buffers.find(executor); it != state_->command_buffers.end()) { return it->second; } TF_ASSIGN_OR_RETURN( auto command_buffer, executor->CreateCommandBuffer(se::CommandBuffer::Mode::kPrimary)); auto emplaced = state_->command_buffers.emplace( executor, std::make_shared<ExecutorCommandBuffer>(std::move(command_buffer))); return emplaced.first->second; } struct CommandBufferThunk::GlobalState { absl::Mutex mutex; std::vector<std::weak_ptr<CommandBufferThunk::State>> 
state ABSL_GUARDED_BY(mutex); }; CommandBufferThunk::GlobalState* CommandBufferThunk::GetGlobalState() { static auto* global_state = new GlobalState(); return global_state; } void CommandBufferThunk::TrackCommandBuffers( std::weak_ptr<CommandBufferThunk::State> state) { auto* global_state = GetGlobalState(); absl::MutexLock global_state_lock(&global_state->mutex); global_state->state.push_back(state); } void CommandBufferThunk::EvictCommandBuffers() { TraceMe trace([&] { return "EvictCommandBuffers"; }); auto* global_state = GetGlobalState(); absl::MutexLock global_state_lock(&global_state->mutex); VLOG(3) << "Evict command buffer thunk command buffers; tracked thunks = " << global_state->state.size(); global_state->state.erase( std::remove_if(global_state->state.begin(), global_state->state.end(), [](auto& weak_ptr) { return weak_ptr.expired(); }), global_state->state.end()); int64_t num_evicted = 0; for (auto& weak_ptr : global_state->state) { auto ptr = weak_ptr.lock(); if (!ptr) continue; absl::MutexLock state_lock(&ptr->mutex); num_evicted += ptr->command_buffers.size(); ptr->command_buffers.clear(); } if (num_evicted > 0) { VLOG(3) << "Evicted " << num_evicted << " command buffer thunk command buffers"; } } }
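Editor's note: ExecutorCommandBuffer::ShouldUpdateCommandBuffer above re-records the command buffer only when a tracked buffer allocation now resolves to a different device address (or the command sequence forces an update). The standalone sketch below shows that change-detection bookkeeping with plain pointers standing in for se::DeviceMemoryBase; the struct and method names are illustrative, not the XLA API.

// Sketch of the allocation-tracking rule in ShouldUpdateCommandBuffer: keep
// the last-seen address per allocation index and request an update when any
// tracked index is new or has moved.
#include <cstddef>
#include <iostream>
#include <vector>

struct RecordedAllocs {
  std::vector<const void*> addrs;  // Last recorded address per alloc index.

  bool ShouldUpdate(const std::vector<int>& tracked_indices,
                    const std::vector<const void*>& current) {
    bool should_update = false;
    for (int index : tracked_indices) {
      if (addrs.size() <= static_cast<std::size_t>(index)) {
        addrs.resize(index + 1, nullptr);
        should_update = true;  // Never seen this index before.
      }
      if (addrs[index] != current[index]) {
        addrs[index] = current[index];
        should_update = true;  // Buffer moved since the last recording.
      }
    }
    return should_update;
  }
};

int main() {
  int a = 0, b = 0, b2 = 0;
  RecordedAllocs recorded;
  std::vector<int> tracked = {0, 1};
  std::cout << recorded.ShouldUpdate(tracked, {&a, &b}) << "\n";   // 1: first run
  std::cout << recorded.ShouldUpdate(tracked, {&a, &b}) << "\n";   // 0: unchanged
  std::cout << recorded.ShouldUpdate(tracked, {&a, &b2}) << "\n";  // 1: b moved
}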
#include "xla/service/gpu/runtime/command_buffer_thunk.h" #include <algorithm> #include <cstdint> #include <memory> #include <optional> #include <string> #include <thread> #include <utility> #include <vector> #include "absl/status/statusor.h" #include "absl/strings/ascii.h" #include "absl/types/span.h" #include "xla/service/buffer_assignment.h" #include "xla/service/gpu/buffer_allocations.h" #include "xla/service/gpu/launch_dimensions.h" #include "xla/service/gpu/matmul_utils.h" #include "xla/service/gpu/runtime/command_buffer_cmd.h" #include "xla/service/gpu/runtime/memset_thunk.h" #include "xla/service/gpu/runtime/sequential_thunk.h" #include "xla/service/gpu/runtime/thunk.h" #include "xla/service/platform_util.h" #include "xla/service/service_executable_run_options.h" #include "xla/shape_util.h" #include "xla/stream_executor/blas.h" #include "xla/stream_executor/command_buffer.h" #include "xla/stream_executor/device_memory.h" #include "xla/stream_executor/device_memory_allocator.h" #include "xla/stream_executor/gpu/gpu_test_kernels.h" #include "xla/stream_executor/gpu/gpu_test_kernels_fatbin.h" #include "xla/stream_executor/gpu/gpu_types.h" #include "xla/stream_executor/kernel.h" #include "xla/stream_executor/kernel_spec.h" #include "xla/stream_executor/platform.h" #include "xla/stream_executor/platform_manager.h" #include "xla/stream_executor/stream_executor.h" #include "xla/stream_executor/stream_executor_memory_allocator.h" #include "xla/tsl/lib/core/status_test_util.h" #include "xla/types.h" #include "xla/xla_data.pb.h" #include "tsl/platform/statusor.h" #include "tsl/platform/test.h" #include "tsl/profiler/lib/profiler_lock.h" #ifdef GOOGLE_CUDA #include "third_party/gpus/cuda/include/cuda.h" #endif namespace xla::gpu { using MemoryAccess = CommandBufferCmd::MemoryAccess; using KernelArgsPacking = se::MultiKernelLoaderSpec::KernelArgsPacking; namespace { se::StreamExecutor* GpuExecutor() { auto name = absl::AsciiStrToUpper(PlatformUtil::CanonicalPlatformName("gpu").value()); auto* platform = se::PlatformManager::PlatformWithName(name).value(); return platform->ExecutorForDevice(0).value(); } struct OwningExecutableSource { std::string text; std::vector<uint8_t> binary; explicit operator Thunk::ExecutableSource() const { return {text, binary}; } }; absl::StatusOr<OwningExecutableSource> ExecutableSource() { TF_ASSIGN_OR_RETURN(std::vector<uint8_t> fatbin, se::gpu::GetGpuTestKernelsFatbin()); return OwningExecutableSource{{}, fatbin}; } KernelArgsPacking CreateDefaultArgsPacking() { using Packed = absl::StatusOr<std::unique_ptr<se::KernelArgsPackedArrayBase>>; return [=](const se::Kernel& kernel, const se::KernelArgs& args) -> Packed { auto* mem_args = se::Cast<se::KernelArgsDeviceMemoryArray>(&args); return se::PackKernelArgs(mem_args->device_memory_args(), args.number_of_shared_bytes()); }; } bool IsAtLeastCuda12300() { #if defined(TENSORFLOW_USE_ROCM) return false; #endif #if CUDA_VERSION >= 12030 return true; #endif return false; } constexpr auto s0 = ExecutionStreamId(0); constexpr auto s1 = ExecutionStreamId(1); } TEST(CommandBufferThunkTest, MemcpyCmd) { se::StreamExecutor* executor = GpuExecutor(); TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream()); int64_t length = 4; int64_t byte_length = sizeof(int32_t) * length; se::DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0); se::DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0); TF_ASSERT_OK(stream->Memset32(&a, 42, byte_length)); TF_ASSERT_OK(stream->MemZero(&b, byte_length)); 
BufferAllocation alloc_a(0, byte_length, 0); BufferAllocation alloc_b(1, byte_length, 0); BufferAllocation::Slice slice_a(&alloc_a, 0, byte_length); BufferAllocation::Slice slice_b(&alloc_b, 0, byte_length); CommandBufferCmdSequence commands; commands.Emplace<MemcpyDeviceToDeviceCmd>(s0, slice_b, slice_a, byte_length); CommandBufferThunk thunk(std::move(commands), Thunk::ThunkInfo()); se::StreamExecutorMemoryAllocator allocator(executor); ServiceExecutableRunOptions run_options; BufferAllocations allocations({a, b}, 0, &allocator); Thunk::ExecuteParams params = Thunk::ExecuteParams::Create( run_options, allocations, stream.get(), stream.get(), nullptr, nullptr); TF_ASSERT_OK(thunk.ExecuteOnStream(params)); TF_ASSERT_OK(stream->BlockHostUntilDone()); std::vector<int32_t> dst(4, 0); TF_ASSERT_OK(stream->Memcpy(dst.data(), b, byte_length)); ASSERT_EQ(dst, std::vector<int32_t>(4, 42)); TF_ASSERT_OK(stream->MemZero(&b, byte_length)); TF_ASSERT_OK(thunk.ExecuteOnStream(params)); TF_ASSERT_OK(stream->BlockHostUntilDone()); std::fill(dst.begin(), dst.end(), 0); TF_ASSERT_OK(stream->Memcpy(dst.data(), b, byte_length)); ASSERT_EQ(dst, std::vector<int32_t>(4, 42)); } TEST(CommandBufferThunkTest, MemzeroCmd) { se::StreamExecutor* executor = GpuExecutor(); TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream()); int64_t length = 4; int64_t byte_length = sizeof(int32_t) * length; se::DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0); TF_ASSERT_OK(stream->Memset32(&a, 42, byte_length)); BufferAllocation alloc_a(0, byte_length, 0); BufferAllocation::Slice slice_a(&alloc_a, 0, byte_length); CommandBufferCmdSequence commands; commands.Emplace<MemzeroCmd>(s0, slice_a); CommandBufferThunk thunk(std::move(commands), Thunk::ThunkInfo()); ServiceExecutableRunOptions run_options; se::StreamExecutorMemoryAllocator allocator(executor); BufferAllocations allocations({a}, 0, &allocator); Thunk::ExecuteParams params = Thunk::ExecuteParams::Create( run_options, allocations, stream.get(), stream.get(), nullptr, nullptr); TF_ASSERT_OK(thunk.ExecuteOnStream(params)); TF_ASSERT_OK(stream->BlockHostUntilDone()); std::vector<int32_t> dst(4, 0); TF_ASSERT_OK(stream->Memcpy(dst.data(), a, byte_length)); ASSERT_EQ(dst, std::vector<int32_t>(4, 0)); } TEST(CommandBufferThunkTest, Memset32Cmd) { se::StreamExecutor* executor = GpuExecutor(); TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream()); int64_t length = 4; int64_t byte_length = sizeof(int32_t) * length; se::DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0); TF_ASSERT_OK(stream->Memset32(&a, 42, byte_length)); BufferAllocation alloc_a(0, byte_length, 0); BufferAllocation::Slice slice_a(&alloc_a, 0, byte_length); CommandBufferCmdSequence commands; commands.Emplace<Memset32Cmd>(s0, slice_a, int32_t{84}); CommandBufferThunk thunk(std::move(commands), Thunk::ThunkInfo()); ServiceExecutableRunOptions run_options; se::StreamExecutorMemoryAllocator allocator(executor); BufferAllocations allocations({a}, 0, &allocator); Thunk::ExecuteParams params = Thunk::ExecuteParams::Create( run_options, allocations, stream.get(), stream.get(), nullptr, nullptr); TF_ASSERT_OK(thunk.ExecuteOnStream(params)); TF_ASSERT_OK(stream->BlockHostUntilDone()); std::vector<int32_t> dst(4, 0); TF_ASSERT_OK(stream->Memcpy(dst.data(), a, byte_length)); ASSERT_EQ(dst, std::vector<int32_t>(4, 84)); } TEST(CommandBufferThunkTest, Memset32CmdCommandBuffersDisabledDuringProfiling) { se::StreamExecutor* executor = GpuExecutor(); TF_ASSERT_OK_AND_ASSIGN(auto 
stream, executor->CreateStream()); int64_t length = 4; int64_t byte_length = sizeof(int32_t) * length; se::DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0); TF_ASSERT_OK(stream->Memset32(&a, 42, byte_length)); BufferAllocation alloc_a(0, byte_length, 0); BufferAllocation::Slice slice_a(&alloc_a, 0, byte_length); auto memset_thunk = std::make_unique<Memset32BitValueThunk>(Thunk::ThunkInfo(), 84, slice_a); std::vector<std::unique_ptr<Thunk>> thunks; thunks.push_back(std::move(memset_thunk)); auto seq_thunks = std::make_unique<SequentialThunk>(Thunk::ThunkInfo(), std::move(thunks)); CommandBufferCmdSequence commands; commands.Emplace<Memset32Cmd>(s0, slice_a, int32_t{12}); constexpr bool kProfileCommandBuffersEnabled = false; CommandBufferThunk thunk(std::move(commands), Thunk::ThunkInfo(), std::move(seq_thunks), kProfileCommandBuffersEnabled); ServiceExecutableRunOptions run_options; se::StreamExecutorMemoryAllocator allocator(executor); BufferAllocations allocations({a}, 0, &allocator); Thunk::ExecuteParams params = Thunk::ExecuteParams::Create( run_options, allocations, stream.get(), stream.get(), nullptr, nullptr); TF_ASSERT_OK_AND_ASSIGN(auto profiler_lock, tsl::profiler::ProfilerLock::Acquire()); TF_ASSERT_OK(thunk.ExecuteOnStream(params)); TF_ASSERT_OK(stream->BlockHostUntilDone()); std::vector<int32_t> dst(4, 0); TF_ASSERT_OK(stream->Memcpy(dst.data(), a, byte_length)); ASSERT_EQ(dst, std::vector<int32_t>(4, 84)); } TEST(CommandBufferThunkTest, Memset32CmdCommandBuffersEnabledDuringProfiling) { se::StreamExecutor* executor = GpuExecutor(); TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream()); int64_t length = 4; int64_t byte_length = sizeof(int32_t) * length; se::DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0); TF_ASSERT_OK(stream->Memset32(&a, 42, byte_length)); BufferAllocation alloc_a(0, byte_length, 0); BufferAllocation::Slice slice_a(&alloc_a, 0, byte_length); auto memset_thunk = std::make_unique<Memset32BitValueThunk>(Thunk::ThunkInfo(), 84, slice_a); std::vector<std::unique_ptr<Thunk>> thunks; thunks.push_back(std::move(memset_thunk)); auto seq_thunks = std::make_unique<SequentialThunk>(Thunk::ThunkInfo(), std::move(thunks)); CommandBufferCmdSequence commands; commands.Emplace<Memset32Cmd>(s0, slice_a, int32_t{12}); constexpr bool kProfileCommandBuffersEnabled = true; CommandBufferThunk thunk(std::move(commands), Thunk::ThunkInfo(), std::move(seq_thunks), kProfileCommandBuffersEnabled); ServiceExecutableRunOptions run_options; se::StreamExecutorMemoryAllocator allocator(executor); BufferAllocations allocations({a}, 0, &allocator); Thunk::ExecuteParams params = Thunk::ExecuteParams::Create( run_options, allocations, stream.get(), stream.get(), nullptr, nullptr); TF_ASSERT_OK_AND_ASSIGN(auto profiler_lock, tsl::profiler::ProfilerLock::Acquire()); TF_ASSERT_OK(thunk.ExecuteOnStream(params)); TF_ASSERT_OK(stream->BlockHostUntilDone()); std::vector<int32_t> dst(4, 0); TF_ASSERT_OK(stream->Memcpy(dst.data(), a, byte_length)); ASSERT_EQ(dst, std::vector<int32_t>(4, 12)); } TEST(CommandBufferThunkTest, Memset32CmdOnDifferentStreams) { se::StreamExecutor* executor = GpuExecutor(); TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream()); se::DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(2, 0); TF_ASSERT_OK(stream->MemZero(&a, 2 * sizeof(int32_t))); BufferAllocation alloc(0, a.size(), 0); BufferAllocation::Slice slice0(&alloc, 0 * sizeof(int32_t), sizeof(int32_t)); BufferAllocation::Slice slice1(&alloc, 1 * 
sizeof(int32_t), sizeof(int32_t)); CommandBufferCmdSequence commands; commands.Emplace<Memset32Cmd>(s0, slice0, int32_t{12}); commands.Emplace<Memset32Cmd>(s1, slice1, int32_t{34}); CommandBufferThunk thunk(std::move(commands), Thunk::ThunkInfo()); ServiceExecutableRunOptions run_options; se::StreamExecutorMemoryAllocator allocator(executor); BufferAllocations allocations({a}, 0, &allocator); Thunk::ExecuteParams params = Thunk::ExecuteParams::Create( run_options, allocations, stream.get(), stream.get(), nullptr, nullptr); TF_ASSERT_OK(thunk.ExecuteOnStream(params)); TF_ASSERT_OK(stream->BlockHostUntilDone()); std::vector<int32_t> dst(2, 0); TF_ASSERT_OK(stream->Memcpy(dst.data(), a, a.size())); ASSERT_EQ(dst, std::vector<int32_t>({12, 34})); } TEST(CommandBufferThunkTest, LaunchCmd) { se::StreamExecutor* executor = GpuExecutor(); TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream()); int64_t length = 4; int64_t byte_length = sizeof(int32_t) * length; se::DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0); se::DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0); TF_ASSERT_OK(stream->Memset32(&a, 42, byte_length)); TF_ASSERT_OK(stream->MemZero(&b, byte_length)); BufferAllocation alloc_a(0, byte_length, 0); BufferAllocation alloc_b(1, byte_length, 0); BufferAllocation::Slice slice_a(&alloc_a, 0, byte_length); BufferAllocation::Slice slice_b(&alloc_b, 0, byte_length); auto args = {slice_a, slice_a, slice_b}; auto args_access = {MemoryAccess::kRead, MemoryAccess::kRead, MemoryAccess::kWrite}; CommandBufferCmdSequence commands; commands.Emplace<LaunchCmd>(s0, "AddI32", args, args_access, LaunchDimensions(1, 4), 0); CommandBufferThunk thunk(std::move(commands), Thunk::ThunkInfo()); ServiceExecutableRunOptions run_options; se::StreamExecutorMemoryAllocator allocator(executor); BufferAllocations allocations({a, b}, 0, &allocator); Thunk::ExecuteParams params = Thunk::ExecuteParams::Create( run_options, allocations, stream.get(), stream.get(), nullptr, nullptr); TF_ASSERT_OK_AND_ASSIGN(OwningExecutableSource source, ExecutableSource()); TF_ASSERT_OK( thunk.Initialize({executor, static_cast<Thunk::ExecutableSource>(source), &allocations, stream.get()})); TF_ASSERT_OK(thunk.ExecuteOnStream(params)); TF_ASSERT_OK(stream->BlockHostUntilDone()); std::vector<int32_t> dst(4, 0); TF_ASSERT_OK(stream->Memcpy(dst.data(), b, byte_length)); ASSERT_EQ(dst, std::vector<int32_t>(4, 42 + 42)); se::DeviceMemory<int32_t> c = executor->AllocateArray<int32_t>(length, 0); TF_ASSERT_OK(stream->MemZero(&c, byte_length)); allocations = BufferAllocations({a, c}, 0, &allocator); TF_ASSERT_OK(thunk.ExecuteOnStream(params)); TF_ASSERT_OK(stream->BlockHostUntilDone()); std::fill(dst.begin(), dst.end(), 0); TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length)); ASSERT_EQ(dst, std::vector<int32_t>(4, 42 + 42)); TF_ASSERT_OK(stream->MemZero(&c, byte_length)); TF_ASSERT_OK(thunk.ExecuteOnStream(params)); TF_ASSERT_OK(stream->BlockHostUntilDone()); std::fill(dst.begin(), dst.end(), 0); TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length)); ASSERT_EQ(dst, std::vector<int32_t>(4, 42 + 42)); } TEST(CommandBufferThunkTest, CustomAddKernelLaunchCmd) { se::StreamExecutor* executor = GpuExecutor(); TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream()); auto packing = CreateDefaultArgsPacking(); se::MultiKernelLoaderSpec spec(3, std::move(packing)); spec.AddInProcessSymbol(se::gpu::internal::GetAddI32Kernel(), "add"); auto custom_kernel = CustomKernel("AddI32", std::move(spec), 
se::BlockDim(), se::ThreadDim(4, 1, 1), 0); int64_t length = 4; int64_t byte_length = sizeof(int32_t) * length; se::DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0); se::DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0); TF_ASSERT_OK(stream->Memset32(&a, 42, byte_length)); TF_ASSERT_OK(stream->MemZero(&b, byte_length)); BufferAllocation alloc_a(0, byte_length, 0); BufferAllocation alloc_b(1, byte_length, 0); BufferAllocation::Slice slice_a(&alloc_a, 0, byte_length); BufferAllocation::Slice slice_b(&alloc_b, 0, byte_length); auto args = {slice_a, slice_a, slice_b}; auto args_access = {MemoryAccess::kRead, MemoryAccess::kRead, MemoryAccess::kWrite}; CommandBufferCmdSequence commands; commands.Emplace<LaunchCmd>(s0, "AddI32", args, args_access, LaunchDimensions(1, 4), 0); CommandBufferThunk thunk(std::move(commands), Thunk::ThunkInfo()); ServiceExecutableRunOptions run_options; se::StreamExecutorMemoryAllocator allocator(executor); BufferAllocations allocations({a, b}, 0, &allocator); Thunk::ExecuteParams params = Thunk::ExecuteParams::Create( run_options, allocations, stream.get(), stream.get(), nullptr, nullptr); TF_ASSERT_OK_AND_ASSIGN(OwningExecutableSource source, ExecutableSource()); TF_ASSERT_OK( thunk.Initialize({executor, static_cast<Thunk::ExecutableSource>(source), &allocations, stream.get()})); TF_ASSERT_OK(thunk.ExecuteOnStream(params)); TF_ASSERT_OK(stream->BlockHostUntilDone()); std::vector<int32_t> dst(4, 0); TF_ASSERT_OK(stream->Memcpy(dst.data(), b, byte_length)); ASSERT_EQ(dst, std::vector<int32_t>(4, 42 + 42)); se::DeviceMemory<int32_t> c = executor->AllocateArray<int32_t>(length, 0); TF_ASSERT_OK(stream->MemZero(&c, byte_length)); allocations = BufferAllocations({a, c}, 0, &allocator); TF_ASSERT_OK(thunk.ExecuteOnStream(params)); TF_ASSERT_OK(stream->BlockHostUntilDone()); std::fill(dst.begin(), dst.end(), 0); TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length)); ASSERT_EQ(dst, std::vector<int32_t>(4, 42 + 42)); TF_ASSERT_OK(stream->MemZero(&c, byte_length)); TF_ASSERT_OK(thunk.ExecuteOnStream(params)); TF_ASSERT_OK(stream->BlockHostUntilDone()); std::fill(dst.begin(), dst.end(), 0); TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length)); ASSERT_EQ(dst, std::vector<int32_t>(4, 42 + 42)); } TEST(CommandBufferThunkTest, GemmCmd) { if (!IsAtLeastCuda12300()) { GTEST_SKIP() << "CUDA graph tracing is not supported"; } se::StreamExecutor* executor = GpuExecutor(); TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream()); int64_t lhs_length = sizeof(float) * 2 * 4; int64_t rhs_length = sizeof(float) * 4 * 3; int64_t out_length = sizeof(float) * 2 * 3; se::DeviceMemory<float> lhs = executor->AllocateArray<float>(2 * 4); std::vector<float> lhs_arr{1, 2, 3, 4, 5, 6, 7, 8}; TF_ASSERT_OK(stream->Memcpy(&lhs, lhs_arr.data(), lhs_length)); se::DeviceMemory<float> rhs = executor->AllocateArray<float>(4 * 3); std::vector<float> rhs_arr(12, 1); TF_ASSERT_OK(stream->Memcpy(&rhs, rhs_arr.data(), rhs_length)); se::DeviceMemory<float> out = executor->AllocateArray<float>(2 * 3); TF_ASSERT_OK(stream->MemZero(&out, out_length)); se::DeviceMemory<float> workspace = executor->AllocateArray<float>(1024 * 1024); TF_ASSERT_OK(stream->MemZero(&workspace, 1024 * 1024)); BufferAllocation alloc_lhs(0, lhs_length, 0); BufferAllocation alloc_rhs(1, rhs_length, 0); BufferAllocation alloc_out(2, out_length, 0); BufferAllocation alloc_workspace(3, 1024 * 1024, 0); BufferAllocation::Slice slice_lhs(&alloc_lhs, 0, lhs_length); BufferAllocation::Slice 
slice_rhs(&alloc_rhs, 0, rhs_length); BufferAllocation::Slice slice_out(&alloc_out, 0, out_length); BufferAllocation::Slice slice_workspace(&alloc_workspace, 0, 1024 * 1024); auto config = GemmConfig::For(ShapeUtil::MakeShape(PrimitiveType::F32, {2, 4}), {}, {1}, ShapeUtil::MakeShape(PrimitiveType::F32, {4, 3}), {}, {0}, ShapeUtil::MakeShape(PrimitiveType::F32, {2, 3}), 1.0, 0.0, 0.0, PrecisionConfig::ALG_UNSET, std::nullopt, se::blas::kDefaultComputePrecision, false, false); ASSERT_TRUE(config.ok()); CommandBufferCmdSequence commands; commands.Emplace<GemmCmd>(s0, config.value(), slice_lhs, slice_rhs, slice_out, slice_workspace, true); CommandBufferThunk thunk(std::move(commands), Thunk::ThunkInfo()); ServiceExecutableRunOptions run_options; se::StreamExecutorMemoryAllocator allocator(executor); BufferAllocations allocations({lhs, rhs, out, workspace}, 0, &allocator); Thunk::ExecuteParams params = Thunk::ExecuteParams::Create( run_options, allocations, stream.get(), stream.get(), nullptr, nullptr); Thunk::ExecutableSource source = {"", {}}; TF_ASSERT_OK(thunk.Initialize( {executor, source, &allocations, stream.get(), stream.get()})); TF_ASSERT_OK(thunk.ExecuteOnStream(params)); TF_ASSERT_OK(stream->BlockHostUntilDone()); std::vector<float> dst(6, 0); TF_ASSERT_OK(stream->Memcpy(dst.data(), out, out_length)); ASSERT_EQ(dst, std::vector<float>({10, 10, 10, 26, 26, 26})); se::DeviceMemory<float> updated_out = executor->AllocateArray<float>(2 * 3); TF_ASSERT_OK(stream->MemZero(&updated_out, out_length)); allocations = BufferAllocations({lhs, rhs, updated_out, workspace}, 0, &allocator); TF_ASSERT_OK(thunk.ExecuteOnStream(params)); TF_ASSERT_OK(stream->BlockHostUntilDone()); std::fill(dst.begin(), dst.end(), 0); TF_ASSERT_OK(stream->Memcpy(dst.data(), updated_out, out_length)); ASSERT_EQ(dst, std::vector<float>({10, 10, 10, 26, 26, 26})); TF_ASSERT_OK(stream->MemZero(&updated_out, out_length)); TF_ASSERT_OK(thunk.ExecuteOnStream(params)); TF_ASSERT_OK(stream->BlockHostUntilDone()); std::fill(dst.begin(), dst.end(), 0); TF_ASSERT_OK(stream->Memcpy(dst.data(), updated_out, out_length)); ASSERT_EQ(dst, std::vector<float>({10, 10, 10, 26, 26, 26})); } TEST(CommandBufferThunkTest, CublasLtCmd) { if (!IsAtLeastCuda12300()) { GTEST_SKIP() << "CUDA graph tracing is not supported"; } se::StreamExecutor* executor = GpuExecutor(); TF_ASSERT_OK_AND_ASSIGN(auto stream1, executor->CreateStream()); TF_ASSERT_OK_AND_ASSIGN(auto stream2, executor->CreateStream()); int64_t a_length = sizeof(float) * 2 * 4; int64_t b_length = sizeof(float) * 4 * 3; int64_t c_length = sizeof(float) * 2 * 3; int64_t d_length = sizeof(float) * 2 * 3; BufferAllocation alloc_a(0, a_length, 0); BufferAllocation alloc_b(1, b_length, 0); BufferAllocation alloc_c(2, c_length, 0); BufferAllocation alloc_d(3, d_length, 0); BufferAllocation alloc_workspace(4, 1024 * 1024, 0); BufferAllocation::Slice slice_a(&alloc_a, 0, a_length); BufferAllocation::Slice slice_b(&alloc_b, 0, b_length); BufferAllocation::Slice slice_c(&alloc_c, 0, c_length); BufferAllocation::Slice slice_d(&alloc_d, 0, d_length); BufferAllocation::Slice slice_workspace(&alloc_workspace, 0, 1024 * 1024); auto config = GemmConfig::For( ShapeUtil::MakeShape(PrimitiveType::F32, {2, 4}), {}, {1}, ShapeUtil::MakeShape(PrimitiveType::F32, {4, 3}), {}, {0}, ShapeUtil::MakeShape(PrimitiveType::F32, {2, 3}), nullptr, ShapeUtil::MakeShape(PrimitiveType::F32, {2, 3}), 1.0, 0, 1.0, PrecisionConfig::ALG_UNSET, std::nullopt, se::blas::kDefaultComputePrecision, false, false); 
ASSERT_TRUE(config.ok()); CommandBufferCmdSequence commands; commands.Emplace<CublasLtCmd>( s0, config.value(), se::gpu::BlasLt::Epilogue::kDefault, 0, slice_a, slice_b, slice_c, slice_d, BufferAllocation::Slice(), BufferAllocation::Slice(), BufferAllocation::Slice(), BufferAllocation::Slice(), BufferAllocation::Slice(), BufferAllocation::Slice(), BufferAllocation::Slice(), slice_workspace); CommandBufferThunk thunk(std::move(commands), Thunk::ThunkInfo()); std::vector<float> a_arr_1{1, 2, 3, 4, 5, 6, 7, 8}; std::vector<float> a_arr_2{2, 3, 4, 5, 6, 7, 8, 9}; std::vector<float> result_1{11, 11, 11, 27, 27, 27}; std::vector<float> result_2{15, 15, 15, 31, 31, 31}; auto run_cublaslt_test = [&](std::unique_ptr<se::Stream>& stream, std::vector<float> a_arr, std::vector<float> result) { se::DeviceMemory<float> a = executor->AllocateArray<float>(2 * 4); TF_ASSERT_OK(stream->Memcpy(&a, a_arr.data(), a_length)); se::DeviceMemory<float> b = executor->AllocateArray<float>(4 * 3); std::vector<float> b_arr(12, 1); TF_ASSERT_OK(stream->Memcpy(&b, b_arr.data(), b_length)); se::DeviceMemory<float> c = executor->AllocateArray<float>(2 * 3); std::vector<float> c_arr(6, 1); TF_ASSERT_OK(stream->Memcpy(&c, c_arr.data(), c_length)); se::DeviceMemory<float> d = executor->AllocateArray<float>(2 * 3); TF_ASSERT_OK(stream->MemZero(&d, d_length)); se::DeviceMemory<float> workspace = executor->AllocateArray<float>(1024 * 1024); TF_ASSERT_OK(stream->MemZero(&workspace, 1024 * 1024)); ServiceExecutableRunOptions run_options; se::StreamExecutorMemoryAllocator allocator(executor); BufferAllocations allocations({a, b, c, d, workspace}, 0, &allocator); Thunk::ExecuteParams params = Thunk::ExecuteParams::Create( run_options, allocations, stream.get(), stream.get(), nullptr, nullptr); Thunk::ExecutableSource source = {"", {}}; TF_ASSERT_OK(thunk.Initialize( {executor, source, &allocations, stream.get(), stream.get()})); TF_ASSERT_OK(thunk.ExecuteOnStream(params)); TF_ASSERT_OK(stream->BlockHostUntilDone()); std::vector<float> dst(6, 0); TF_ASSERT_OK(stream->Memcpy(dst.data(), d, d_length)); ASSERT_EQ(dst, result); se::DeviceMemory<float> updated_d = executor->AllocateArray<float>(2 * 3); TF_ASSERT_OK(stream->MemZero(&updated_d, d_length)); allocations = BufferAllocations({a, b, c, updated_d, workspace}, 0, &allocator); TF_ASSERT_OK(thunk.ExecuteOnStream(params)); TF_ASSERT_OK(stream->BlockHostUntilDone()); std::fill(dst.begin(), dst.end(), 0); TF_ASSERT_OK(stream->Memcpy(dst.data(), updated_d, d_length)); ASSERT_EQ(dst, result); TF_ASSERT_OK(stream->MemZero(&updated_d, d_length)); TF_ASSERT_OK(thunk.ExecuteOnStream(params)); TF_ASSERT_OK(stream->BlockHostUntilDone()); std::fill(dst.begin(), dst.end(), 0); TF_ASSERT_OK(stream->Memcpy(dst.data(), updated_d, d_length)); ASSERT_EQ(dst, result); }; std::thread t1(run_cublaslt_test, std::ref(stream1), a_arr_1, result_1); std::thread t2(run_cublaslt_test, std::ref(stream2), a_arr_2, result_2); t1.join(); t2.join(); } TEST(CommandBufferThunkTest, MultipleLaunchCmd) { se::StreamExecutor* executor = GpuExecutor(); TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream()); int64_t length = 4; int64_t byte_length = sizeof(int32_t) * length; se::DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0); se::DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0); se::DeviceMemory<int32_t> c = executor->AllocateArray<int32_t>(length, 0); se::DeviceMemory<int32_t> d = executor->AllocateArray<int32_t>(length, 0); TF_ASSERT_OK(stream->Memset32(&a, 42, 
byte_length)); TF_ASSERT_OK(stream->MemZero(&b, byte_length)); TF_ASSERT_OK(stream->Memset32(&c, 21, byte_length)); TF_ASSERT_OK(stream->MemZero(&d, byte_length)); BufferAllocation alloc_a(0, byte_length, 0); BufferAllocation alloc_b(1, byte_length, 0); BufferAllocation alloc_c(2, byte_length, 0); BufferAllocation alloc_d(3, byte_length, 0); BufferAllocation::Slice slice_a(&alloc_a, 0, byte_length); BufferAllocation::Slice slice_b(&alloc_b, 0, byte_length); BufferAllocation::Slice slice_c(&alloc_c, 0, byte_length); BufferAllocation::Slice slice_d(&alloc_d, 0, byte_length); auto args = {slice_a, slice_a, slice_b}; auto args_1 = {slice_c, slice_c, slice_d}; auto args_access = {MemoryAccess::kRead, MemoryAccess::kRead, MemoryAccess::kWrite}; CommandBufferCmdSequence commands; commands.Emplace<LaunchCmd>(s0, "AddI32", args, args_access, LaunchDimensions(1, 4), 0); commands.Emplace<LaunchCmd>(s0, "AddI32", args_1, args_access, LaunchDimensions(1, 4), 0); CommandBufferThunk thunk(std::move(commands), Thunk::ThunkInfo()); ServiceExecutableRunOptions run_options; se::StreamExecutorMemoryAllocator allocator(executor); BufferAllocations allocations({a, b, c, d}, 0, &allocator); Thunk::ExecuteParams params = Thunk::ExecuteParams::Create( run_options, allocations, stream.get(), stream.get(), nullptr, nullptr); TF_ASSERT_OK_AND_ASSIGN(OwningExecutableSource source, ExecutableSource()); TF_ASSERT_OK( thunk.Initialize({executor, static_cast<Thunk::ExecutableSource>(source), &allocations, stream.get()})); TF_ASSERT_OK(thunk.ExecuteOnStream(params)); TF_ASSERT_OK(stream->BlockHostUntilDone()); std::vector<int32_t> dst(4, 0); TF_ASSERT_OK(stream->Memcpy(dst.data(), b, byte_length)); ASSERT_EQ(dst, std::vector<int32_t>(4, 42 + 42)); std::fill(dst.begin(), dst.end(), 0); TF_ASSERT_OK(stream->Memcpy(dst.data(), d, byte_length)); ASSERT_EQ(dst, std::vector<int32_t>(4, 21 + 21)); BufferAllocation alloc_e(3, byte_length, 0); BufferAllocation::Slice slice_e(&alloc_e, 0, byte_length); se::DeviceMemory<int32_t> e = executor->AllocateArray<int32_t>(length, 0); TF_ASSERT_OK(stream->MemZero(&e, byte_length)); allocations = BufferAllocations({a, b, c, e}, 0, &allocator); TF_ASSERT_OK(thunk.ExecuteOnStream(params)); TF_ASSERT_OK(stream->BlockHostUntilDone()); std::fill(dst.begin(), dst.end(), 0); TF_ASSERT_OK(stream->Memcpy(dst.data(), b, byte_length)); ASSERT_EQ(dst, std::vector<int32_t>(4, 42 + 42)); std::fill(dst.begin(), dst.end(), 0); TF_ASSERT_OK(stream->Memcpy(dst.data(), e, byte_length)); ASSERT_EQ(dst, std::vector<int32_t>(4, 21 + 21)); TF_ASSERT_OK(stream->MemZero(&e, byte_length)); TF_ASSERT_OK(thunk.ExecuteOnStream(params)); TF_ASSERT_OK(stream->BlockHostUntilDone()); std::fill(dst.begin(), dst.end(), 0); TF_ASSERT_OK(stream->Memcpy(dst.data(), b, byte_length)); ASSERT_EQ(dst, std::vector<int32_t>(4, 42 + 42)); std::fill(dst.begin(), dst.end(), 0); TF_ASSERT_OK(stream->Memcpy(dst.data(), e, byte_length)); ASSERT_EQ(dst, std::vector<int32_t>(4, 21 + 21)); } TEST(CommandBufferThunkTest, IfCmd) { if (!IsAtLeastCuda12300()) { GTEST_SKIP() << "CUDA graph conditionals are not supported"; } se::StreamExecutor* executor = GpuExecutor(); TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream()); int64_t length = 4; int64_t byte_length = sizeof(int32_t) * length; se::DeviceMemory<bool> pred = executor->AllocateArray<bool>(1, 0); se::DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0); se::DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0); constexpr bool kTrue = true; 
TF_ASSERT_OK(stream->Memcpy(&pred, &kTrue, 1)); TF_ASSERT_OK(stream->Memset32(&a, 42, byte_length)); TF_ASSERT_OK(stream->MemZero(&b, byte_length)); BufferAllocation alloc_p(0, 1, 0); BufferAllocation alloc_a(1, byte_length, 0); BufferAllocation alloc_b(2, byte_length, 0); BufferAllocation::Slice slice_p(&alloc_p, 0, 1); BufferAllocation::Slice slice_a(&alloc_a, 0, byte_length); BufferAllocation::Slice slice_b(&alloc_b, 0, byte_length); auto args = {slice_a, slice_a, slice_b}; auto args_access = {MemoryAccess::kRead, MemoryAccess::kRead, MemoryAccess::kWrite}; CommandBufferCmdSequence then_commands; then_commands.Emplace<LaunchCmd>(s0, "AddI32", args, args_access, LaunchDimensions(1, 4), 0); CommandBufferCmdSequence commands; commands.Emplace<IfCmd>(s0, slice_p, std::move(then_commands)); CommandBufferThunk thunk(std::move(commands), Thunk::ThunkInfo()); ServiceExecutableRunOptions run_options; se::StreamExecutorMemoryAllocator allocator(executor); BufferAllocations allocations({pred, a, b}, 0, &allocator); Thunk::ExecuteParams params = Thunk::ExecuteParams::Create( run_options, allocations, stream.get(), stream.get(), nullptr, nullptr); TF_ASSERT_OK_AND_ASSIGN(OwningExecutableSource source, ExecutableSource()); TF_ASSERT_OK( thunk.Initialize({executor, static_cast<Thunk::ExecutableSource>(source), &allocations, stream.get()})); TF_ASSERT_OK(thunk.ExecuteOnStream(params)); TF_ASSERT_OK(stream->BlockHostUntilDone()); std::vector<int32_t> dst(4, 0); TF_ASSERT_OK(stream->Memcpy(dst.data(), b, byte_length)); ASSERT_EQ(dst, std::vector<int32_t>(4, 42 + 42)); se::DeviceMemory<int32_t> c = executor->AllocateArray<int32_t>(length, 0); TF_ASSERT_OK(stream->MemZero(&c, byte_length)); allocations = BufferAllocations({pred, a, c}, 0, &allocator); TF_ASSERT_OK(thunk.ExecuteOnStream(params)); TF_ASSERT_OK(stream->BlockHostUntilDone()); std::fill(dst.begin(), dst.end(), 0); TF_ASSERT_OK(stream->Memcpy(dst.data(), c, byte_length)); ASSERT_EQ(dst, std::vector<int32_t>(4, 42 + 42)); } TEST(CommandBufferThunkTest, IfElseCmd) { if (!IsAtLeastCuda12300()) { GTEST_SKIP() << "CUDA graph conditionals are not supported"; } se::StreamExecutor* executor = GpuExecutor(); TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream()); int64_t length = 4; int64_t byte_length = sizeof(int32_t) * length; se::DeviceMemory<bool> pred = executor->AllocateArray<bool>(1, 0); se::DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0); se::DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0); constexpr bool kTrue = true; TF_ASSERT_OK(stream->Memcpy(&pred, &kTrue, 1)); TF_ASSERT_OK(stream->Memset32(&a, 42, byte_length)); TF_ASSERT_OK(stream->MemZero(&b, byte_length)); BufferAllocation alloc_p(0, 1, 0); BufferAllocation alloc_a(1, byte_length, 0); BufferAllocation alloc_b(2, byte_length, 0); BufferAllocation::Slice slice_p(&alloc_p, 0, 1); BufferAllocation::Slice slice_a(&alloc_a, 0, byte_length); BufferAllocation::Slice slice_b(&alloc_b, 0, byte_length); CommandBufferCmdSequence then_commands; CommandBufferCmdSequence else_commands; auto args_access = {MemoryAccess::kRead, MemoryAccess::kRead, MemoryAccess::kWrite}; { auto args = {slice_a, slice_a, slice_b}; then_commands.Emplace<LaunchCmd>(s0, "AddI32", args, args_access, LaunchDimensions(1, 4), 0); } { auto args = {slice_b, slice_b, slice_b}; else_commands.Emplace<LaunchCmd>(s0, "AddI32", args, args_access, LaunchDimensions(1, 4), 0); } CommandBufferCmdSequence commands; commands.Emplace<IfElseCmd>(s0, slice_p, std::move(then_commands), 
std::move(else_commands)); CommandBufferThunk thunk(std::move(commands), Thunk::ThunkInfo()); ServiceExecutableRunOptions run_options; se::StreamExecutorMemoryAllocator allocator(executor); BufferAllocations allocations({pred, a, b}, 0, &allocator); Thunk::ExecuteParams params = Thunk::ExecuteParams::Create( run_options, allocations, stream.get(), stream.get(), nullptr, nullptr); TF_ASSERT_OK_AND_ASSIGN(OwningExecutableSource source, ExecutableSource()); TF_ASSERT_OK( thunk.Initialize({executor, static_cast<Thunk::ExecutableSource>(source), &allocations, stream.get()})); TF_ASSERT_OK(thunk.ExecuteOnStream(params)); TF_ASSERT_OK(stream->BlockHostUntilDone()); std::vector<int32_t> dst(4, 0); TF_ASSERT_OK(stream->Memcpy(dst.data(), b, byte_length)); ASSERT_EQ(dst, std::vector<int32_t>(4, 42 + 42)); constexpr bool kFalse = false; TF_ASSERT_OK(stream->Memcpy(&pred, &kFalse, 1)); TF_ASSERT_OK(thunk.ExecuteOnStream(params)); TF_ASSERT_OK(stream->BlockHostUntilDone()); TF_ASSERT_OK(stream->Memcpy(dst.data(), b, byte_length)); ASSERT_EQ(dst, std::vector<int32_t>(4, 2 * (42 + 42))); } TEST(CommandBufferThunkTest, CaseCmd) { if (!IsAtLeastCuda12300()) { GTEST_SKIP() << "CUDA graph conditionals are not supported"; } se::StreamExecutor* executor = GpuExecutor(); TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream()); int64_t length = 4; int64_t byte_length = sizeof(int32_t) * length; se::DeviceMemory<int32_t> index = executor->AllocateArray<int32_t>(1, 0); se::DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0); se::DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0); TF_ASSERT_OK(stream->Memset32(&index, 0, sizeof(int32_t))); TF_ASSERT_OK(stream->Memset32(&a, 42, byte_length)); TF_ASSERT_OK(stream->MemZero(&b, byte_length)); BufferAllocation alloc_i(0, 1, 0); BufferAllocation alloc_a(1, byte_length, 0); BufferAllocation alloc_b(2, byte_length, 0); BufferAllocation::Slice slice_i(&alloc_i, 0, sizeof(int32_t)); BufferAllocation::Slice slice_a(&alloc_a, 0, byte_length); BufferAllocation::Slice slice_b(&alloc_b, 0, byte_length); std::vector<CommandBufferCmdSequence> branches(2); auto args_access = {MemoryAccess::kRead, MemoryAccess::kRead, MemoryAccess::kWrite}; { auto args = {slice_a, slice_a, slice_b}; branches[0].Emplace<LaunchCmd>(s0, "AddI32", args, args_access, LaunchDimensions(1, 4), 0); } { auto args = {slice_b, slice_b, slice_b}; branches[1].Emplace<LaunchCmd>(s0, "AddI32", args, args_access, LaunchDimensions(1, 4), 0); } CommandBufferCmdSequence commands; commands.Emplace<CaseCmd>(s0, slice_i, std::move(branches)); CommandBufferThunk thunk(std::move(commands), Thunk::ThunkInfo()); ServiceExecutableRunOptions run_options; se::StreamExecutorMemoryAllocator allocator(executor); BufferAllocations allocations({index, a, b}, 0, &allocator); Thunk::ExecuteParams params = Thunk::ExecuteParams::Create( run_options, allocations, stream.get(), stream.get(), nullptr, nullptr); TF_ASSERT_OK_AND_ASSIGN(OwningExecutableSource source, ExecutableSource()); TF_ASSERT_OK( thunk.Initialize({executor, static_cast<Thunk::ExecutableSource>(source), &allocations, stream.get()})); TF_ASSERT_OK(thunk.ExecuteOnStream(params)); TF_ASSERT_OK(stream->BlockHostUntilDone()); std::vector<int32_t> dst(4, 0); TF_ASSERT_OK(stream->Memcpy(dst.data(), b, byte_length)); ASSERT_EQ(dst, std::vector<int32_t>(4, 42 + 42)); TF_ASSERT_OK(stream->Memset32(&index, 1, sizeof(int32_t))); TF_ASSERT_OK(thunk.ExecuteOnStream(params)); TF_ASSERT_OK(stream->BlockHostUntilDone()); 
TF_ASSERT_OK(stream->Memcpy(dst.data(), b, byte_length)); ASSERT_EQ(dst, std::vector<int32_t>(4, 2 * (42 + 42))); } TEST(CommandBufferThunkTest, ForCmd) { if (!IsAtLeastCuda12300()) { GTEST_SKIP() << "CUDA graph conditionals are not supported"; } se::StreamExecutor* executor = GpuExecutor(); TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream()); int64_t length = 4; int64_t byte_length = sizeof(int32_t) * length; se::DeviceMemory<int32_t> loop_cnt = executor->AllocateArray<int32_t>(1, 0); se::DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0); se::DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0); TF_ASSERT_OK(stream->Memset32(&loop_cnt, 0, sizeof(int32_t))); TF_ASSERT_OK(stream->Memset32(&a, 1, byte_length)); TF_ASSERT_OK(stream->MemZero(&b, byte_length)); BufferAllocation alloc_cnt(0, 1, 0); BufferAllocation alloc_a(1, byte_length, 0); BufferAllocation alloc_b(2, byte_length, 0); BufferAllocation::Slice slice_cnt(&alloc_cnt, 0, sizeof(int32_t)); BufferAllocation::Slice slice_a(&alloc_a, 0, byte_length); BufferAllocation::Slice slice_b(&alloc_b, 0, byte_length); auto args = {slice_a, slice_b, slice_b}; auto args_access = {MemoryAccess::kRead, MemoryAccess::kRead, MemoryAccess::kWrite}; CommandBufferCmdSequence body_commands; body_commands.Emplace<LaunchCmd>(s0, "AddI32", args, args_access, LaunchDimensions(1, 4), 0); CommandBufferCmdSequence commands; commands.Emplace<ForCmd>(s0, 10, slice_cnt, std::move(body_commands)); CommandBufferThunk thunk(std::move(commands), Thunk::ThunkInfo()); ServiceExecutableRunOptions run_options; se::StreamExecutorMemoryAllocator allocator(executor); BufferAllocations allocations({loop_cnt, a, b}, 0, &allocator); Thunk::ExecuteParams params = Thunk::ExecuteParams::Create( run_options, allocations, stream.get(), stream.get(), nullptr, nullptr); TF_ASSERT_OK_AND_ASSIGN(OwningExecutableSource source, ExecutableSource()); TF_ASSERT_OK( thunk.Initialize({executor, static_cast<Thunk::ExecutableSource>(source), &allocations, stream.get()})); TF_ASSERT_OK(thunk.ExecuteOnStream(params)); TF_ASSERT_OK(stream->BlockHostUntilDone()); std::vector<int32_t> dst(4, 0); TF_ASSERT_OK(stream->Memcpy(dst.data(), b, byte_length)); ASSERT_EQ(dst, std::vector<int32_t>(4, 10)); } TEST(CommandBufferThunkTest, WhileCmd) { } }
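The last test in the file above, TEST(CommandBufferThunkTest, WhileCmd), is an empty stub in this snapshot. As a non-authoritative sketch only, the following shows how such a test could be assembled from pieces the file already exercises (the IfCmd predicate buffer and the ForCmd body), using the WhileCmd constructor visible in command_buffer_cmd.cc further below. The predicate kernel name "IncAndCmpI32" and its two-argument signature are assumptions rather than something provided by the GetGpuTestKernelsFatbin() binary used here, and the final assertion is deliberately weak because the iteration count depends on that hypothetical kernel.

// Hypothetical sketch, not the repository's WhileCmd test.
TEST(CommandBufferThunkTest, WhileCmdSketch) {
  se::StreamExecutor* executor = GpuExecutor();
  TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream());

  int64_t length = 4;
  int64_t byte_length = sizeof(int32_t) * length;

  // pred gates the loop; loop_cnt, a and b mirror the ForCmd test above.
  se::DeviceMemory<bool> pred = executor->AllocateArray<bool>(1, 0);
  se::DeviceMemory<int32_t> loop_cnt = executor->AllocateArray<int32_t>(1, 0);
  se::DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0);
  se::DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0);

  constexpr bool kTrue = true;
  TF_ASSERT_OK(stream->Memcpy(&pred, &kTrue, 1));
  TF_ASSERT_OK(stream->Memset32(&loop_cnt, 0, sizeof(int32_t)));
  TF_ASSERT_OK(stream->Memset32(&a, 1, byte_length));
  TF_ASSERT_OK(stream->MemZero(&b, byte_length));

  BufferAllocation alloc_p(0, 1, 0);
  BufferAllocation alloc_cnt(1, sizeof(int32_t), 0);
  BufferAllocation alloc_a(2, byte_length, 0);
  BufferAllocation alloc_b(3, byte_length, 0);
  BufferAllocation::Slice slice_p(&alloc_p, 0, 1);
  BufferAllocation::Slice slice_cnt(&alloc_cnt, 0, sizeof(int32_t));
  BufferAllocation::Slice slice_a(&alloc_a, 0, byte_length);
  BufferAllocation::Slice slice_b(&alloc_b, 0, byte_length);

  // Condition: a kernel that increments the counter and writes the
  // "keep looping" flag into pred. "IncAndCmpI32" is a placeholder name;
  // the fatbin used in this file only provides AddI32.
  auto cond_args = {slice_cnt, slice_p};
  auto cond_access = {MemoryAccess::kWrite, MemoryAccess::kWrite};
  CommandBufferCmdSequence cond_commands;
  cond_commands.Emplace<LaunchCmd>(s0, "IncAndCmpI32", cond_args, cond_access,
                                   LaunchDimensions(1, 1), 0);

  // Body: b += a, same as the ForCmd body above.
  auto body_args = {slice_a, slice_b, slice_b};
  auto body_access = {MemoryAccess::kRead, MemoryAccess::kRead,
                      MemoryAccess::kWrite};
  CommandBufferCmdSequence body_commands;
  body_commands.Emplace<LaunchCmd>(s0, "AddI32", body_args, body_access,
                                   LaunchDimensions(1, 4), 0);

  // WhileCmd(stream id, pred slice, cond sequence, body sequence).
  CommandBufferCmdSequence commands;
  commands.Emplace<WhileCmd>(s0, slice_p, std::move(cond_commands),
                             std::move(body_commands));
  CommandBufferThunk thunk(std::move(commands), Thunk::ThunkInfo());

  ServiceExecutableRunOptions run_options;
  se::StreamExecutorMemoryAllocator allocator(executor);
  BufferAllocations allocations({pred, loop_cnt, a, b}, 0, &allocator);
  Thunk::ExecuteParams params = Thunk::ExecuteParams::Create(
      run_options, allocations, stream.get(), stream.get(), nullptr, nullptr);

  TF_ASSERT_OK_AND_ASSIGN(OwningExecutableSource source, ExecutableSource());
  TF_ASSERT_OK(
      thunk.Initialize({executor, static_cast<Thunk::ExecutableSource>(source),
                        &allocations, stream.get()}));
  TF_ASSERT_OK(thunk.ExecuteOnStream(params));
  TF_ASSERT_OK(stream->BlockHostUntilDone());

  // The exact value of b depends on how the hypothetical predicate kernel
  // counts iterations, so only check that the body ran at least once.
  std::vector<int32_t> dst(4, 0);
  TF_ASSERT_OK(stream->Memcpy(dst.data(), b, byte_length));
  EXPECT_GT(dst[0], 0);
}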
Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/runtime/command_buffer_thunk.cc
Test Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/runtime/command_buffer_thunk_test.cc
Commit Hash: 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea

ID: ff66b5ca-2709-4292-b7be-cb7d378338e4
Language: cpp
Repository Name: tensorflow/tensorflow
File Name: command_buffer_cmd
File Path in Repository: third_party/xla/xla/service/gpu/runtime/command_buffer_cmd.cc
File Path for Unit Test: third_party/xla/xla/service/gpu/runtime/command_buffer_cmd_test.cc
#include "xla/service/gpu/runtime/command_buffer_cmd.h" #include <cassert> #include <cstddef> #include <cstdint> #include <iterator> #include <memory> #include <optional> #include <string> #include <string_view> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/base/optimization.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/container/inlined_vector.h" #include "absl/functional/function_ref.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "absl/synchronization/mutex.h" #include "absl/types/span.h" #include "xla/debug_options_flags.h" #include "xla/executable_run_options.h" #include "xla/ffi/call_frame.h" #include "xla/ffi/ffi_api.h" #include "xla/service/buffer_assignment.h" #include "xla/service/collective_ops_utils.h" #include "xla/service/computation_placer.h" #include "xla/service/global_device_id.h" #include "xla/service/gpu/buffer_allocations.h" #include "xla/service/gpu/kernels/custom_kernel.h" #include "xla/service/gpu/launch_dimensions.h" #include "xla/service/gpu/matmul_utils.h" #include "xla/service/gpu/runtime/annotation.h" #include "xla/service/gpu/runtime/nccl_all_gather_thunk.h" #include "xla/service/gpu/runtime/nccl_all_reduce_thunk.h" #include "xla/service/gpu/runtime/nccl_all_to_all_thunk.h" #include "xla/service/gpu/runtime/nccl_api.h" #include "xla/service/gpu/runtime/nccl_clique_key.h" #include "xla/service/gpu/runtime/nccl_collective_broadcast_thunk.h" #include "xla/service/gpu/runtime/nccl_collective_thunk.h" #include "xla/service/gpu/runtime/thunk.h" #include "xla/service/gpu/stream_executor_util.h" #include "xla/service/service_executable_run_options.h" #include "xla/stream_executor/command_buffer.h" #include "xla/stream_executor/device_memory.h" #include "xla/stream_executor/dnn.h" #include "xla/stream_executor/kernel.h" #include "xla/stream_executor/launch_dim.h" #include "xla/stream_executor/lazy_op_runner.h" #include "xla/stream_executor/stream.h" #include "xla/stream_executor/stream_executor.h" #include "xla/stream_executor/trace_command_buffer_factory.h" #include "xla/tsl/concurrency/ref_count.h" #include "xla/types.h" #include "xla/util.h" #include "tsl/platform/env.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" #include "tsl/platform/statusor.h" #include "tsl/profiler/lib/scoped_annotation.h" #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #include "xla/service/custom_call_status.h" #include "xla/service/custom_call_status_internal.h" #include "xla/stream_executor/gpu/gpu_stream.h" #include "xla/stream_executor/gpu/gpu_types.h" #endif namespace xla::gpu { namespace { std::optional<se::DeviceMemoryBase> AssignBufferIfNotNull( const BufferAllocations& buffer_allocations, BufferAllocation::Slice& slice) { return slice.allocation() != nullptr ? std::optional<se::DeviceMemoryBase>{buffer_allocations .GetDeviceAddress(slice)} : std::nullopt; } } using ExecutionScopeId = se::CommandBuffer::ExecutionScopeId; using MemoryAccess = CommandBufferCmd::MemoryAccess; std::string CommandBufferCmdString(CommandBufferCmdType type) { switch (type) { #define CASE_CMD_STRING(enum_name, cmd_name, ...) 
\ case CommandBufferCmdType::enum_name: \ return cmd_name; COMMAND_BUFFER_CMD_LIST(CASE_CMD_STRING) #undef CASE_CMD_STRING default: return "UnknownCmd"; } } static std::string_view ReductionKindString(ReductionKind kind) { switch (kind) { case ReductionKind::MAX: return "max"; case ReductionKind::MIN: return "min"; case ReductionKind::PRODUCT: return "product"; case ReductionKind::SUM: return "sum"; } } static se::CommandBuffer::Builder CreateBuilder( CommandBufferCmdSequence* commands, const Thunk::ExecuteParams* execute_params, const CommandBufferCmd::RecordParams* record_params) { return [=](se::CommandBuffer* command_buffer) { return commands->Record(*execute_params, *record_params, command_buffer, CommandBufferCmdSequence::RecordMode::kConditional); }; } static std::vector<se::CommandBuffer::Builder> CreateBuilders( absl::Span<CommandBufferCmdSequence> commands, const Thunk::ExecuteParams* execute_params, const CommandBufferCmd::RecordParams* record_params) { std::vector<se::CommandBuffer::Builder> builders; for (CommandBufferCmdSequence& cmd : commands) { builders.push_back(CreateBuilder(&cmd, execute_params, record_params)); } return builders; } static se::CommandBuffer::ExecutionScopeBuilder CreateExecutionScopeBuilder( CommandBufferCmdSequence* commands, const Thunk::ExecuteParams* execute_params, const CommandBufferCmd::RecordParams* record_params) { return [=](ExecutionScopeId id, se::CommandBuffer* command_buffer) { CommandBufferCmd::RecordParams params = *record_params; params.execution_scope_id = id; return commands->Record(*execute_params, params, command_buffer, CommandBufferCmdSequence::RecordMode::kConditional); }; } CommandBufferCmd::State* CommandBufferCmd::StateManager::GetOrNull( const CommandBufferCmd* cmd) { if (auto it = state_.find(cmd); it != state_.end()) { return it->second.get(); } return nullptr; } CommandBufferCmd::State* CommandBufferCmd::StateManager::GetOrCreate( const CommandBufferCmd* cmd, absl::FunctionRef<std::unique_ptr<State>()> create) { if (auto it = state_.find(cmd); it != state_.end()) { return it->second.get(); } return state_.try_emplace(cmd, create()).first->second.get(); } se::CommandBuffer::ExecutionScopeId CommandBufferCmd::GetExecutionScope( const RecordParams& record_params, ExecutionStreamId execution_stream_id) const { uint64_t base = record_params.execution_scope_id.value(); uint64_t offset = execution_stream_id.value(); return se::CommandBuffer::ExecutionScopeId(base + offset); } se::CommandBuffer::ExecutionScopeId CommandBufferCmd::GetExecutionScope( const RecordParams& record_params) const { return GetExecutionScope(record_params, execution_stream_id_); } CommandBufferCmdSequence::CommandBufferCmdSequence( SynchronizationMode synchronization_mode) : synchronization_mode_(synchronization_mode) {} void CommandBufferCmdSequence::Append(std::unique_ptr<CommandBufferCmd> cmd) { for (const CommandBufferCmd::BufferUsage& buffer : cmd->buffers()) { buffers_.insert(buffer); allocs_indices_.insert(buffer.slice.index()); } ExecutionStreamId execution_stream_id = cmd->execution_stream_id(); CommandBufferCmd::BufferUsageVector buffers = cmd->buffers(); bool requires_barrier = HasConflicts(execution_stream_id, buffers); if (synchronization_mode_ == SynchronizationMode::kSerialize && !commands_.empty()) { requires_barrier = true; } if (commands_.size() == 1 && commands_.front().cmd->IsNestedCommandBuffer()) { requires_barrier = true; } if (requires_barrier) ClearTrackedBuffers(execution_stream_id); commands_.push_back({std::move(cmd), 
requires_barrier}); TrackBuffers(execution_stream_id, buffers); } absl::Status CommandBufferCmdSequence::Prepare( const Thunk::PrepareParams& params, Thunk::ResourceRequests& resource_requests) { for (auto& command : commands_) { TF_RETURN_IF_ERROR(command.cmd->Prepare(params, resource_requests)); } return absl::OkStatus(); } absl::Status CommandBufferCmdSequence::Initialize( const Thunk::InitializeParams& params, CommandBufferCmd::StateManager& state) { for (auto& command : commands_) { TF_RETURN_IF_ERROR(command.cmd->Initialize(params, state)); } return absl::OkStatus(); } bool CommandBufferCmdSequence::HasConflicts( ExecutionStreamId execution_stream_id, const CommandBufferCmd::BufferUsageVector& buffers) { auto& rwset = read_write_sets_[execution_stream_id]; auto read_overlap = [&](const BufferAllocation::Slice& slice) { if (rwset.read.contains(slice)) return true; for (auto& read : rwset.read) if (read.OverlapsWith(slice)) return true; return false; }; auto write_overlap = [&](const BufferAllocation::Slice& slice) { if (rwset.write.contains(slice)) return true; for (auto& write : rwset.write) if (write.OverlapsWith(slice)) return true; return false; }; return absl::c_any_of(buffers, [&](const auto& buffer) { return buffer.access == MemoryAccess::kWrite ? write_overlap(buffer.slice) || read_overlap(buffer.slice) : write_overlap(buffer.slice); }); } void CommandBufferCmdSequence::TrackBuffers( ExecutionStreamId execution_stream_id, const CommandBufferCmd::BufferUsageVector& buffers) { auto& rwset = read_write_sets_[execution_stream_id]; for (const CommandBufferCmd::BufferUsage& buffer : buffers) { if (buffer.access == MemoryAccess::kWrite) rwset.write.insert(buffer.slice); if (buffer.access == MemoryAccess::kRead) rwset.read.insert(buffer.slice); } } void CommandBufferCmdSequence::ClearTrackedBuffers( ExecutionStreamId execution_stream_id) { read_write_sets_[execution_stream_id] = ReadWriteSet(); } static std::string_view RecordModeString( CommandBufferCmdSequence::RecordMode mode) { switch (mode) { case CommandBufferCmdSequence::RecordMode::kExclusive: return "exclusive"; case CommandBufferCmdSequence::RecordMode::kConditional: return "conditional"; } } absl::Status CommandBufferCmdSequence::Record( const Thunk::ExecuteParams& execute_params, const CommandBufferCmd::RecordParams& record_params, se::CommandBuffer* command_buffer, RecordMode mode) { VLOG(3) << "Record " << commands_.size() << " commands into command buffer" << "; mode=" << RecordModeString(mode); uint64_t start_micros = tsl::Env::Default()->NowMicros(); if (mode == RecordMode::kExclusive) { if (command_buffer->state() == se::CommandBuffer::State::kFinalized) { TF_RETURN_IF_ERROR(command_buffer->Update()); } } absl::flat_hash_map<ExecutionScopeId, int64_t> num_recorded_commands; for (CommandInfo& command : commands_) { if (execute_params.mock_collectives && dynamic_cast<CollectiveCmd*>(command.cmd.get())) { continue; } ExecutionScopeId execution_scope_id = command.cmd->GetExecutionScope(record_params); std::optional<tsl::profiler::ScopedAnnotation> annotation = GetKernelAnnotation(command.cmd->profile_annotation()); if (command.requires_barrier) { VLOG(3) << "Add command buffer barrier after " << num_recorded_commands[execution_scope_id] << " recorded commands into the execution scope #" << execution_scope_id.value(); TF_RETURN_IF_ERROR(command_buffer->Barrier(execution_scope_id)); num_recorded_commands.erase(execution_scope_id); } VLOG(5) << "Record command buffer with scope id " << execution_scope_id.value(); 
TF_RETURN_IF_ERROR( command.cmd->Record(execute_params, record_params, command_buffer)); ++num_recorded_commands[execution_scope_id]; } if (mode == RecordMode::kExclusive) { TF_RETURN_IF_ERROR(command_buffer->Finalize()); } uint64_t end_micros = tsl::Env::Default()->NowMicros(); VLOG(3) << "Recorded " << commands_.size() << " commands into command buffer in " << (end_micros - start_micros) << " μs; mode=" << RecordModeString(mode); return absl::OkStatus(); } const absl::flat_hash_set<CommandBufferCmd::BufferUsage>& CommandBufferCmdSequence::buffers() const { return buffers_; } const absl::flat_hash_set<BufferAllocation::Index>& CommandBufferCmdSequence::allocs_indices() const { return allocs_indices_; } std::vector<bool> CommandBufferCmdSequence::barriers() const { std::vector<bool> barriers; absl::c_transform(commands_, std::back_inserter(barriers), [](auto& command) { return command.requires_barrier; }); return barriers; } TracedCommandBuffer::TracedCommandBuffer( const CommandBufferCmd* trace_cmd, CommandBufferCmd::BufferUsageVector buffers, int64_t capacity) : trace_cmd_(trace_cmd), capacity_(capacity), entries_(capacity) { CHECK_GT(capacity, 0) << "capacity must be larger than 0"; absl::flat_hash_set<BufferAllocation::Index> allocs_indices; for (auto& buffer : buffers) allocs_indices.insert(buffer.slice.index()); allocs_indices_.assign(allocs_indices.begin(), allocs_indices.end()); } absl::StatusOr<se::CommandBuffer*> TracedCommandBuffer::GetOrTraceCommandBuffer( const BufferAllocations* buffer_allocation, se::StreamExecutor* executor, se::Stream* stream, absl::FunctionRef<absl::Status(se::Stream*)> trace) { absl::InlinedVector<se::DeviceMemoryBase, 4> allocs; allocs.reserve(allocs_indices_.size()); for (auto& index : allocs_indices_) { allocs.emplace_back(buffer_allocation->GetDeviceAddress(index)); } auto shift_right = [&](size_t i) -> Entry& { if (i == 0) return entries_[0]; Entry entry = std::move(entries_[i]); do { entries_[i] = std::move(entries_[i - 1]); } while (--i > 0); return entries_[0] = std::move(entry); }; for (size_t i = 0; i < capacity_; ++i) { if (ABSL_PREDICT_TRUE(absl::c_equal(entries_[i].recorded_allocs, allocs) && entries_[i].command_buffer)) { VLOG(6) << "Command buffer trace cache hit for command " << trace_cmd_->ToString(); return shift_right(i).command_buffer.get(); } if (entries_[i].command_buffer == nullptr) { TF_ASSIGN_OR_RETURN( entries_[i].command_buffer, se::TraceCommandBufferFactory::Create(executor, stream, trace)); entries_[i].recorded_allocs.assign(allocs.begin(), allocs.end()); VLOG(6) << "Command buffer trace cache create new item for command " << trace_cmd_->ToString(); return shift_right(i).command_buffer.get(); } } TF_ASSIGN_OR_RETURN( entries_[capacity_ - 1].command_buffer, se::TraceCommandBufferFactory::Create(executor, stream, trace)); entries_[capacity_ - 1].recorded_allocs.assign(allocs.begin(), allocs.end()); VLOG(6) << "Command buffer trace cache does replacement for command " << trace_cmd_->ToString(); return shift_right(capacity_ - 1).command_buffer.get(); } TracedCommandBufferCmd::TracedCommandBufferCmd( CommandBufferCmdType cmd_type, ExecutionStreamId execution_stream_id) : CommandBufferCmd(cmd_type, execution_stream_id) {} absl::Status TracedCommandBufferCmd::AddTracedCommandBuffer( const Thunk::ExecuteParams& execute_params, const RecordParams& record_params, se::CommandBuffer* command_buffer, absl::FunctionRef<absl::Status(se::Stream*)> trace) { auto traced_cmd = record_params.state.GetOrCreate<TracedCommandBuffer>(this, [&] { 
const auto& debug_options = xla::GetDebugOptionsFromFlags(); return std::make_unique<TracedCommandBuffer>( this, buffers(), debug_options.xla_cmd_buffer_trace_cache_size()); }); TF_ASSIGN_OR_RETURN( auto nested_cmd, traced_cmd->GetOrTraceCommandBuffer( execute_params.buffer_allocations, execute_params.stream->parent(), execute_params.command_buffer_trace_stream, trace)); ExecutionScopeId execution_scope_id = GetExecutionScope(record_params); VLOG(5) << "Add nested command buffer to execution scope: " << execution_scope_id.value(); return command_buffer->AddNestedCommandBuffer(execution_scope_id, *nested_cmd); } inline constexpr std::string_view kMemset32Kernel = R"( .version 4.0 .target sm_50 .address_size 64 .visible .entry memset32( .param .u64 memset32_param_0, .param .u32 memset32_param_1, .param .u64 memset32_param_2 ) { .reg .pred %p<2>; .reg .b32 %r<6>; .reg .b64 %rd<7>; .loc 1 3 0 ld.param.u64 %rd3, [memset32_param_0]; ld.param.u32 %r1, [memset32_param_1]; ld.param.u64 %rd2, [memset32_param_2]; .loc 1 5 3 mov.u32 %r2, %ctaid.x; mov.u32 %r3, %ntid.x; mov.u32 %r4, %tid.x; mad.lo.s32 %r5, %r2, %r3, %r4; .loc 1 6 3 cvt.s64.s32 %rd1, %r5; setp.ge.s64 %p1, %rd1, %rd3; @%p1 bra $L__BB0_2; .loc 1 5 3 cvta.to.global.u64 %rd4, %rd2; .loc 1 6 3 shl.b64 %rd5, %rd1, 2; add.s64 %rd6, %rd4, %rd5; st.global.u32 [%rd6], %r1; $L__BB0_2: .loc 1 7 1 ret; })"; ComputationIdCmd::ComputationIdCmd(ExecutionStreamId execution_stream_id, BufferAllocation::Slice dest, Kind kind) : CommandBufferCmd(CommandBufferCmdType::kComputationIdCmd, execution_stream_id), dest_(dest), kind_(kind) {} CommandBufferCmd::BufferUsageVector ComputationIdCmd::buffers() { return {{dest_, MemoryAccess::kWrite}}; } absl::Status ComputationIdCmd::Initialize(const Thunk::InitializeParams& params, StateManager& state) { #if defined(GOOGLE_CUDA) { absl::MutexLock lock(&mutex_); if (memset_kernels_.contains(params.executor)) return absl::OkStatus(); } TF_ASSIGN_OR_RETURN(std::unique_ptr<se::Kernel> kernel, CreateKernel("memset32", 3, kMemset32Kernel, {}, params.executor, 0)); absl::MutexLock lock(&mutex_); memset_kernels_.emplace(params.executor, std::move(kernel)); #endif return absl::OkStatus(); } absl::Status ComputationIdCmd::Record( const Thunk::ExecuteParams& execute_params, const RecordParams& record_params, se::CommandBuffer* command_buffer) { se::DeviceMemoryBase dst = execute_params.buffer_allocations->GetDeviceAddress(dest_); GlobalDeviceId global_device_id = execute_params.collective_params->global_device_id; TF_ASSIGN_OR_RETURN( const DeviceAssignment::LogicalID logical_id, execute_params.collective_params->device_assn->LogicalIdForDevice( global_device_id)); uint32_t value = kind_ == Kind::kReplica ? logical_id.replica_id : logical_id.computation_id; ExecutionScopeId execution_scope_id = GetExecutionScope(record_params); VLOG(5) << "ComputationIdCmd" << ": kind=" << (kind_ == Kind::kReplica ? 
"replica" : "partition") << "; value=" << value << "; execution_scope_id=" << execution_scope_id.value(); VLOG(5) << " Id: " << dest_ << " (" << dst.opaque() << ")"; #if defined(GOOGLE_CUDA) se::Kernel* memset_kernel = [&] { absl::MutexLock lock(&mutex_); return memset_kernels_[execute_params.stream->parent()].get(); }(); if (memset_kernel == nullptr) { return absl::InternalError( "Memset kernel not loaded on a command buffer executor"); } auto args = se::PackKernelArgs(0, int64_t{1}, value, dst); return command_buffer->Launch(execution_scope_id, se::ThreadDim(1), se::BlockDim(1), *memset_kernel, *args); #else return command_buffer->Memset(execution_scope_id, &dst, value, 1); #endif } LaunchCmd::LaunchCmd(ExecutionStreamId execution_stream_id, std::string kernel_name, absl::Span<const BufferAllocation::Slice> args, absl::Span<const MemoryAccess> args_access, LaunchDimensions dims, int64_t shmem_bytes) : CommandBufferCmd(CommandBufferCmdType::kLaunchCmd, execution_stream_id), kernel_name_(std::move(kernel_name)), args_(args.begin(), args.end()), args_access_(args_access.begin(), args_access.end()), dims_(dims), shmem_bytes_(shmem_bytes) {} absl::Status LaunchCmd::Initialize(const Thunk::InitializeParams& params, StateManager& state) { { absl::MutexLock lock(&mutex_); if (kernels_.contains(params.executor)) return absl::OkStatus(); } TF_ASSIGN_OR_RETURN( std::unique_ptr<se::Kernel> kernel, CreateKernel(kernel_name_, args_.size(), params.src.text, params.src.binary, params.executor, shmem_bytes_)); absl::MutexLock lock(&mutex_); kernels_.emplace(params.executor, std::move(kernel)); return absl::OkStatus(); } absl::Status LaunchCmd::Record(const Thunk::ExecuteParams& execute_params, const RecordParams& record_params, se::CommandBuffer* command_buffer) { ExecutionScopeId execution_scope_id = GetExecutionScope(record_params); VLOG(5) << "LaunchCmd: kernel=" << kernel_name_ << "; shmem_bytes=" << shmem_bytes_ << "; execution_scope_id=" << execution_scope_id.value(); se::Kernel* kernel = [&] { absl::MutexLock lock(&mutex_); return kernels_[execute_params.stream->parent()].get(); }(); if (kernel == nullptr) { return absl::InternalError(absl::StrCat( "Kernel not loaded on a command buffer executor: ", kernel_name_)); } absl::InlinedVector<se::DeviceMemoryBase, 4> buffers; for (const BufferAllocation::Slice& arg : args_) { se::DeviceMemoryBase buf = execute_params.buffer_allocations->GetDeviceAddress(arg); VLOG(5) << " Arg: " << arg << ": " << buf.opaque(); buffers.push_back(buf); } TF_ASSIGN_OR_RETURN(auto kernel_args, se::PackKernelArgs(buffers, shmem_bytes_)); return command_buffer->Launch(execution_scope_id, dims_.thread_counts_per_block(), dims_.block_counts(), *kernel, *kernel_args); } CommandBufferCmd::BufferUsageVector LaunchCmd::buffers() { BufferUsageVector buffers; for (int32_t i = 0; i < args_.size(); ++i) { buffers.emplace_back(args_[i], args_access_[i]); } return buffers; } CustomKernelLaunchCmd::CustomKernelLaunchCmd( ExecutionStreamId execution_stream_id, absl::Span<const BufferAllocation::Slice> args, absl::Span<const MemoryAccess> args_access, CustomKernel custom_kernel) : CommandBufferCmd(CommandBufferCmdType::kCustomKernelLaunchCmd, execution_stream_id), args_(args.begin(), args.end()), args_access_(args_access.begin(), args_access.end()), custom_kernel_(std::move(custom_kernel)) {} absl::Status CustomKernelLaunchCmd::Initialize( const Thunk::InitializeParams& params, StateManager& state) { { absl::MutexLock lock(&mutex_); if (kernels_.contains(params.executor)) return 
absl::OkStatus(); } TF_ASSIGN_OR_RETURN( std::unique_ptr<se::Kernel> kernel, params.executor->LoadKernel(custom_kernel_.kernel_spec())); absl::MutexLock lock(&mutex_); kernels_.emplace(params.executor, std::move(kernel)); return absl::OkStatus(); } absl::Status CustomKernelLaunchCmd::Record( const Thunk::ExecuteParams& execute_params, const RecordParams& record_params, se::CommandBuffer* command_buffer) { ExecutionScopeId execution_scope_id = GetExecutionScope(record_params); VLOG(5) << "CustomKernelLaunchCmd: custom_kernel=" << custom_kernel_.name() << "; execution_scope_id=" << execution_scope_id.value(); se::Kernel* kernel = [&] { absl::MutexLock lock(&mutex_); return kernels_[execute_params.stream->parent()].get(); }(); if (kernel == nullptr) { return absl::InternalError( absl::StrCat("Custom kernel not loaded on a command buffer executor: ", custom_kernel_.name())); } absl::InlinedVector<se::DeviceMemoryBase, 4> buffers; for (const BufferAllocation::Slice& arg : args_) { se::DeviceMemoryBase buf = execute_params.buffer_allocations->GetDeviceAddress(arg); VLOG(5) << " Arg: " << arg << ": " << buf.opaque(); buffers.push_back(buf); } se::KernelArgsDeviceMemoryArray kernel_args( buffers, custom_kernel_.shared_memory_bytes()); return command_buffer->Launch( execution_scope_id, custom_kernel_.thread_dims(), custom_kernel_.block_dims(), *kernel, kernel_args); } CommandBufferCmd::BufferUsageVector CustomKernelLaunchCmd::buffers() { BufferUsageVector buffers; for (int32_t i = 0; i < args_.size(); ++i) { buffers.emplace_back(args_[i], args_access_[i]); } return buffers; } MemcpyDeviceToDeviceCmd::MemcpyDeviceToDeviceCmd( ExecutionStreamId execution_stream_id, BufferAllocation::Slice dst, BufferAllocation::Slice src, int64_t num_bytes) : CommandBufferCmd(CommandBufferCmdType::kMemcpyDeviceToDeviceCmd, execution_stream_id), dst_(dst), src_(src), num_bytes_(num_bytes) {} absl::Status MemcpyDeviceToDeviceCmd::Record( const Thunk::ExecuteParams& execute_params, const RecordParams& record_params, se::CommandBuffer* command_buffer) { se::DeviceMemoryBase dst = execute_params.buffer_allocations->GetDeviceAddress(dst_); se::DeviceMemoryBase src = execute_params.buffer_allocations->GetDeviceAddress(src_); ExecutionScopeId execution_scope_id = GetExecutionScope(record_params); VLOG(5) << "MemcpyDeviceToDeviceCmd: num_bytes = " << num_bytes_ << "; execution_scope_id=" << execution_scope_id.value(); VLOG(5) << " Dst: " << dst_ << " (" << dst.opaque() << ")"; VLOG(5) << " Src: " << src_ << " (" << src.opaque() << ")"; if (num_bytes_ == 0) { VLOG(5) << "Skip recording MemcpyDeviceToDeviceCmd command of 0 bytes"; return absl::OkStatus(); } return command_buffer->MemcpyDeviceToDevice(execution_scope_id, &dst, src, num_bytes_); } CommandBufferCmd::BufferUsageVector MemcpyDeviceToDeviceCmd::buffers() { return {{dst_, MemoryAccess::kWrite}, {src_, MemoryAccess::kRead}}; } MemzeroCmd::MemzeroCmd(ExecutionStreamId execution_stream_id, BufferAllocation::Slice dst) : CommandBufferCmd(CommandBufferCmdType::kMemzeroCmd, execution_stream_id), dst_(dst) {} absl::Status MemzeroCmd::Record(const Thunk::ExecuteParams& execute_params, const RecordParams& record_params, se::CommandBuffer* command_buffer) { se::DeviceMemoryBase dst = execute_params.buffer_allocations->GetDeviceAddress(dst_); ExecutionScopeId execution_scope_id = GetExecutionScope(record_params); VLOG(5) << "MemzeroCmd: execution_scope_id=" << execution_scope_id.value(); VLOG(5) << " Dst: " << dst_ << " (" << dst.opaque() << ")"; if (dst_.size() == 0) { VLOG(5) 
<< "Skip recording MemzeroCmd command of 0 bytes"; return absl::OkStatus(); } return command_buffer->Memset(execution_scope_id, &dst, uint8_t{0}, dst_.size()); } CommandBufferCmd::BufferUsageVector MemzeroCmd::buffers() { return {{dst_, MemoryAccess::kWrite}}; } Memset32Cmd::Memset32Cmd(ExecutionStreamId execution_stream_id, BufferAllocation::Slice dst, uint32_t bit_pattern) : CommandBufferCmd(CommandBufferCmdType::kMemset32Cmd, execution_stream_id), dst_(dst), bit_pattern_(bit_pattern) {} absl::Status Memset32Cmd::Record(const Thunk::ExecuteParams& execute_params, const RecordParams& record_params, se::CommandBuffer* command_buffer) { se::DeviceMemoryBase dst = execute_params.buffer_allocations->GetDeviceAddress(dst_); ExecutionScopeId execution_scope_id = GetExecutionScope(record_params); VLOG(5) << "Memset32Cmd: bit_pattern=" << bit_pattern_ << "; execution_scope_id=" << execution_scope_id.value(); VLOG(5) << " Dst: " << dst_ << " (" << dst.opaque() << ")"; if (dst_.size() == 0) { VLOG(5) << "Skip recording Memset32Cmd command of 0 bytes"; return absl::OkStatus(); } return command_buffer->Memset( execution_scope_id, &dst, bit_pattern_, dst_.size() / sizeof(uint32_t)); } CommandBufferCmd::BufferUsageVector Memset32Cmd::buffers() { return {{dst_, MemoryAccess::kWrite}}; } IfCmd::IfCmd(ExecutionStreamId execution_stream_id, BufferAllocation::Slice pred, CommandBufferCmdSequence then_commands) : CommandBufferCmd(CommandBufferCmdType::kIfCmd, execution_stream_id), pred_(pred), then_commands_(std::move(then_commands)) {} absl::Status IfCmd::Initialize(const Thunk::InitializeParams& params, StateManager& state) { return then_commands_.Initialize(params, state); } absl::Status IfCmd::Record(const Thunk::ExecuteParams& execute_params, const RecordParams& record_params, se::CommandBuffer* command_buffer) { se::DeviceMemoryBase pred = execute_params.buffer_allocations->GetDeviceAddress(pred_); ExecutionScopeId execution_scope_id = GetExecutionScope(record_params); VLOG(5) << "IfCmd: execution_scope_id=" << execution_scope_id.value(); VLOG(5) << " pred: " << pred_ << " (" << pred.opaque() << ")"; return command_buffer->If( execution_scope_id, se::DeviceMemory<bool>(pred), CreateBuilder(&then_commands_, &execute_params, &record_params)); } bool IfCmd::force_update() { return then_commands_.force_update(); } CommandBufferCmd::BufferUsageVector IfCmd::buffers() { absl::flat_hash_set<CommandBufferCmd::BufferUsage> buffers; buffers.emplace(pred_, MemoryAccess::kRead); buffers.insert(then_commands_.buffers().begin(), then_commands_.buffers().end()); return {buffers.begin(), buffers.end()}; } IfElseCmd::IfElseCmd(ExecutionStreamId execution_stream_id, BufferAllocation::Slice pred, CommandBufferCmdSequence then_commands, CommandBufferCmdSequence else_commands) : CommandBufferCmd(CommandBufferCmdType::kIfElseCmd, execution_stream_id), pred_(pred), then_commands_(std::move(then_commands)), else_commands_(std::move(else_commands)) {} absl::Status IfElseCmd::Initialize(const Thunk::InitializeParams& params, StateManager& state) { TF_RETURN_IF_ERROR(then_commands_.Initialize(params, state)); TF_RETURN_IF_ERROR(else_commands_.Initialize(params, state)); return absl::OkStatus(); } absl::Status IfElseCmd::Record(const Thunk::ExecuteParams& execute_params, const RecordParams& record_params, se::CommandBuffer* command_buffer) { se::DeviceMemoryBase pred = execute_params.buffer_allocations->GetDeviceAddress(pred_); ExecutionScopeId execution_scope_id = GetExecutionScope(record_params); VLOG(5) << "IfElseCmd: 
execution_scope_id=" << execution_scope_id.value(); VLOG(5) << " pred: " << pred_ << " (" << pred.opaque() << ")"; return command_buffer->IfElse( execution_scope_id, se::DeviceMemory<bool>(pred), CreateBuilder(&then_commands_, &execute_params, &record_params), CreateBuilder(&else_commands_, &execute_params, &record_params)); } bool IfElseCmd::force_update() { return (then_commands_.force_update() || else_commands_.force_update()); } CommandBufferCmd::BufferUsageVector IfElseCmd::buffers() { absl::flat_hash_set<CommandBufferCmd::BufferUsage> buffers; buffers.emplace(pred_, MemoryAccess::kRead); buffers.insert(then_commands_.buffers().begin(), then_commands_.buffers().end()); buffers.insert(else_commands_.buffers().begin(), else_commands_.buffers().end()); return {buffers.begin(), buffers.end()}; } CaseCmd::CaseCmd(ExecutionStreamId execution_stream_id, BufferAllocation::Slice index, std::vector<CommandBufferCmdSequence> branches_commands) : CommandBufferCmd(CommandBufferCmdType::kCaseCmd, execution_stream_id), index_(index), branches_commands_(std::move(branches_commands)) {} absl::Status CaseCmd::Initialize(const Thunk::InitializeParams& params, StateManager& state) { for (auto& branch : branches_commands_) { TF_RETURN_IF_ERROR(branch.Initialize(params, state)); } return absl::OkStatus(); } absl::Status CaseCmd::Record(const Thunk::ExecuteParams& execute_params, const RecordParams& record_params, se::CommandBuffer* command_buffer) { se::DeviceMemoryBase index = execute_params.buffer_allocations->GetDeviceAddress(index_); ExecutionScopeId execution_scope_id = GetExecutionScope(record_params); VLOG(5) << "CaseCmd: execution_scope_id=" << execution_scope_id.value(); VLOG(5) << " index: " << index_ << " (" << index.opaque() << ")"; return command_buffer->Case(execution_scope_id, se::DeviceMemory<int32_t>(index), CreateBuilders(absl::MakeSpan(branches_commands_), &execute_params, &record_params)); } bool CaseCmd::force_update() { return absl::c_any_of(branches_commands_, [](const auto& seq) { return seq.force_update(); }); } CommandBufferCmd::BufferUsageVector CaseCmd::buffers() { absl::flat_hash_set<CommandBufferCmd::BufferUsage> buffers; buffers.emplace(index_, MemoryAccess::kRead); for (auto& branch : branches_commands_) { buffers.insert(branch.buffers().begin(), branch.buffers().end()); } return {buffers.begin(), buffers.end()}; } ForCmd::ForCmd(ExecutionStreamId execution_stream_id, int32_t num_iterations, BufferAllocation::Slice loop_counter, CommandBufferCmdSequence body_commands) : CommandBufferCmd(CommandBufferCmdType::kForCmd, execution_stream_id), num_iterations_(num_iterations), loop_counter_(loop_counter), body_commands_(std::move(body_commands)) {} absl::Status ForCmd::Initialize(const Thunk::InitializeParams& params, StateManager& state) { return body_commands_.Initialize(params, state); } absl::Status ForCmd::Record(const Thunk::ExecuteParams& execute_params, const RecordParams& record_params, se::CommandBuffer* command_buffer) { se::DeviceMemoryBase loop_counter = execute_params.buffer_allocations->GetDeviceAddress(loop_counter_); ExecutionScopeId execution_scope_id = GetExecutionScope(record_params); VLOG(5) << "ForCmd: num_iterations=" << num_iterations_ << "; body_commands=" << body_commands_.size() << "; execution_scope_id=" << execution_scope_id.value(); VLOG(5) << " loop_counter: " << loop_counter_ << " (" << loop_counter.opaque() << ")"; return command_buffer->For( execution_scope_id, num_iterations_, se::DeviceMemory<int32_t>(loop_counter), 
CreateBuilder(&body_commands_, &execute_params, &record_params)); } bool ForCmd::force_update() { return body_commands_.force_update(); } CommandBufferCmd::BufferUsageVector ForCmd::buffers() { absl::flat_hash_set<CommandBufferCmd::BufferUsage> buffers; buffers.emplace(loop_counter_, MemoryAccess::kWrite); buffers.insert(body_commands_.buffers().begin(), body_commands_.buffers().end()); return {buffers.begin(), buffers.end()}; } WhileCmd::WhileCmd(ExecutionStreamId execution_stream_id, BufferAllocation::Slice pred, CommandBufferCmdSequence cond_commands, CommandBufferCmdSequence body_commands) : CommandBufferCmd(CommandBufferCmdType::kWhileCmd, execution_stream_id), pred_(pred), cond_commands_(std::move(cond_commands)), body_commands_(std::move(body_commands)) {} absl::Status WhileCmd::Initialize(const Thunk::InitializeParams& params, StateManager& state) { TF_RETURN_IF_ERROR(cond_commands_.Initialize(params, state)); return body_commands_.Initialize(params, state); } absl::Status WhileCmd::Record(const Thunk::ExecuteParams& execute_params, const RecordParams& record_params, se::CommandBuffer* command_buffer) { se::DeviceMemoryBase pred = execute_params.buffer_allocations->GetDeviceAddress(pred_); ExecutionScopeId execution_scope_id = GetExecutionScope(record_params); VLOG(5) << "WhileCmd: cond_commands=" << cond_commands_.size() << " body_commands=" << body_commands_.size() << "; execution_scope_id=" << execution_scope_id.value(); VLOG(5) << " pred: " << pred_ << " (" << pred.opaque() << ")"; return command_buffer->While( execution_scope_id, se::DeviceMemory<bool>(pred), CreateExecutionScopeBuilder(&cond_commands_, &execute_params, &record_params), CreateBuilder(&body_commands_, &execute_params, &record_params)); } bool WhileCmd::force_update() { return (cond_commands_.force_update() || body_commands_.force_update()); } CommandBufferCmd::BufferUsageVector WhileCmd::buffers() { absl::flat_hash_set<CommandBufferCmd::BufferUsage> buffers; buffers.emplace(pred_, MemoryAccess::kWrite); buffers.insert(cond_commands_.buffers().begin(), cond_commands_.buffers().end()); buffers.insert(body_commands_.buffers().begin(), body_commands_.buffers().end()); return {buffers.begin(), buffers.end()}; } GemmCmd::GemmCmd(ExecutionStreamId execution_stream_id, GemmConfig config, const BufferAllocation::Slice& lhs_buffer, const BufferAllocation::Slice& rhs_buffer, const BufferAllocation::Slice& output_buffer, const BufferAllocation::Slice& workspace, bool deterministic) : TracedCommandBufferCmd(CommandBufferCmdType::kGemmCmd, execution_stream_id), config_(std::move(config)), lhs_buffer_(lhs_buffer), rhs_buffer_(rhs_buffer), output_buffer_(output_buffer), workspace_(workspace), deterministic_(deterministic) {} absl::Status GemmCmd::Initialize(const Thunk::InitializeParams& params, StateManager& state) { if (!params.stream->parent()->AsBlas()) { return absl::InternalError("Failed to initialize BLAS support for GemmCmd"); } return absl::OkStatus(); } absl::Status GemmCmd::Record(const Thunk::ExecuteParams& execute_params, const RecordParams& record_params, se::CommandBuffer* command_buffer) { se::DeviceMemoryBase lhs = execute_params.buffer_allocations->GetDeviceAddress(lhs_buffer_); se::DeviceMemoryBase rhs = execute_params.buffer_allocations->GetDeviceAddress(rhs_buffer_); se::DeviceMemoryBase out = execute_params.buffer_allocations->GetDeviceAddress(output_buffer_); se::DeviceMemoryBase workspace = execute_params.buffer_allocations->GetDeviceAddress(workspace_); ExecutionScopeId execution_scope_id = 
GetExecutionScope(record_params); VLOG(5) << "GemmCmd: deterministic=" << deterministic_ << "; execution_scope_id=" << execution_scope_id.value(); VLOG(5) << " Lhs: " << lhs_buffer_ << " (" << lhs.opaque() << ")"; VLOG(5) << " Lhs: " << rhs_buffer_ << " (" << rhs.opaque() << ")"; VLOG(5) << " Out: " << output_buffer_ << " (" << out.opaque() << ")"; VLOG(5) << " Workspace: " << workspace_ << " (" << workspace.opaque() << ")"; return AddTracedCommandBuffer( execute_params, record_params, command_buffer, [&](se::Stream* stream) { return RunGemm(config_, lhs, rhs, out, workspace, deterministic_, stream); }); } CommandBufferCmd::BufferUsageVector GemmCmd::buffers() { return {{lhs_buffer_, MemoryAccess::kRead}, {rhs_buffer_, MemoryAccess::kRead}, {output_buffer_, MemoryAccess::kWrite}, {workspace_, MemoryAccess::kWrite}}; } CublasLtCmd::CublasLtCmd( ExecutionStreamId execution_stream_id, GemmConfig gemm_config, se::gpu::BlasLt::Epilogue epilogue, int64_t algorithm_idx, BufferAllocation::Slice a_buffer, BufferAllocation::Slice b_buffer, BufferAllocation::Slice c_buffer, BufferAllocation::Slice d_buffer, BufferAllocation::Slice bias_buffer , BufferAllocation::Slice aux_buffer , BufferAllocation::Slice a_scale_buffer , BufferAllocation::Slice b_scale_buffer , BufferAllocation::Slice c_scale_buffer , BufferAllocation::Slice d_scale_buffer , BufferAllocation::Slice d_amax_buffer , BufferAllocation::Slice workspace_buffer) : TracedCommandBufferCmd(CommandBufferCmdType::kCublasLtCmd, execution_stream_id), gemm_config_(std::move(gemm_config)), epilogue_(epilogue), algorithm_idx_(algorithm_idx), a_buffer_(a_buffer), b_buffer_(b_buffer), c_buffer_(c_buffer), d_buffer_(d_buffer), bias_buffer_(bias_buffer), aux_buffer_(aux_buffer), a_scale_buffer_(a_scale_buffer), b_scale_buffer_(b_scale_buffer), c_scale_buffer_(c_scale_buffer), d_scale_buffer_(d_scale_buffer), d_amax_buffer_(d_amax_buffer), workspace_buffer_(workspace_buffer) {} absl::StatusOr<se::gpu::BlasLt::MatmulPlan*> CublasLtCmd::GetMatmulPlan( const stream_executor::Stream* stream) { auto it = matmul_plans_cache_.find(stream); if (it != matmul_plans_cache_.end()) return it->second.get(); TF_ASSIGN_OR_RETURN(auto plan, se::gpu::BlasLt::GetMatmulPlan( stream, gemm_config_, epilogue_)); auto [it_insert, _] = matmul_plans_cache_.emplace(stream, std::move(plan)); return it_insert->second.get(); } absl::StatusOr<se::gpu::BlasLt::MatmulAlgorithm> CublasLtCmd::GetMatmulAlgorithm(const se::gpu::BlasLt::MatmulPlan* plan, int64_t max_workspace) { auto it = matmul_algorithm_cache_.find(plan); if (it != matmul_algorithm_cache_.end()) return it->second; TF_ASSIGN_OR_RETURN( auto algorithms, plan->GetAlgorithms( 128, max_workspace)); TF_RET_CHECK(algorithm_idx_ >= 0 && algorithm_idx_ < algorithms.size()); auto [it_insert, _] = matmul_algorithm_cache_.emplace(plan, algorithms[algorithm_idx_]); return it_insert->second; } absl::Status CublasLtCmd::Initialize(const Thunk::InitializeParams& params, StateManager& state) { if (!params.stream->parent()->AsBlas()) { return absl::InternalError("Failed to initialize BLAS support for GemmCmd"); } TF_ASSIGN_OR_RETURN(auto plan, GetMatmulPlan(params.stream)); TF_RETURN_IF_ERROR( GetMatmulAlgorithm(plan, workspace_buffer_.size()).status()); return absl::OkStatus(); } absl::Status CublasLtCmd::Record(const Thunk::ExecuteParams& execute_params, const RecordParams& record_params, se::CommandBuffer* command_buffer) { TF_ASSIGN_OR_RETURN(auto plan, GetMatmulPlan(execute_params.stream)); TF_ASSIGN_OR_RETURN(auto algorithm, 
GetMatmulAlgorithm(plan, workspace_buffer_.size())); const BufferAllocations& allocs = *execute_params.buffer_allocations; se::DeviceMemoryBase bias, a_scale, b_scale, c_scale, d_scale, aux, d_amax; if (bias_buffer_.allocation() != nullptr) { bias = allocs.GetDeviceAddress(bias_buffer_); } if (a_scale_buffer_.allocation() != nullptr) { a_scale = allocs.GetDeviceAddress(a_scale_buffer_); } if (b_scale_buffer_.allocation() != nullptr) { b_scale = allocs.GetDeviceAddress(b_scale_buffer_); } if (c_scale_buffer_.allocation() != nullptr) { c_scale = allocs.GetDeviceAddress(c_scale_buffer_); } if (d_scale_buffer_.allocation() != nullptr) { d_scale = allocs.GetDeviceAddress(d_scale_buffer_); } if (d_amax_buffer_.allocation() != nullptr) { d_amax = allocs.GetDeviceAddress(d_amax_buffer_); } if (aux_buffer_.allocation() != nullptr) { aux = allocs.GetDeviceAddress(aux_buffer_); } ExecutionScopeId execution_scope_id = GetExecutionScope(record_params); VLOG(5) << "CublasLtCmd with execution_scope_id: " << execution_scope_id.value(); VLOG(5) << " a_buffer: " << a_buffer_.ToString(); VLOG(5) << " b_buffer: " << b_buffer_.ToString(); VLOG(5) << " c_buffer: " << c_buffer_.ToString(); VLOG(5) << " d_buffer: " << d_buffer_.ToString(); VLOG(5) << " bias_buffer: " << bias_buffer_.ToString(); VLOG(5) << " aux_buffer: " << aux_buffer_.ToString(); VLOG(5) << " a_scale_buffer: " << a_scale_buffer_.ToString(); VLOG(5) << " b_scale_buffer: " << b_scale_buffer_.ToString(); VLOG(5) << " c_scale_buffer: " << c_scale_buffer_.ToString(); VLOG(5) << " d_scale_buffer: " << d_scale_buffer_.ToString(); VLOG(5) << " d_amax_buffer: " << d_amax_buffer_.ToString(); VLOG(5) << " workspace_buffer: " << workspace_buffer_.ToString(); return AddTracedCommandBuffer( execute_params, record_params, command_buffer, [&](se::Stream* stream) { return plan->ExecuteOnStream( stream, allocs.GetDeviceAddress(a_buffer_), allocs.GetDeviceAddress(b_buffer_), allocs.GetDeviceAddress(c_buffer_), allocs.GetDeviceAddress(d_buffer_), bias, aux, a_scale, b_scale, c_scale, d_scale, d_amax, algorithm, allocs.GetDeviceAddress(workspace_buffer_)); }); } CommandBufferCmd::BufferUsageVector CublasLtCmd::buffers() { BufferUsageVector buffer_usage; buffer_usage.reserve(13); buffer_usage.push_back({a_buffer_, MemoryAccess::kRead}); buffer_usage.push_back({b_buffer_, MemoryAccess::kRead}); buffer_usage.push_back({c_buffer_, MemoryAccess::kRead}); buffer_usage.push_back({d_buffer_, MemoryAccess::kWrite}); buffer_usage.push_back({workspace_buffer_, MemoryAccess::kWrite}); if (bias_buffer_.allocation() != nullptr) { buffer_usage.push_back({bias_buffer_, MemoryAccess::kRead}); } if (a_scale_buffer_.allocation() != nullptr) { buffer_usage.push_back({a_scale_buffer_, MemoryAccess::kRead}); } if (b_scale_buffer_.allocation() != nullptr) { buffer_usage.push_back({b_scale_buffer_, MemoryAccess::kRead}); } if (c_scale_buffer_.allocation() != nullptr) { buffer_usage.push_back({c_scale_buffer_, MemoryAccess::kRead}); } if (d_scale_buffer_.allocation() != nullptr) { buffer_usage.push_back({d_scale_buffer_, MemoryAccess::kRead}); } if (aux_buffer_.allocation() != nullptr) { buffer_usage.push_back({aux_buffer_, MemoryAccess::kWrite}); } if (d_amax_buffer_.allocation() != nullptr) { buffer_usage.push_back({d_amax_buffer_, MemoryAccess::kRead}); } return buffer_usage; } CuDnnCmd::CuDnnCmd(ExecutionStreamId execution_stream_id, absl::Span<const BufferAllocation::Slice> args, const std::shared_ptr<se::dnn::LazyDnnGraph> graph) : 
TracedCommandBufferCmd(CommandBufferCmdType::kCuDnnCmd, execution_stream_id), args_(args.cbegin(), args.cend()), graph_(graph) {} absl::Status CuDnnCmd::Initialize(const Thunk::InitializeParams& params, StateManager&) { if (!params.stream->parent()->AsDnn()) { return absl::InternalError("Failed to initialize DNN support for CuDnnCmd"); } return absl::OkStatus(); } absl::Status CuDnnCmd::Record(const Thunk::ExecuteParams& execute_params, const RecordParams& record_params, se::CommandBuffer* command_buffer) { CHECK(graph_ != nullptr); std::vector<se::DeviceMemoryBase> operands; operands.reserve(args_.size()); for (const BufferAllocation::Slice& arg : args_) { se::DeviceMemoryBase buf = execute_params.buffer_allocations->GetDeviceAddress(arg); VLOG(5) << " Arg: " << arg << ": " << buf.opaque(); operands.push_back(buf); } return AddTracedCommandBuffer( execute_params, record_params, command_buffer, [&](se::Stream* stream) { return graph_->get()->Execute( *stream, absl::Span<se::DeviceMemoryBase>(operands), execute_params.collective_params->local_device_ordinal); }); } CommandBufferCmd::BufferUsageVector CuDnnCmd::buffers() { CommandBufferCmd::BufferUsageVector buffer_usage; buffer_usage.reserve(args_.size()); for (int i = 0; i < args_.size() - 1; ++i) { buffer_usage.push_back({args_[i], MemoryAccess::kRead}); } buffer_usage.push_back({args_.back(), MemoryAccess::kWrite}); return buffer_usage; } absl::Status CustomCallCmd::Record(const Thunk::ExecuteParams& execute_params, const RecordParams& record_params, se::CommandBuffer* command_buffer) { if (handler_ == nullptr) { return RecordLegacyCustomCall(execute_params, record_params, command_buffer); } return RecordXlaFfiCall(execute_params, record_params, command_buffer); } absl::Status CustomCallCmd::RecordLegacyCustomCall( const Thunk::ExecuteParams& execute_params, const RecordParams& record_params, se::CommandBuffer* command_buffer) { std::vector<void*> buffers; buffers.reserve(operands_.size() + results_.size()); for (auto& slices : {operands_, results_}) { for (const std::optional<Slice>& slice : slices) { if (!slice.has_value()) { buffers.push_back(nullptr); continue; } if (!slice->slice.allocation()) { return absl::InternalError( "custom call input missing buffer allocation"); } buffers.push_back( execute_params.buffer_allocations->GetDeviceAddress(slice->slice) .opaque()); } } ExecutionScopeId execution_scope_id = GetExecutionScope(record_params); VLOG(5) << "CustomCallCmd: execution_scope_id=" << execution_scope_id.value(); for (int i = 0; i < operands_.size(); ++i) { if (operands_[i].has_value()) { VLOG(5) << " Operand " << i << ": " << operands_[i]->slice << " (" << buffers[i] << ")"; } else { VLOG(5) << " Operand " << i << ": null"; } } for (int i = 0; i < results_.size(); ++i) { if (results_[i].has_value()) { VLOG(5) << " Result " << i << ": " << results_[i]->slice << " (" << buffers[operands_.size() + i] << ")"; } else { VLOG(5) << " Result " << i << ": null"; } } #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM TF_ASSIGN_OR_RETURN( auto nested_cmd, se::TraceCommandBufferFactory::Create( execute_params.stream->parent(), execute_params.command_buffer_trace_stream, [&](se::Stream* stream) { se::gpu::GpuStreamHandle gpu_stream = se::gpu::AsGpuStreamValue(stream); XlaCustomCallStatus custom_call_status; call_target_(gpu_stream, buffers.data(), opaque_.data(), opaque_.size(), &custom_call_status); auto message = CustomCallStatusGetMessage(&custom_call_status); if (message) { return absl::InternalError( absl::StrCat("CustomCall failed: ", 
*message)); } return absl::OkStatus(); })); return command_buffer->AddNestedCommandBuffer(execution_scope_id, *nested_cmd); #else return Unavailable( "Custom calls on GPU are not supported in this configuration. Please " "build with --config=cuda or --config=rocm"); #endif } absl::Status CustomCallCmd::RecordXlaFfiCall( const Thunk::ExecuteParams& execute_params, const RecordParams& record_params, se::CommandBuffer* command_buffer) { ffi::CallFrameBuilder builder(operands_.size(), results_.size()); ExecutionScopeId execution_scope_id = GetExecutionScope(record_params); VLOG(5) << "CustomCallCmd: execution_scope_id=" << execution_scope_id.value(); for (int i = 0; i < operands_.size(); ++i) { const std::optional<Slice>& slice = operands_[i]; if (!slice.has_value()) { return Internal("FFI handlers do not support tokens (yet)!"); } if (!slice->slice.allocation()) return Internal("custom call input missing buffer allocation"); se::DeviceMemoryBase buffer = execute_params.buffer_allocations->GetDeviceAddress(slice->slice); VLOG(5) << " Operand " << i << ": " << slice->slice << " (" << buffer.opaque() << ")"; builder.AddBufferArg(buffer, slice->shape.element_type(), slice->shape.dimensions()); } for (int i = 0; i < results_.size(); ++i) { const std::optional<Slice>& slice = results_[i]; if (!slice.has_value()) { return Internal("FFI handlers do not support tokens (yet)!"); } if (!slice->slice.allocation()) return Internal("custom call input missing buffer allocation"); se::DeviceMemoryBase buffer = execute_params.buffer_allocations->GetDeviceAddress(slice->slice); VLOG(5) << " Result " << i << ": " << slice->slice << " (" << buffer.opaque() << ")"; builder.AddBufferArg(buffer, slice->shape.element_type(), slice->shape.dimensions()); } ffi::CallFrameBuilder::AttributesBuilder attrs; attrs.Append(attributes_); builder.AddAttributes(attrs.Build()); ffi::CallFrame call_frame = builder.Build(); #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM TF_ASSIGN_OR_RETURN( auto nested_cmd, se::TraceCommandBufferFactory::Create( execute_params.stream->parent(), execute_params.command_buffer_trace_stream, [&](se::Stream* stream) { ffi::CallOptions options = { execute_params.buffer_allocations->device_ordinal(), ffi::CallOptions::GpuOptions{ execute_params.stream, execute_params.buffer_allocations->memory_allocator()}, nullptr, execute_params.ffi_execution_context}; return ffi::Call(handler_, call_frame, options); })); return command_buffer->AddNestedCommandBuffer(execution_scope_id, *nested_cmd); #else return Unavailable( "Custom calls on GPU are not supported in this configuration. 
Please " "build with --config=cuda or --config=rocm"); #endif } CommandBufferCmd::BufferUsageVector CustomCallCmd::buffers() { CommandBufferCmd::BufferUsageVector buffer_usage; for (auto& slices : {operands_, results_}) { for (const std::optional<Slice>& slice : slices) { if (!slice.has_value()) continue; buffer_usage.push_back({slice->slice, MemoryAccess::kWrite}); } } return buffer_usage; } BarrierCmd::BarrierCmd(ExecutionStreamId execution_stream_id, ExecutionStreamId from_stream_id) : CommandBufferCmd(CommandBufferCmdType::kBarrierCmd, execution_stream_id), from_stream_id_(from_stream_id) {} absl::Status BarrierCmd::Record(const Thunk::ExecuteParams& execute_params, const RecordParams& record_params, se::CommandBuffer* command_buffer) { VLOG(5) << "BarrierCmd from stream " << from_stream_id_.value() << " to stream " << execution_stream_id().value(); if (from_stream_id_ != execution_stream_id()) { TF_RETURN_IF_ERROR(command_buffer->Barrier( CommandBufferCmd::GetExecutionScope(record_params, from_stream_id_), CommandBufferCmd::GetExecutionScope(record_params, execution_stream_id()))); } return absl::OkStatus(); } BarrierCmd::BufferUsageVector BarrierCmd::buffers() { return {}; } CollectiveCmd::CollectiveCmd(CommandBufferCmdType cmd_type, ExecutionStreamId execution_stream_id, ExecutionStreamId async_from_stream_id, NcclApi* nccl_api, NcclCollectiveConfig config) : CommandBufferCmd(cmd_type, execution_stream_id), async_from_stream_id_(async_from_stream_id), nccl_api_(nccl_api), config_(std::move(config)) {} absl::Status CollectiveCmd::BarrierIfAsync( se::CommandBuffer* command_buffer, se::StreamExecutor* executor, const CommandBufferCmd::RecordParams& record_params) { if (IsAsync()) { TF_RETURN_IF_ERROR( command_buffer->Barrier(CommandBufferCmd::GetExecutionScope( record_params, async_from_stream_id_), CommandBufferCmd::GetExecutionScope( record_params, execution_stream_id()))); VLOG(5) << "Insert Async barrier from stream " << async_from_stream_id_.value() << " to stream " << execution_stream_id().value(); } return absl::OkStatus(); } absl::Status CollectiveCmd::Prepare( const Thunk::PrepareParams& params, Thunk::ResourceRequests& resource_requests) { TF_ASSIGN_OR_RETURN( NcclCliqueKey clique_key, GetNcclCliqueKey(*params.collective_params, config().replica_groups, config().group_mode, nccl_stream_id(), GetAsyncStreamKind())); TF_ASSIGN_OR_RETURN( size_t num_local_participants, GetNumLocalParticipants(*params.collective_params, config().replica_groups, config().group_mode)); return resource_requests.AddClique(clique_key, num_local_participants); } absl::Status CollectiveCmd::AddTracedCommandBuffer( const Thunk::ExecuteParams& execute_params, const RecordParams& record_params, se::CommandBuffer* command_buffer, absl::FunctionRef<absl::Status(se::Stream*)> trace) { TF_ASSIGN_OR_RETURN(std::unique_ptr<se::CommandBuffer> nested_cmd, se::TraceCommandBufferFactory::Create( execute_params.stream->parent(), execute_params.command_buffer_trace_stream, trace)); ExecutionScopeId execution_scope_id = GetExecutionScope(record_params); return command_buffer->AddNestedCommandBuffer(execution_scope_id, *nested_cmd); } AllReduceCmd::AllReduceCmd( ExecutionStreamId execution_stream_id, ExecutionStreamId async_from_stream_id, NcclApi* nccl_api, NcclCollectiveConfig config, ReductionKind reduction_kind, absl::Span<const NcclCollectiveThunk::Buffer> buffers) : CollectiveCmd(CommandBufferCmdType::kAllReduceCmd, execution_stream_id, async_from_stream_id, nccl_api, std::move(config)), 
reduction_kind_(reduction_kind), buffers_(buffers.begin(), buffers.end()) {} absl::Status AllReduceCmd::Record(const Thunk::ExecuteParams& execute_params, const RecordParams& record_params, se::CommandBuffer* command_buffer) { TF_RETURN_IF_ERROR(BarrierIfAsync( command_buffer, execute_params.stream->parent(), record_params)); TF_ASSIGN_OR_RETURN( std::vector<DeviceBufferPair> device_buffers, ConvertToDeviceBuffers(execute_params.buffer_allocations, buffers_, config().operand_element_type)); ExecutionScopeId execution_scope_id = GetExecutionScope(record_params); VLOG(5) << "AllReduceCmd: reduction=" << ReductionKindString(reduction_kind_) << "; execution_scope_id=" << execution_scope_id.value(); for (size_t i = 0; i < device_buffers.size(); ++i) { VLOG(5) << " Src: " << buffers_[i].source_buffer << " (" << device_buffers[i].source_buffer.opaque() << ")"; VLOG(5) << " Dst: " << buffers_[i].destination_buffer << " (" << device_buffers[i].destination_buffer.opaque() << ")"; } if (!execute_params.collective_params || !execute_params.collective_cliques) { return absl::InvalidArgumentError( "AllReduceCmd requires collective parameters and cliques"); } TF_ASSIGN_OR_RETURN( NcclCommHandleWrapper comm_handle, GetNcclComm(*execute_params.collective_params, *execute_params.collective_cliques, config().replica_groups, config().group_mode, nccl_stream_id(), GetAsyncStreamKind())); NcclApi::NcclCommHandle comm = comm_handle.comm_handle; NcclApi::ScopedPersistentPlanAllocator scoped_allocator( comm, tsl::MakeRef<NcclApi::PersistentPlanAllocator>( execute_params.buffer_allocations->device_ordinal(), execute_params.buffer_allocations->memory_allocator(), execute_params.stream)); return AddTracedCommandBuffer( execute_params, record_params, command_buffer, [&](se::Stream* stream) { return RunAllReduce(nccl_api(), reduction_kind_, device_buffers, *stream, comm); }); } CommandBufferCmd::BufferUsageVector AllReduceCmd::buffers() { BufferUsageVector buffer_usage; for (auto& buffer : buffers_) { buffer_usage.emplace_back(buffer.source_buffer, MemoryAccess::kRead); buffer_usage.emplace_back(buffer.destination_buffer, MemoryAccess::kWrite); } return buffer_usage; } ReduceScatterCmd::ReduceScatterCmd( ExecutionStreamId execution_stream_id, ExecutionStreamId async_from_stream_id, NcclApi* nccl_api, NcclCollectiveConfig config, ReductionKind reduction_kind, absl::Span<const NcclCollectiveThunk::Buffer> buffers) : CollectiveCmd(CommandBufferCmdType::kReduceScatter, execution_stream_id, async_from_stream_id, nccl_api, std::move(config)), reduction_kind_(reduction_kind), buffers_(buffers.begin(), buffers.end()) {} absl::Status ReduceScatterCmd::Record( const Thunk::ExecuteParams& execute_params, const RecordParams& record_params, se::CommandBuffer* command_buffer) { TF_RETURN_IF_ERROR(BarrierIfAsync( command_buffer, execute_params.stream->parent(), record_params)); TF_ASSIGN_OR_RETURN( std::vector<DeviceBufferPair> device_buffers, ConvertToDeviceBuffers(execute_params.buffer_allocations, buffers_, config().operand_element_type)); ExecutionScopeId execution_scope_id = GetExecutionScope(record_params); VLOG(5) << "ReduceScatterCmd: reduction=" << ReductionKindString(reduction_kind_) << "; execution_scope_id=" << execution_scope_id.value(); for (size_t i = 0; i < device_buffers.size(); ++i) { VLOG(5) << " Src: " << buffers_[i].source_buffer << " (" << device_buffers[i].source_buffer.opaque() << ")"; VLOG(5) << " Dst: " << buffers_[i].destination_buffer << " (" << device_buffers[i].destination_buffer.opaque() << ")"; } if 
(!execute_params.collective_params || !execute_params.collective_cliques) { return absl::InvalidArgumentError( "ReduceScatterCmd requires collective parameters and cliques"); } TF_ASSIGN_OR_RETURN( NcclCommHandleWrapper comm_handle, GetNcclComm(*execute_params.collective_params, *execute_params.collective_cliques, config().replica_groups, config().group_mode, nccl_stream_id(), GetAsyncStreamKind())); NcclApi::NcclCommHandle comm = comm_handle.comm_handle; NcclApi::ScopedPersistentPlanAllocator scoped_allocator( comm, tsl::MakeRef<NcclApi::PersistentPlanAllocator>( execute_params.buffer_allocations->device_ordinal(), execute_params.buffer_allocations->memory_allocator(), execute_params.stream)); return AddTracedCommandBuffer( execute_params, record_params, command_buffer, [&](se::Stream* stream) { return RunReduceScatter(nccl_api(), reduction_kind_, device_buffers, *stream, comm); }); } CommandBufferCmd::BufferUsageVector ReduceScatterCmd::buffers() { BufferUsageVector buffer_usage; for (auto& buffer : buffers_) { buffer_usage.emplace_back(buffer.source_buffer, MemoryAccess::kRead); buffer_usage.emplace_back(buffer.destination_buffer, MemoryAccess::kWrite); } return buffer_usage; } AllToAllCmd::AllToAllCmd(ExecutionStreamId execution_stream_id, ExecutionStreamId async_from_stream_id, NcclApi* nccl_api, NcclCollectiveConfig config, bool has_split_dimension, absl::Span<const NcclCollectiveThunk::Buffer> buffers) : CollectiveCmd(CommandBufferCmdType::kAllToAll, execution_stream_id, async_from_stream_id, nccl_api, std::move(config)), has_split_dimension_(has_split_dimension), buffers_(buffers.begin(), buffers.end()) {} absl::Status AllToAllCmd::Record(const Thunk::ExecuteParams& execute_params, const RecordParams& record_params, se::CommandBuffer* command_buffer) { TF_RETURN_IF_ERROR(BarrierIfAsync( command_buffer, execute_params.stream->parent(), record_params)); TF_ASSIGN_OR_RETURN( std::vector<DeviceBufferPair> device_buffers, ConvertToDeviceBuffers(execute_params.buffer_allocations, buffers_, config().operand_element_type)); ExecutionScopeId execution_scope_id = GetExecutionScope(record_params); VLOG(5) << "AllToAllCmd, has_split_dimension=" << has_split_dimension_ << ", execution_scope_id=" << execution_scope_id.value(); for (size_t i = 0; i < device_buffers.size(); ++i) { VLOG(5) << " Src: " << buffers_[i].source_buffer << " (" << device_buffers[i].source_buffer.opaque() << ")"; VLOG(5) << " Dst: " << buffers_[i].destination_buffer << " (" << device_buffers[i].destination_buffer.opaque() << ")"; } if (!execute_params.collective_params || !execute_params.collective_cliques) { return absl::InvalidArgumentError( "ReduceScatterCmd requires collective parameters and cliques"); } TF_ASSIGN_OR_RETURN( NcclCommHandleWrapper comm_handle, GetNcclComm(*execute_params.collective_params, *execute_params.collective_cliques, config().replica_groups, config().group_mode, nccl_stream_id(), GetAsyncStreamKind())); NcclApi::NcclCommHandle comm = comm_handle.comm_handle; NcclApi::ScopedPersistentPlanAllocator scoped_allocator( comm, tsl::MakeRef<NcclApi::PersistentPlanAllocator>( execute_params.buffer_allocations->device_ordinal(), execute_params.buffer_allocations->memory_allocator(), execute_params.stream)); return AddTracedCommandBuffer( execute_params, record_params, command_buffer, [&](se::Stream* stream) { return RunAllToAll(nccl_api(), has_split_dimension_, device_buffers, *stream, comm); }); } CommandBufferCmd::BufferUsageVector AllToAllCmd::buffers() { BufferUsageVector buffer_usage; for (auto& 
buffer : buffers_) { buffer_usage.emplace_back(buffer.source_buffer, MemoryAccess::kRead); buffer_usage.emplace_back(buffer.destination_buffer, MemoryAccess::kWrite); } return buffer_usage; } AllGatherCmd::AllGatherCmd( ExecutionStreamId execution_stream_id, ExecutionStreamId async_from_stream_id, NcclApi* nccl_api, NcclCollectiveConfig config, absl::Span<const NcclCollectiveThunk::Buffer> buffers) : CollectiveCmd(CommandBufferCmdType::kAllGatherCmd, execution_stream_id, async_from_stream_id, nccl_api, std::move(config)), buffers_(buffers.begin(), buffers.end()) {} absl::Status AllGatherCmd::Record(const Thunk::ExecuteParams& execute_params, const RecordParams& record_params, se::CommandBuffer* command_buffer) { TF_RETURN_IF_ERROR(BarrierIfAsync( command_buffer, execute_params.stream->parent(), record_params)); TF_ASSIGN_OR_RETURN( std::vector<DeviceBufferPair> device_buffers, ConvertToDeviceBuffers(execute_params.buffer_allocations, buffers_, config().operand_element_type)); ExecutionScopeId execution_scope_id = GetExecutionScope(record_params); VLOG(5) << "AllGatherCmd: execution_scope_id=" << execution_scope_id.value(); for (size_t i = 0; i < device_buffers.size(); ++i) { VLOG(5) << " Src: " << buffers_[i].source_buffer << " (" << device_buffers[i].source_buffer.opaque() << ")"; VLOG(5) << " Dst: " << buffers_[i].destination_buffer << " (" << device_buffers[i].destination_buffer.opaque() << ")"; } if (!execute_params.collective_params || !execute_params.collective_cliques) { return absl::InvalidArgumentError( "AllGatherCmd requires collective parameters and cliques"); } TF_ASSIGN_OR_RETURN( NcclCommHandleWrapper comm_handle, GetNcclComm(*execute_params.collective_params, *execute_params.collective_cliques, config().replica_groups, config().group_mode, nccl_stream_id(), GetAsyncStreamKind())); NcclApi::NcclCommHandle comm = comm_handle.comm_handle; NcclApi::ScopedPersistentPlanAllocator scoped_allocator( comm, tsl::MakeRef<NcclApi::PersistentPlanAllocator>( execute_params.buffer_allocations->device_ordinal(), execute_params.buffer_allocations->memory_allocator(), execute_params.stream)); return AddTracedCommandBuffer( execute_params, record_params, command_buffer, [&](se::Stream* stream) { return RunAllGather(nccl_api(), device_buffers, *stream, comm); }); } CommandBufferCmd::BufferUsageVector AllGatherCmd::buffers() { BufferUsageVector buffer_usage; for (auto& buffer : buffers_) { buffer_usage.emplace_back(buffer.source_buffer, MemoryAccess::kRead); buffer_usage.emplace_back(buffer.destination_buffer, MemoryAccess::kWrite); } return buffer_usage; } CollectiveBroadcastCmd::CollectiveBroadcastCmd( ExecutionStreamId execution_stream_id, ExecutionStreamId async_from_stream_id, NcclApi* nccl_api, NcclCollectiveConfig config, absl::Span<const NcclCollectiveThunk::Buffer> buffers) : CollectiveCmd(CommandBufferCmdType::kCollectiveBroadcastCmd, execution_stream_id, async_from_stream_id, nccl_api, std::move(config)), buffers_(buffers.begin(), buffers.end()) {} absl::Status CollectiveBroadcastCmd::Record( const Thunk::ExecuteParams& execute_params, const RecordParams& record_params, se::CommandBuffer* command_buffer) { TF_RETURN_IF_ERROR(BarrierIfAsync( command_buffer, execute_params.stream->parent(), record_params)); TF_ASSIGN_OR_RETURN( std::vector<DeviceBufferPair> device_buffers, ConvertToDeviceBuffers(execute_params.buffer_allocations, buffers_, config().operand_element_type)); ExecutionScopeId execution_scope_id = GetExecutionScope(record_params); VLOG(5) << "CollectiveBroadcastCmd: 
execution_scope_id=" << execution_scope_id.value(); for (size_t i = 0; i < device_buffers.size(); ++i) { VLOG(5) << " Src: " << buffers_[i].source_buffer << " (" << device_buffers[i].source_buffer.opaque() << ")"; VLOG(5) << " Dst: " << buffers_[i].destination_buffer << " (" << device_buffers[i].destination_buffer.opaque() << ")"; } if (!execute_params.collective_params || !execute_params.collective_cliques) { return absl::InvalidArgumentError( "CollectiveBroadcastCmd requires collective parameters and cliques"); } TF_ASSIGN_OR_RETURN( NcclCommHandleWrapper comm_handle, GetNcclComm(*execute_params.collective_params, *execute_params.collective_cliques, config().replica_groups, config().group_mode, nccl_stream_id(), GetAsyncStreamKind())); NcclApi::NcclCommHandle comm = comm_handle.comm_handle; NcclApi::ScopedPersistentPlanAllocator scoped_allocator( comm, tsl::MakeRef<NcclApi::PersistentPlanAllocator>( execute_params.buffer_allocations->device_ordinal(), execute_params.buffer_allocations->memory_allocator(), execute_params.stream)); return AddTracedCommandBuffer( execute_params, record_params, command_buffer, [&](se::Stream* stream) { return RunCollectiveBroadcast(device_buffers, *stream, comm, nccl_api()); }); } CommandBufferCmd::BufferUsageVector CollectiveBroadcastCmd::buffers() { BufferUsageVector buffer_usage; for (auto& buffer : buffers_) { buffer_usage.emplace_back(buffer.source_buffer, MemoryAccess::kRead); buffer_usage.emplace_back(buffer.destination_buffer, MemoryAccess::kWrite); } return buffer_usage; } }
#include "xla/service/gpu/runtime/command_buffer_cmd.h" #include <array> #include <cstdint> #include <vector> #include "absl/functional/function_ref.h" #include "absl/status/status.h" #include "absl/strings/ascii.h" #include "absl/types/span.h" #include "xla/service/buffer_assignment.h" #include "xla/service/gpu/buffer_allocations.h" #include "xla/service/gpu/launch_dimensions.h" #include "xla/service/gpu/runtime/thunk.h" #include "xla/service/platform_util.h" #include "xla/service/service_executable_run_options.h" #include "xla/stream_executor/command_buffer.h" #include "xla/stream_executor/device_memory.h" #include "xla/stream_executor/gpu/gpu_test_kernels_fatbin.h" #include "xla/stream_executor/platform.h" #include "xla/stream_executor/platform_manager.h" #include "xla/stream_executor/stream_executor.h" #include "xla/stream_executor/stream_executor_memory_allocator.h" #include "xla/tsl/lib/core/status_test_util.h" #include "xla/types.h" #include "tsl/platform/status.h" #include "tsl/platform/statusor.h" #include "tsl/platform/test.h" #include "tsl/platform/test_benchmark.h" namespace xla::gpu { using BufferUsage = CommandBufferCmd::BufferUsage; using BufferUsageVector = CommandBufferCmd::BufferUsageVector; using MemoryAccess = CommandBufferCmd::MemoryAccess; static se::StreamExecutor* GpuExecutor() { auto name = absl::AsciiStrToUpper(PlatformUtil::CanonicalPlatformName("gpu").value()); auto* platform = se::PlatformManager::PlatformWithName(name).value(); return platform->ExecutorForDevice(0).value(); } static constexpr auto s0 = ExecutionStreamId(0); static constexpr auto s1 = ExecutionStreamId(1); struct TestOnlyCommandBufferCmd : public CommandBufferCmd { TestOnlyCommandBufferCmd(ExecutionStreamId execution_stream_id, BufferUsageVector buffer_usage) : CommandBufferCmd(CommandBufferCmdType::kUnknownCmd, execution_stream_id), buffer_usage(buffer_usage) {} absl::Status Record(const Thunk::ExecuteParams&, const RecordParams&, se::CommandBuffer*) override { return absl::OkStatus(); } BufferUsageVector buffers() override { return buffer_usage; } BufferUsageVector buffer_usage; }; class FakeCmd : public CommandBufferCmd { public: FakeCmd(ExecutionStreamId execution_stream_id) : CommandBufferCmd(CommandBufferCmdType::kTracedCommandBufferCmd, execution_stream_id) {} absl::Status Record(const Thunk::ExecuteParams& execute_params, const RecordParams& record_params, se::CommandBuffer* command_buffer) override { return absl::OkStatus(); } BufferUsageVector buffers() override { return BufferUsageVector{}; } }; TEST(CommandBufferCmdTest, SerializeExecution) { BufferAllocation alloc0(0, 1024, 0); auto slice0 = BufferAllocation::Slice(&alloc0, 0, 100); auto slice1 = BufferAllocation::Slice(&alloc0, 50, 100); auto use0 = BufferUsage(slice0, MemoryAccess::kRead); auto use1 = BufferUsage(slice1, MemoryAccess::kRead); CommandBufferCmdSequence commands( CommandBufferCmdSequence::SynchronizationMode::kSerialize); commands.Emplace<TestOnlyCommandBufferCmd>(s0, BufferUsageVector{use0}); commands.Emplace<TestOnlyCommandBufferCmd>(s0, BufferUsageVector{use1}); ASSERT_EQ(commands.barriers().size(), 2); EXPECT_EQ(commands.barriers().at(0), false); EXPECT_EQ(commands.barriers().at(1), true); } TEST(CommandBufferCmdTest, NoReadBarrier) { BufferAllocation alloc0(0, 1024, 0); auto slice0 = BufferAllocation::Slice(&alloc0, 0, 100); auto slice1 = BufferAllocation::Slice(&alloc0, 50, 100); auto use0 = BufferUsage(slice0, MemoryAccess::kRead); auto use1 = BufferUsage(slice1, MemoryAccess::kRead); CommandBufferCmdSequence 
commands; commands.Emplace<TestOnlyCommandBufferCmd>(s0, BufferUsageVector{use0}); commands.Emplace<TestOnlyCommandBufferCmd>(s0, BufferUsageVector{use1}); ASSERT_EQ(commands.barriers().size(), 2); EXPECT_EQ(commands.barriers().at(0), false); EXPECT_EQ(commands.barriers().at(1), false); } TEST(CommandBufferCmdTest, NoWriteBarrier) { BufferAllocation alloc0(0, 1024, 0); auto slice0 = BufferAllocation::Slice(&alloc0, 0, 100); auto slice1 = BufferAllocation::Slice(&alloc0, 200, 100); auto use0 = BufferUsage(slice0, MemoryAccess::kWrite); auto use1 = BufferUsage(slice1, MemoryAccess::kWrite); CommandBufferCmdSequence commands; commands.Emplace<TestOnlyCommandBufferCmd>(s0, BufferUsageVector{use0}); commands.Emplace<TestOnlyCommandBufferCmd>(s0, BufferUsageVector{use1}); ASSERT_EQ(commands.barriers().size(), 2); EXPECT_EQ(commands.barriers().at(0), false); EXPECT_EQ(commands.barriers().at(1), false); } TEST(CommandBufferCmdTest, WriteConflictBarrier) { BufferAllocation alloc0(0, 1024, 0); auto slice0 = BufferAllocation::Slice(&alloc0, 0, 100); auto slice1 = BufferAllocation::Slice(&alloc0, 50, 100); auto use0 = BufferUsage(slice0, MemoryAccess::kRead); auto use1 = BufferUsage(slice0, MemoryAccess::kRead); auto use2 = BufferUsage(slice1, MemoryAccess::kWrite); CommandBufferCmdSequence commands; commands.Emplace<TestOnlyCommandBufferCmd>(s0, BufferUsageVector{use0}); commands.Emplace<TestOnlyCommandBufferCmd>(s0, BufferUsageVector{use1}); commands.Emplace<TestOnlyCommandBufferCmd>(s0, BufferUsageVector{use2}); ASSERT_EQ(commands.barriers().size(), 3); EXPECT_EQ(commands.barriers().at(0), false); EXPECT_EQ(commands.barriers().at(1), false); EXPECT_EQ(commands.barriers().at(2), true); } TEST(CommandBufferCmdTest, NoWriteConflictsAcrossStreams) { BufferAllocation alloc0(0, 1024, 0); auto slice0 = BufferAllocation::Slice(&alloc0, 0, 100); auto slice1 = BufferAllocation::Slice(&alloc0, 50, 100); auto use0 = BufferUsage(slice0, MemoryAccess::kRead); auto use1 = BufferUsage(slice1, MemoryAccess::kWrite); CommandBufferCmdSequence commands; commands.Emplace<TestOnlyCommandBufferCmd>(s0, BufferUsageVector{use0}); commands.Emplace<TestOnlyCommandBufferCmd>(s1, BufferUsageVector{use1}); ASSERT_EQ(commands.barriers().size(), 2); EXPECT_EQ(commands.barriers().at(0), false); EXPECT_EQ(commands.barriers().at(1), false); } TEST(CommandBufferCmdTest, MemcpyCmd) { se::StreamExecutor* executor = GpuExecutor(); auto stream = executor->CreateStream().value(); int64_t length = 4; int64_t byte_length = sizeof(int32_t) * length; se::DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0); se::DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0); TF_ASSERT_OK(stream->Memset32(&a, 42, byte_length)); TF_ASSERT_OK(stream->MemZero(&b, byte_length)); BufferAllocation alloc_a(0, byte_length, 0); BufferAllocation alloc_b(1, byte_length, 0); BufferAllocation::Slice slice_a(&alloc_a, 0, byte_length); BufferAllocation::Slice slice_b(&alloc_b, 0, byte_length); CommandBufferCmdSequence commands; commands.Emplace<MemcpyDeviceToDeviceCmd>(s0, slice_b, slice_a, byte_length); ServiceExecutableRunOptions run_options; se::StreamExecutorMemoryAllocator allocator(executor); BufferAllocations allocations({a, b}, 0, &allocator); CommandBufferCmd::StateManager state; Thunk::ExecuteParams params = Thunk::ExecuteParams::Create( run_options, allocations, stream.get(), stream.get(), nullptr, nullptr); CommandBufferCmd::RecordParams record_params = {state}; auto command_buffer = 
executor->CreateCommandBuffer(se::CommandBuffer::Mode::kPrimary).value(); TF_ASSERT_OK(commands.Record(params, record_params, command_buffer.get())); TF_ASSERT_OK(command_buffer->Submit(stream.get())); std::vector<int32_t> dst(4, 0); TF_ASSERT_OK(stream->Memcpy(dst.data(), b, byte_length)); ASSERT_EQ(dst, std::vector<int32_t>(4, 42)); } TEST(CommandBufferCmdTest, BarrierCmd) { se::StreamExecutor* executor = GpuExecutor(); auto stream = executor->CreateStream().value(); int64_t length = 4; int64_t byte_length = sizeof(int32_t) * length; se::DeviceMemory<int32_t> a = executor->AllocateArray<int32_t>(length, 0); se::DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0); se::DeviceMemory<int32_t> c = executor->AllocateArray<int32_t>(length, 0); se::DeviceMemory<int32_t> d = executor->AllocateArray<int32_t>(length, 0); se::DeviceMemory<int32_t> e = executor->AllocateArray<int32_t>(length, 0); TF_ASSERT_OK(stream->Memset32(&a, 42, byte_length)); TF_ASSERT_OK(stream->MemZero(&b, byte_length)); TF_ASSERT_OK(stream->MemZero(&c, byte_length)); TF_ASSERT_OK(stream->MemZero(&d, byte_length)); TF_ASSERT_OK(stream->MemZero(&e, byte_length)); BufferAllocation alloc_a(0, byte_length, 0); BufferAllocation alloc_b(1, byte_length, 0); BufferAllocation alloc_c(2, byte_length, 0); BufferAllocation alloc_d(3, byte_length, 0); BufferAllocation alloc_e(4, byte_length, 0); BufferAllocation::Slice slice_a(&alloc_a, 0, byte_length); BufferAllocation::Slice slice_b(&alloc_b, 0, byte_length); BufferAllocation::Slice slice_c(&alloc_c, 0, byte_length); BufferAllocation::Slice slice_d(&alloc_d, 0, byte_length); BufferAllocation::Slice slice_e(&alloc_e, 0, byte_length); CommandBufferCmdSequence commands; commands.Emplace<MemcpyDeviceToDeviceCmd>(s0, slice_b, slice_a, byte_length); commands.Emplace<BarrierCmd>(s1, s0); commands.Emplace<MemcpyDeviceToDeviceCmd>(s1, slice_c, slice_b, byte_length); commands.Emplace<BarrierCmd>(s0, s1); commands.Emplace<MemcpyDeviceToDeviceCmd>(s0, slice_d, slice_c, byte_length); commands.Emplace<BarrierCmd>(s1, s0); commands.Emplace<MemcpyDeviceToDeviceCmd>(s1, slice_e, slice_d, byte_length); ServiceExecutableRunOptions run_options; se::StreamExecutorMemoryAllocator allocator(executor); BufferAllocations allocations({a, b, c, d, e}, 0, &allocator); CommandBufferCmd::StateManager state; Thunk::ExecuteParams params = Thunk::ExecuteParams::Create( run_options, allocations, stream.get(), stream.get(), nullptr, nullptr); CommandBufferCmd::RecordParams record_params = {state}; auto command_buffer = executor->CreateCommandBuffer(se::CommandBuffer::Mode::kPrimary).value(); TF_ASSERT_OK(commands.Record(params, record_params, command_buffer.get())); TF_ASSERT_OK(command_buffer->Submit(stream.get())); std::vector<int32_t> dst_b(4, 0); std::vector<int32_t> dst_c(4, 0); std::vector<int32_t> dst_d(4, 0); std::vector<int32_t> dst_e(4, 0); TF_ASSERT_OK(stream->Memcpy(dst_b.data(), b, byte_length)); TF_ASSERT_OK(stream->Memcpy(dst_c.data(), c, byte_length)); TF_ASSERT_OK(stream->Memcpy(dst_d.data(), d, byte_length)); TF_ASSERT_OK(stream->Memcpy(dst_e.data(), e, byte_length)); ASSERT_EQ(dst_b, std::vector<int32_t>(4, 42)); ASSERT_EQ(dst_c, std::vector<int32_t>(4, 42)); ASSERT_EQ(dst_d, std::vector<int32_t>(4, 42)); ASSERT_EQ(dst_e, std::vector<int32_t>(4, 42)); } TEST(CommandBufferCmdTest, LaunchCmd) { se::StreamExecutor* executor = GpuExecutor(); auto stream = executor->CreateStream().value(); int64_t length = 4; int64_t byte_length = sizeof(int32_t) * length; se::DeviceMemory<int32_t> a = 
executor->AllocateArray<int32_t>(length, 0); se::DeviceMemory<int32_t> b = executor->AllocateArray<int32_t>(length, 0); TF_ASSERT_OK(stream->Memset32(&a, 42, byte_length)); TF_ASSERT_OK(stream->MemZero(&b, byte_length)); BufferAllocation alloc_a(0, byte_length, 0); BufferAllocation alloc_b(1, byte_length, 0); BufferAllocation::Slice slice_a(&alloc_a, 0, byte_length); BufferAllocation::Slice slice_b(&alloc_b, 0, byte_length); auto args = {slice_a, slice_a, slice_b}; auto args_access = {MemoryAccess::kRead, MemoryAccess::kRead, MemoryAccess::kWrite}; CommandBufferCmdSequence commands; commands.Emplace<LaunchCmd>(s0, "AddI32", args, args_access, LaunchDimensions(1, 4), 0); TF_ASSERT_OK_AND_ASSIGN(std::vector<uint8_t> fatbin, se::gpu::GetGpuTestKernelsFatbin()); Thunk::ExecutableSource source = {{}, fatbin}; CommandBufferCmd::StateManager state; TF_ASSERT_OK(commands.Initialize({executor, source}, state)); ServiceExecutableRunOptions run_options; se::StreamExecutorMemoryAllocator allocator(executor); BufferAllocations allocations({a, b}, 0, &allocator); Thunk::ExecuteParams params = Thunk::ExecuteParams::Create( run_options, allocations, stream.get(), stream.get(), nullptr, nullptr); CommandBufferCmd::RecordParams record_params = {state}; auto command_buffer = executor->CreateCommandBuffer(se::CommandBuffer::Mode::kPrimary).value(); TF_ASSERT_OK(commands.Record(params, record_params, command_buffer.get())); TF_ASSERT_OK(command_buffer->Submit(stream.get())); std::vector<int32_t> dst(4, 0); TF_ASSERT_OK(stream->Memcpy(dst.data(), b, byte_length)); ASSERT_EQ(dst, std::vector<int32_t>(4, 42 + 42)); } TEST(CommandBufferCmdStateManageTest, GetOrCreateState) { struct TestState : public CommandBufferCmd::State { int32_t value = 0; }; CommandBufferCmd* cmd = reinterpret_cast<CommandBufferCmd*>(0x1234567); CommandBufferCmd::StateManager state_manager; auto* state0 = state_manager.GetOrNull<TestState>(cmd); ASSERT_EQ(state0, nullptr); auto* state1 = state_manager.GetOrCreate<TestState>(cmd); ASSERT_EQ(state1->value, 0); state1->value += 42; auto* state2 = state_manager.GetOrCreate<TestState>(cmd); ASSERT_EQ(state2->value, 42); ASSERT_EQ(state1, state2); } TEST(TracedCommandBuffer, GetOrUpdateCommandBuffer) { auto run_traced_test = [](int trace_cache_size) { se::StreamExecutor* executor = GpuExecutor(); auto stream = executor->CreateStream().value(); auto traced_cmd = FakeCmd(ExecutionStreamId(0)); BufferAllocation alloc0(0, 1024, 0); BufferAllocation alloc1(1, 1024, 0); CommandBufferCmd::BufferUsageVector buffers = { {BufferAllocation::Slice(&alloc0, 0, 1024), MemoryAccess::kRead}, {BufferAllocation::Slice(&alloc1, 0, 1024), MemoryAccess::kWrite}}; TracedCommandBuffer traced_cmd_buffer(&traced_cmd, buffers, trace_cache_size); se::DeviceMemoryBase mem0(reinterpret_cast<void*>(0x01234567)); se::DeviceMemoryBase mem1(reinterpret_cast<void*>(0x12345670)); se::StreamExecutorMemoryAllocator allocator(executor); BufferAllocations allocations({mem0, mem1}, 0, &allocator); int64_t num_calls = 0; auto trace = [&](se::Stream*) { num_calls++; return absl::OkStatus(); }; TF_ASSERT_OK_AND_ASSIGN(auto* command_buffer0, traced_cmd_buffer.GetOrTraceCommandBuffer( &allocations, executor, stream.get(), trace)); TF_ASSERT_OK_AND_ASSIGN(auto* command_buffer1, traced_cmd_buffer.GetOrTraceCommandBuffer( &allocations, executor, stream.get(), trace)); ASSERT_EQ(command_buffer0, command_buffer1); EXPECT_EQ(num_calls, 1); se::DeviceMemoryBase mem2(reinterpret_cast<void*>(0x23456701)); allocations = BufferAllocations({mem0, mem2}, 
0, &allocator); TF_ASSERT_OK_AND_ASSIGN(auto* command_buffer2, traced_cmd_buffer.GetOrTraceCommandBuffer( &allocations, executor, stream.get(), trace)); ASSERT_NE(command_buffer0, command_buffer2); EXPECT_EQ(num_calls, 2); allocations = BufferAllocations({mem0, mem1}, 0, &allocator); TF_ASSERT_OK_AND_ASSIGN(auto* command_buffer3, traced_cmd_buffer.GetOrTraceCommandBuffer( &allocations, executor, stream.get(), trace)); ASSERT_EQ(command_buffer0, command_buffer3); EXPECT_EQ(num_calls, 2); allocations = BufferAllocations({mem0, mem0}, 0, &allocator); TF_ASSERT_OK_AND_ASSIGN(auto* command_buffer4, traced_cmd_buffer.GetOrTraceCommandBuffer( &allocations, executor, stream.get(), trace)); ASSERT_NE(command_buffer4, command_buffer3); ASSERT_NE(command_buffer4, command_buffer2); EXPECT_EQ(num_calls, 3); allocations = BufferAllocations({mem0, mem1}, 0, &allocator); TF_ASSERT_OK_AND_ASSIGN(auto* command_buffer5, traced_cmd_buffer.GetOrTraceCommandBuffer( &allocations, executor, stream.get(), trace)); ASSERT_EQ(command_buffer0, command_buffer5); EXPECT_EQ(num_calls, 3); }; run_traced_test(2); run_traced_test(3); } static void BM_GetOrTraceCommandBuffer(benchmark::State& state) { se::StreamExecutor* executor = GpuExecutor(); TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream()); BufferAllocation alloc0(0, 1024, 0); BufferAllocation alloc1(1, 1024, 0); CommandBufferCmd::BufferUsageVector buffers = { {BufferAllocation::Slice(&alloc0, 0, 1024), MemoryAccess::kRead}, {BufferAllocation::Slice(&alloc1, 0, 1024), MemoryAccess::kWrite}}; se::DeviceMemoryBase mem0(reinterpret_cast<void*>(0x01234567)); se::DeviceMemoryBase mem1(reinterpret_cast<void*>(0x12345670)); se::StreamExecutorMemoryAllocator allocator(executor); std::array<BufferAllocations, 4> allocations = { BufferAllocations({mem0, mem1}, 0, &allocator), BufferAllocations({mem1, mem0}, 0, &allocator), BufferAllocations({mem0, mem0}, 0, &allocator), BufferAllocations({mem1, mem1}, 0, &allocator), }; int32_t index = 0; auto traced_cmd = FakeCmd(ExecutionStreamId(0)); TracedCommandBuffer traced_cmd_buffer(&traced_cmd, buffers); auto trace = [](se::Stream*) { return absl::OkStatus(); }; absl::FunctionRef<absl::Status(se::Stream*)> trace_ref(trace); for (auto s : state) { TF_CHECK_OK(traced_cmd_buffer .GetOrTraceCommandBuffer(&allocations[index++ % 4], executor, stream.get(), trace_ref) .status()); } } BENCHMARK(BM_GetOrTraceCommandBuffer); }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/runtime/command_buffer_cmd.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/runtime/command_buffer_cmd_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
d68cdea6-84b3-4713-87b5-65ec06ff8085
cpp
tensorflow/tensorflow
nccl_clique_key
third_party/xla/xla/service/gpu/runtime/nccl_clique_key.cc
third_party/xla/xla/service/gpu/runtime/nccl_clique_key_test.cc
#include "xla/service/gpu/runtime/nccl_clique_key.h" #include <algorithm> #include <cstdint> #include <optional> #include <string> #include <string_view> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_format.h" #include "absl/strings/str_join.h" #include "absl/types/span.h" #include "xla/service/global_device_id.h" #include "tsl/platform/logging.h" namespace xla::gpu { NcclCliqueKey::NcclCliqueKey( std::vector<GlobalDeviceId> devices, NcclStreamId stream_id, AsyncStreamKind stream_kind, std::vector<std::vector<GlobalDeviceId>> participant_groups) : devices_(std::move(devices)), stream_id_(stream_id), stream_kind_(stream_kind), participant_groups_(std::move(participant_groups)) { for (std::vector<GlobalDeviceId>& group : participant_groups_) { absl::c_sort(group); } auto compare_groups = [](const std::vector<GlobalDeviceId>& lhs, const std::vector<GlobalDeviceId>& rhs) { CHECK(!lhs.empty()); CHECK(!rhs.empty()); return lhs[0] < rhs[0]; }; absl::c_sort(participant_groups_, compare_groups); } absl::Span<const GlobalDeviceId> NcclCliqueKey::devices() const { return devices_; } NcclStreamId NcclCliqueKey::stream_id() const { return stream_id_; } std::optional<int64_t> NcclCliqueKey::rank(GlobalDeviceId id) const { if (auto it = absl::c_find(devices_, id); it != devices_.end()) { return it - devices_.begin(); } return std::nullopt; } bool NcclCliqueKey::IsSubsetOf(const NcclCliqueKey& other) const { return stream_id_ == other.stream_id_ && absl::c_all_of(devices_, [&](GlobalDeviceId id) { return absl::c_linear_search(other.devices_, id); }); } std::string NcclCliqueKey::ToString() const { std::string group_string = ""; if (!participant_groups_.empty()) { std::vector<std::string> values; values.reserve(participant_groups_.size()); for (const auto& group : participant_groups_) { values.push_back("[" + GlobalDeviceIdsToString(group) + "]"); } group_string = absl::StrFormat("; groups=[%s]", absl::StrJoin(values, ",")); } return absl::StrFormat("devices=[%s]; stream=%d%s", GlobalDeviceIdsToString(devices_), stream_id_.value(), group_string); } bool operator==(const NcclCliqueKey& a, const NcclCliqueKey& b) { return a.devices_ == b.devices_ && a.stream_id_ == b.stream_id_ && a.participant_groups_ == b.participant_groups_; } bool operator<(const NcclCliqueKey& a, const NcclCliqueKey& b) { if (a.devices_.size() < b.devices_.size()) return true; if (b.devices_.size() < a.devices_.size()) return false; if (a.devices_ < b.devices_) return true; if (b.devices_ < a.devices_) return false; return a.stream_id_.value() < b.stream_id_.value(); } bool operator>(const NcclCliqueKey& a, const NcclCliqueKey& b) { if (a.devices_.size() > b.devices_.size()) return true; if (b.devices_.size() > a.devices_.size()) return false; if (a.devices_ > b.devices_) return true; if (b.devices_ > a.devices_) return false; return a.stream_id_.value() < b.stream_id_.value(); } NcclCliqueId::NcclCliqueId() { std::fill(data_.begin(), data_.end(), 0); } NcclCliqueId::NcclCliqueId(char bytes[kSize]) { std::copy(bytes, bytes + kSize, data_.data()); } absl::StatusOr<NcclCliqueId> NcclCliqueId::FromString(std::string_view str) { if (str.size() != kSize) { return absl::InvalidArgumentError( absl::StrFormat("Invalid NCCL clique id size: %d , expected %d bytes", str.size(), kSize)); } char bytes[kSize]; std::copy(str.data(), str.data() + kSize, bytes); return NcclCliqueId(bytes); } absl::Span<const char> 
NcclCliqueId::data() const { return data_; } std::string NcclCliqueId::ToString() const { return std::string(data_.data(), data_.size()); } }
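// Illustrative sketch only (not part of the upstream nccl_clique_key.cc):
// operator> above orders cliques with more devices first, so a
// std::greater-ordered btree map visits supersets before their subsets.
// Assuming the declarations in nccl_clique_key.h:
//
//   absl::btree_map<NcclCliqueKey, int64_t, std::greater<NcclCliqueKey>> map;
//   map[NcclCliqueKey({GlobalDeviceId(0), GlobalDeviceId(1)},
//                     NcclStreamId(0))] = 0;
//   map[NcclCliqueKey({GlobalDeviceId(0)}, NcclStreamId(0))] = 1;
//   // map.begin()->first is the two-device clique.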
#include "xla/service/gpu/runtime/nccl_clique_key.h" #include <algorithm> #include <array> #include <cstdint> #include <cstring> #include <functional> #include <optional> #include <vector> #include "absl/container/btree_map.h" #include "absl/status/status.h" #include "xla/service/global_device_id.h" #include "tsl/platform/status_matchers.h" #include "tsl/platform/test.h" namespace xla::gpu { using ::tsl::testing::StatusIs; static NcclCliqueKey GetBaseCliqueKey() { return NcclCliqueKey({GlobalDeviceId(0), GlobalDeviceId(1)}, NcclStreamId(0), AsyncStreamKind::kCollective, std::vector<std::vector<GlobalDeviceId>>{ {GlobalDeviceId(0), GlobalDeviceId(1)}, {GlobalDeviceId(2), GlobalDeviceId(3)}}); } TEST(NcclCliqueKeyTest, IsSubsetOf) { GlobalDeviceId id0 = GlobalDeviceId(0); GlobalDeviceId id1 = GlobalDeviceId(1); GlobalDeviceId id2 = GlobalDeviceId(2); GlobalDeviceId id3 = GlobalDeviceId(3); NcclCliqueKey key0({id0, id1}, NcclStreamId(0)); NcclCliqueKey key1({id0, id1, id2, id3}, NcclStreamId(0)); NcclCliqueKey key2({id0, id1, id2, id3}, NcclStreamId(1)); NcclCliqueKey key3({id1, id2, id3}, NcclStreamId(0)); EXPECT_TRUE(key0.IsSubsetOf(key1)); EXPECT_FALSE(key0.IsSubsetOf(key2)); EXPECT_FALSE(key0.IsSubsetOf(key3)); } TEST(NcclCliqueKeyTest, Compare) { GlobalDeviceId id0 = GlobalDeviceId(0); GlobalDeviceId id1 = GlobalDeviceId(1); GlobalDeviceId id2 = GlobalDeviceId(2); GlobalDeviceId id3 = GlobalDeviceId(3); NcclCliqueKey key0({id0, id1}, NcclStreamId(0)); NcclCliqueKey key1({id1, id2, id3}, NcclStreamId(0)); NcclCliqueKey key2({id1, id2, id3}, NcclStreamId(1)); EXPECT_LT(key0, key1); EXPECT_GT(key1, key0); EXPECT_LT(key1, key2); } TEST(NcclCliqueKeyTest, CompareWithParticipantGroups) { GlobalDeviceId id0 = GlobalDeviceId(0); GlobalDeviceId id1 = GlobalDeviceId(1); GlobalDeviceId id2 = GlobalDeviceId(2); GlobalDeviceId id3 = GlobalDeviceId(3); NcclCliqueKey key0({id0, id1}, NcclStreamId(0), AsyncStreamKind::kCollective, std::vector<std::vector<GlobalDeviceId>>{{id0, id1}}); NcclCliqueKey key1( {id0, id1}, NcclStreamId(0), AsyncStreamKind::kCollective, std::vector<std::vector<GlobalDeviceId>>{{id0, id1}, {id2, id3}}); EXPECT_FALSE(key0 == key1); NcclCliqueKey key0_nogroups({id0, id1}, NcclStreamId(0)); NcclCliqueKey key1_nogroups({id0, id1}, NcclStreamId(0)); EXPECT_EQ(key0_nogroups, key1_nogroups); } TEST(NcclCliqueKeyTest, CompareWithPermutedParticipantGroups) { GlobalDeviceId id0 = GlobalDeviceId(0); GlobalDeviceId id1 = GlobalDeviceId(1); GlobalDeviceId id2 = GlobalDeviceId(2); GlobalDeviceId id3 = GlobalDeviceId(3); NcclCliqueKey key0( {id0, id1}, NcclStreamId(0), AsyncStreamKind::kCollective, std::vector<std::vector<GlobalDeviceId>>{{id3, id2}, {id0, id1}}); NcclCliqueKey key1( {id0, id1}, NcclStreamId(0), AsyncStreamKind::kCollective, std::vector<std::vector<GlobalDeviceId>>{{id0, id1}, {id2, id3}}); EXPECT_EQ(key0, key1); NcclCliqueKey key_other( {id0, id1}, NcclStreamId(0), AsyncStreamKind::kCollective, std::vector<std::vector<GlobalDeviceId>>{{id0, id2}, {id1, id3}}); EXPECT_FALSE(key0 == key_other); } TEST(NcclCliqueKeyTest, BtreeIterationOrder) { GlobalDeviceId id0 = GlobalDeviceId(0); GlobalDeviceId id1 = GlobalDeviceId(1); GlobalDeviceId id2 = GlobalDeviceId(2); GlobalDeviceId id3 = GlobalDeviceId(3); NcclCliqueKey key0({id0, id2}, NcclStreamId(0)); NcclCliqueKey key1({id0, id1, id2, id3}, NcclStreamId(0)); absl::btree_map<NcclCliqueKey, int64_t, std::greater<NcclCliqueKey>> map; map[key0] = 0; map[key1] = 1; EXPECT_EQ(map.begin()->first, key1); } TEST(NcclCliqueKeyGettersTest, 
Devices) { EXPECT_THAT( GetBaseCliqueKey().devices(), ::testing::UnorderedElementsAre(GlobalDeviceId(0), GlobalDeviceId(1))); } TEST(NcclCliqueKeyGettersTest, Rank) { auto key = GetBaseCliqueKey(); EXPECT_EQ(key.rank(GlobalDeviceId(0)), 0); EXPECT_EQ(key.rank(GlobalDeviceId(1)), 1); EXPECT_EQ(key.rank(GlobalDeviceId(2)), std::nullopt); EXPECT_EQ(key.rank(GlobalDeviceId(3)), std::nullopt); } TEST(NcclCliqueKeyGettersTest, StreamId) { EXPECT_EQ(GetBaseCliqueKey().stream_id(), NcclStreamId(0)); } TEST(NcclCliqueKeyGetterTest, ToString) { EXPECT_EQ(GetBaseCliqueKey().ToString(), "devices=[0,1]; stream=0; groups=[[0,1],[2,3]]"); } TEST(NcclCliqueIdGettersTest, Data) { std::array<char, 128> id; std::fill(id.begin(), id.end(), 0x01); NcclCliqueId clique_id(id.data()); EXPECT_EQ(std::memcmp(clique_id.data().data(), id.data(), 128), 0); } TEST(NcclCliqueIdStringTest, ToString) { std::array<char, 128> id; std::fill(id.begin(), id.end(), 0x01); NcclCliqueId clique_id(id.data()); for (int i = 0; i < 128; ++i) { EXPECT_THAT(clique_id.ToString().substr(i, 1), "\x1"); } } TEST(NcclCliqueIdStringTest, FromInvalidString) { EXPECT_THAT(NcclCliqueId::FromString("123"), StatusIs(absl::StatusCode::kInvalidArgument)); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/runtime/nccl_clique_key.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/runtime/nccl_clique_key_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
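Illustrative sketch (not part of the record above): given the ordering and IsSubsetOf semantics exercised by the tests, a caller can scan a std::greater-ordered btree map to find the largest registered clique containing a given key. The FindLargestClique helper and its map argument are assumptions for illustration only; only constructors and methods shown in the record are used.

#include <cstdint>
#include <functional>

#include "absl/container/btree_map.h"
#include "xla/service/gpu/runtime/nccl_clique_key.h"

namespace xla::gpu {

// Mirrors the BtreeIterationOrder test: under std::greater the larger clique
// comes first, so the first superset found is the largest registered one.
const NcclCliqueKey* FindLargestClique(
    const absl::btree_map<NcclCliqueKey, int64_t, std::greater<NcclCliqueKey>>&
        cliques,
    const NcclCliqueKey& subset) {
  for (const auto& [key, value] : cliques) {
    if (subset.IsSubsetOf(key)) return &key;
  }
  return nullptr;
}

}  // namespace xla::gpu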
a996db50-b62e-4fa2-a166-2a18a1eab45f
cpp
tensorflow/tensorflow
for_all_thunks
third_party/xla/xla/service/gpu/runtime/for_all_thunks.cc
third_party/xla/xla/service/gpu/runtime/for_all_thunks_test.cc
#include "xla/service/gpu/runtime/for_all_thunks.h" #include <memory> #include <optional> #include "absl/functional/function_ref.h" #include "xla/service/gpu/runtime/command_buffer_thunk.h" #include "xla/service/gpu/runtime/conditional_thunk.h" #include "xla/service/gpu/runtime/dynamic_slice_thunk.h" #include "xla/service/gpu/runtime/sequential_thunk.h" #include "xla/service/gpu/runtime/thunk.h" #include "xla/service/gpu/runtime/while_thunk.h" #include "tsl/platform/casts.h" namespace xla::gpu { void ForAllThunks(absl::FunctionRef<void(const Thunk*)> fn, const Thunk* thunk) { fn(thunk); switch (thunk->kind()) { case Thunk::kDynamicSlice: ForAllThunks(fn, tensorflow::down_cast<const DynamicSliceThunk*>(thunk) ->embedded_thunk()); break; case Thunk::kCommandBuffer: if (const std::unique_ptr<SequentialThunk>& sequence = tensorflow::down_cast<const CommandBufferThunk*>(thunk)->thunks(); sequence != nullptr) { ForAllThunks(fn, sequence.get()); } break; case Thunk::kConditional: for (const std::unique_ptr<SequentialThunk>& branch : tensorflow::down_cast<const ConditionalThunk*>(thunk) ->branch_thunks()) { ForAllThunks(fn, branch.get()); } break; case Thunk::kSequential: ForAllThunks( fn, &tensorflow::down_cast<const SequentialThunk*>(thunk)->thunks()); break; case Thunk::kWhile: ForAllThunks(fn, tensorflow::down_cast<const WhileThunk*>(thunk) ->condition_thunk_sequence()); ForAllThunks(fn, tensorflow::down_cast<const WhileThunk*>(thunk) ->body_thunk_sequence()); break; case Thunk::kCholesky: case Thunk::kConvolution: case Thunk::kConvolutionReorder: case Thunk::kCopy: case Thunk::kCopyDone: case Thunk::kCubSort: case Thunk::kCublasLtMatmul: case Thunk::kCustomCall: case Thunk::kCustomKernel: case Thunk::kCuDnn: case Thunk::kFft: case Thunk::kGemm: case Thunk::kInfeed: case Thunk::kKernel: case Thunk::kMemset32BitValue: case Thunk::kMemzero: case Thunk::kNcclAllGather: case Thunk::kNcclAllGatherStart: case Thunk::kNcclAllGatherDone: case Thunk::kNcclAllReduce: case Thunk::kNcclAllReduceStart: case Thunk::kNcclAllReduceDone: case Thunk::kNcclCollectiveBroadcast: case Thunk::kNcclCollectiveBroadcastStart: case Thunk::kNcclCollectiveBroadcastDone: case Thunk::kNcclCollectivePermute: case Thunk::kNcclCollectivePermuteStart: case Thunk::kNcclCollectivePermuteDone: case Thunk::kNcclReduceScatter: case Thunk::kNcclReduceScatterStart: case Thunk::kNcclReduceScatterDone: case Thunk::kNcclAllToAll: case Thunk::kNcclAllToAllStart: case Thunk::kNcclAllToAllDone: case Thunk::kNcclSend: case Thunk::kNcclSendDone: case Thunk::kNcclRecv: case Thunk::kNcclRecvDone: case Thunk::kNorm: case Thunk::kOutfeed: case Thunk::kPartitionId: case Thunk::kRecv: case Thunk::kRecvDone: case Thunk::kReplicaId: case Thunk::kSend: case Thunk::kSendDone: case Thunk::kTriangularSolve: case Thunk::kWaitForStreams: break; } } void ForAllThunks(absl::FunctionRef<void(const Thunk*)> fn, const ThunkSequence* thunks) { for (const std::unique_ptr<Thunk>& thunk : *thunks) { ForAllThunks(fn, thunk.get()); } } }
#include "xla/service/gpu/runtime/for_all_thunks.h" #include <memory> #include <utility> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/status/status.h" #include "xla/service/buffer_assignment.h" #include "xla/service/gpu/runtime/command_buffer_cmd.h" #include "xla/service/gpu/runtime/command_buffer_thunk.h" #include "xla/service/gpu/runtime/conditional_thunk.h" #include "xla/service/gpu/runtime/dynamic_slice_thunk.h" #include "xla/service/gpu/runtime/sequential_thunk.h" #include "xla/service/gpu/runtime/thunk.h" #include "xla/service/gpu/runtime/while_thunk.h" namespace xla::gpu { namespace { using ::testing::IsSupersetOf; using ::testing::UnorderedElementsAre; std::vector<const Thunk*> GetAllThunks(Thunk* root) { std::vector<const Thunk*> thunks; ForAllThunks([&](const Thunk* thunk) { thunks.push_back(thunk); }, root); return thunks; } struct DummyThunk : public Thunk { DummyThunk() : Thunk(Thunk::Kind::kGemm, Thunk::ThunkInfo()) {} absl::Status ExecuteOnStream(const ExecuteParams& params) override { return absl::OkStatus(); } }; TEST(ForAllThunksTest, SingleThunk) { DummyThunk thunk; EXPECT_THAT(GetAllThunks(&thunk), UnorderedElementsAre(&thunk)); } TEST(ForAllThunksTest, DynamicSliceThunk) { auto thunk = std::make_unique<DummyThunk>(); Thunk* thunk_ptr = thunk.get(); auto thunk_sequence = std::make_unique<ThunkSequence>(); thunk_sequence->push_back(std::move(thunk)); DynamicSliceThunk dynamic_slice_thunk( Thunk::ThunkInfo(), std::move(thunk_sequence), {}, {}, {}, {}, {}, {}); EXPECT_THAT(GetAllThunks(&dynamic_slice_thunk), IsSupersetOf<const Thunk*>({thunk_ptr, &dynamic_slice_thunk})); } TEST(ForAllThunksTest, CommandBufferThunk) { auto thunk = std::make_unique<DummyThunk>(); Thunk* thunk_ptr = thunk.get(); ThunkSequence thunk_sequence; thunk_sequence.push_back(std::move(thunk)); auto sequential_thunk = std::make_unique<SequentialThunk>( Thunk::ThunkInfo(), std::move(thunk_sequence)); Thunk* sequential_thunk_ptr = sequential_thunk.get(); CommandBufferThunk command_buffer_thunk(CommandBufferCmdSequence(), Thunk::ThunkInfo(), std::move(sequential_thunk)); EXPECT_THAT(GetAllThunks(&command_buffer_thunk), UnorderedElementsAre(thunk_ptr, &command_buffer_thunk, sequential_thunk_ptr)); } TEST(ForAllThunksTest, ConditionalThunk) { auto thunk = std::make_unique<DummyThunk>(); Thunk* thunk_ptr = thunk.get(); ThunkSequence thunk_sequence; thunk_sequence.push_back(std::move(thunk)); auto sequential_thunk = std::make_unique<SequentialThunk>( Thunk::ThunkInfo(), std::move(thunk_sequence)); SequentialThunk* sequential_thunk_ptr = sequential_thunk.get(); ConditionalThunkConfig config; config.branch_thunks.push_back(std::move(sequential_thunk)); ConditionalThunk conditional_thunk(Thunk::ThunkInfo(), std::move(config), BufferAllocation::Slice()); EXPECT_THAT(GetAllThunks(&conditional_thunk), UnorderedElementsAre(thunk_ptr, sequential_thunk_ptr, &conditional_thunk)); } TEST(ForAllThunksTest, WhileThunk) { auto condition_thunk = std::make_unique<DummyThunk>(); Thunk* condition_thunk_ptr = condition_thunk.get(); ThunkSequence condition_thunk_sequence; condition_thunk_sequence.push_back(std::move(condition_thunk)); auto body_thunk = std::make_unique<DummyThunk>(); Thunk* body_thunk_ptr = body_thunk.get(); ThunkSequence body_thunk_sequence; body_thunk_sequence.push_back(std::move(body_thunk)); WhileThunk while_thunk( Thunk::ThunkInfo(), BufferAllocation::Slice(), std::make_unique<SequentialThunk>(Thunk::ThunkInfo(), std::move(condition_thunk_sequence)), 
std::make_unique<SequentialThunk>(Thunk::ThunkInfo(), std::move(body_thunk_sequence))); EXPECT_THAT(GetAllThunks(&while_thunk), IsSupersetOf<const Thunk*>( {condition_thunk_ptr, body_thunk_ptr, &while_thunk})); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/runtime/for_all_thunks.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/runtime/for_all_thunks_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
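Illustrative sketch (not part of the record above): ForAllThunks visits the root thunk and then recurses into every nested sequence, branch, loop body, and command buffer, so a caller can fold over the whole tree with a capturing lambda. CountThunksOfKind is a hypothetical helper, not an API in the file; it relies only on the ForAllThunks overload and Thunk::kind() shown above.

#include <cstddef>

#include "xla/service/gpu/runtime/for_all_thunks.h"
#include "xla/service/gpu/runtime/thunk.h"

namespace xla::gpu {

// Counts all thunks of `kind` reachable from `thunks`, including thunks
// nested inside sequential, conditional, while, and command-buffer thunks.
size_t CountThunksOfKind(const ThunkSequence* thunks, Thunk::Kind kind) {
  size_t count = 0;
  ForAllThunks([&](const Thunk* thunk) { count += (thunk->kind() == kind); },
               thunks);
  return count;
}

}  // namespace xla::gpu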
8476d0e3-b0ac-4493-9140-47533d5d00cd
cpp
tensorflow/tensorflow
dynamic_slice_thunk
third_party/xla/xla/service/gpu/runtime/dynamic_slice_thunk.cc
third_party/xla/xla/service/gpu/runtime/dynamic_slice_thunk_test.cc
#include "xla/service/gpu/runtime/dynamic_slice_thunk.h" #include <algorithm> #include <cstdint> #include <memory> #include <optional> #include <utility> #include <variant> #include <vector> #include "absl/container/inlined_vector.h" #include "absl/status/status.h" #include "absl/synchronization/mutex.h" #include "llvm/ADT/STLExtras.h" #include "xla/service/buffer_assignment.h" #include "xla/service/gpu/buffer_allocations.h" #include "xla/service/gpu/ir_emission_utils.h" #include "xla/service/gpu/runtime/sequential_thunk.h" #include "xla/service/gpu/runtime/thunk.h" #include "xla/service/gpu/runtime/while_thunk.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/status_macros.h" #include "xla/stream_executor/device_memory.h" #include "xla/stream_executor/memory_allocation.h" #include "xla/stream_executor/stream.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { DynamicSliceThunk::DynamicSliceThunk( ThunkInfo thunk_info, std::unique_ptr<ThunkSequence> embedded_thunk, std::vector<std::optional<BufferAllocation::Slice>> arguments, std::vector<std::unique_ptr<BufferAllocation>> fake_allocations, std::vector<std::optional<std::vector<Offset>>> offsets, std::vector<std::optional<Shape>> orig_shapes, std::vector<std::optional<Shape>> sliced_shapes, std::vector<std::optional<uint64_t>> offset_byte_sizes) : Thunk(Kind::kDynamicSlice, thunk_info), embedded_thunk_(std::make_unique<SequentialThunk>( ThunkInfo(), std::move(*embedded_thunk))), fake_allocations_(std::move(fake_allocations)) { for (auto [arg, offsets, orig_shape, sliced_shape, offset_byte_size] : llvm::zip_equal(arguments, offsets, orig_shapes, sliced_shapes, offset_byte_sizes)) { slices_.push_back(SliceDef{ std::move(arg), std::move(offsets), std::move(orig_shape), std::move(sliced_shape), std::move(offset_byte_size), }); } for (SliceDef& slice : slices_) { offsets_allocs_base_.push_back(offsets_allocs_size_); if (slice.sliced_shape.has_value()) { offsets_allocs_size_ += slice.sliced_shape->rank() * sizeof(int64_t); } } } DynamicSliceThunk::OffsetArray::OffsetArray(const Literal& l) { CHECK(l.shape().IsArray()) << "Expected array literal, got " << l.ToString(); for (int i = 0; i < l.element_count(); i++) { switch (l.shape().element_type()) { case S32: values.push_back(l.data<int32_t>()[i]); break; case S64: values.push_back(l.data<int64_t>()[i]); break; case U32: values.push_back(l.data<uint32_t>()[i]); break; case U64: CHECK(l.data<uint64_t>()[i] < static_cast<uint64_t>(std::numeric_limits<int64_t>::max())) << "Offset value: " << l.data<uint64_t>()[i] << " cannot fit in int64_t"; values.push_back(l.data<uint64_t>()[i]); break; default: CHECK(false) << "Offset array must be of a supported integer type " "(S32, S64, U32, U64), found: " << l.shape().element_type(); } } } absl::Status DynamicSliceThunk::Prepare(const PrepareParams& params, ResourceRequests& resource_requests) { for (SliceDef& slice : slices_) { if (slice.offsets.has_value()) { TF_RET_CHECK(slice.embedded_thunk_argument.has_value()); TF_RET_CHECK(slice.orig_shape.has_value()); TF_RET_CHECK(slice.sliced_shape.has_value()); TF_RET_CHECK(slice.offset_byte_size.has_value()); TF_RET_CHECK(slice.orig_shape->IsArray()); TF_RET_CHECK(slice.sliced_shape->IsArray()); TF_RET_CHECK(slice.offsets->size() == slice.orig_shape->rank()); TF_RET_CHECK(slice.sliced_shape->rank() == slice.orig_shape->rank()); } } TF_RETURN_IF_ERROR(embedded_thunk_->Prepare(params, resource_requests)); return 
absl::OkStatus(); } absl::Status DynamicSliceThunk::Initialize(const InitializeParams& params) { TF_RETURN_IF_ERROR(embedded_thunk_->Initialize(params)); absl::MutexLock lock(&mutex_); if (offsets_allocs_.contains(params.executor)) return absl::OkStatus(); VLOG(2) << "Allocate " << offsets_allocs_size_ << " bytes for transferring offsets on executor: " << params.executor; TF_ASSIGN_OR_RETURN( std::unique_ptr<se::MemoryAllocation> allocation, params.executor->HostMemoryAllocate(offsets_allocs_size_)); offsets_allocs_.emplace(params.executor, std::move(allocation)); return absl::OkStatus(); } absl::Status DynamicSliceThunk::ExecuteOnStream(const ExecuteParams& params) { se::Stream& stream = *params.stream; const BufferAllocations& orig_allocations = *params.buffer_allocations; absl::InlinedVector<se::DeviceMemoryBase, 8> slice_buffers( slices_.size(), se::DeviceMemoryBase()); int64_t* offsets_alloc = [&] { absl::MutexLock lock(&mutex_); return reinterpret_cast<int64_t*>( offsets_allocs_.at(stream.parent())->opaque()); }(); auto offset_value = [&](int64_t arg_idx, int64_t offset_idx) -> int64_t& { return offsets_alloc[offsets_allocs_base_.at(arg_idx) + offset_idx]; }; VLOG(2) << "Execute address computation thunk: slices=" << slices_.size(); for (auto [argument_idx, slice] : llvm::enumerate(slices_)) { if (!slice.embedded_thunk_argument.has_value()) { continue; } se::DeviceMemoryBase argument_buffer = orig_allocations.GetDeviceAddress(*slice.embedded_thunk_argument); if (!slice.offsets.has_value()) { slice_buffers[argument_idx] = argument_buffer; continue; } const Shape& src_shape = *slice.orig_shape; const Shape& dst_shape = *slice.sliced_shape; absl::InlinedVector<int64_t, 4> slice_starts; slice_starts.reserve(dst_shape.rank()); int64_t num_transfers = 0; for (auto [offset_idx, values] : llvm::enumerate(llvm::zip( *slice.offsets, src_shape.dimensions(), dst_shape.dimensions()))) { auto [offset, src_dim, dst_dim] = values; if (uint64_t* const_offset = std::get_if<uint64_t>(&offset)) { VLOG(2) << " - arg " << argument_idx << "[" << offset_idx << "]: constant offset = " << *const_offset; offset_value(argument_idx, offset_idx) = *const_offset; } else if (std::holds_alternative<LoopIter>(offset)) { TF_ASSIGN_OR_RETURN(int64_t iter, WhileThunk::CurrentLoopIteration()); VLOG(2) << " - arg " << argument_idx << "[" << offset_idx << "]: loop iteration offset = " << iter; offset_value(argument_idx, offset_idx) = iter; } else if (OffsetArray* offset_array = std::get_if<OffsetArray>(&offset)) { TF_ASSIGN_OR_RETURN(int64_t iter, WhileThunk::CurrentLoopIteration()); VLOG(2) << " - arg " << argument_idx << "[" << offset_idx << "]: offset array offset = " << offset_array->values[iter]; offset_value(argument_idx, offset_idx) = offset_array->values[iter]; } else { auto alloc_slice = std::get<BufferAllocation::Slice>(offset); VLOG(2) << " - arg " << argument_idx << "[" << offset_idx << "]: transfer offset from device " << alloc_slice.ToString(); se::DeviceMemoryBase offset_src = orig_allocations.GetDeviceAddress(alloc_slice); int64_t* offset_dst = &offset_value(argument_idx, offset_idx); TF_RETURN_IF_ERROR( stream.Memcpy(offset_dst, offset_src, *slice.offset_byte_size)); ++num_transfers; } } if (num_transfers > 0) { VLOG(2) << "Wait for completion of " << num_transfers << " transfer"; TF_RETURN_IF_ERROR(stream.BlockHostUntilDone()); } for (auto [offset_idx, values] : llvm::enumerate( llvm::zip(src_shape.dimensions(), dst_shape.dimensions()))) { auto [src_dim, dst_dim] = values; int64_t start_index = 
std::min(std::max(offset_value(argument_idx, offset_idx), int64_t{0}), src_dim - dst_dim); slice_starts.push_back(start_index); } int64_t new_size = ShapeUtil::ByteSizeOf(dst_shape); int64_t new_offset = 0; for (auto [start, stride] : llvm::zip(slice_starts, *ShapeUtil::ByteStrides(src_shape))) { new_offset += start * stride; } VLOG(2) << "Create sliced argument " << argument_idx << " of shape " << slice.sliced_shape->ToString() << " by slicing argument of shape " << slice.orig_shape->ToString() << " at offset " << new_offset << " with " << new_size; slice_buffers[argument_idx] = argument_buffer.GetByteSlice(new_offset, new_size); } BufferAllocations slice_allocations(slice_buffers, orig_allocations.device_ordinal(), orig_allocations.memory_allocator()); Thunk::ExecuteParams new_params = Thunk::ExecuteParams::CloneWithNewAllocations(params, slice_allocations); TF_RETURN_IF_ERROR(embedded_thunk_->ExecuteOnStream(new_params)); return absl::OkStatus(); } } }
#include "xla/service/gpu/runtime/dynamic_slice_thunk.h" #include <algorithm> #include <cstdint> #include <functional> #include <memory> #include <optional> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/status/statusor.h" #include "absl/strings/ascii.h" #include "xla/ffi/ffi.h" #include "xla/ffi/ffi_api.h" #include "xla/service/buffer_assignment.h" #include "xla/service/gpu/buffer_allocations.h" #include "xla/service/gpu/matmul_utils.h" #include "xla/service/gpu/runtime/custom_call_thunk.h" #include "xla/service/gpu/runtime/gemm_thunk.h" #include "xla/service/gpu/runtime/thunk.h" #include "xla/service/platform_util.h" #include "xla/service/service_executable_run_options.h" #include "xla/shape_util.h" #include "xla/stream_executor/blas.h" #include "xla/stream_executor/command_buffer.h" #include "xla/stream_executor/device_memory.h" #include "xla/stream_executor/device_memory_allocator.h" #include "xla/stream_executor/gpu/gpu_types.h" #include "xla/stream_executor/platform.h" #include "xla/stream_executor/platform_manager.h" #include "xla/stream_executor/stream.h" #include "xla/stream_executor/stream_executor.h" #include "xla/stream_executor/stream_executor_memory_allocator.h" #include "xla/tsl/lib/core/status_test_util.h" #include "xla/types.h" #include "tsl/platform/statusor.h" #include "tsl/platform/test.h" #if GOOGLE_CUDA #define PLATFORM "CUDA" #elif TENSORFLOW_USE_ROCM #define PLATFORM "ROCM" #endif namespace xla::gpu { namespace { static se::StreamExecutor* GpuExecutor() { auto name = absl::AsciiStrToUpper(PlatformUtil::CanonicalPlatformName("gpu").value()); auto* platform = se::PlatformManager::PlatformWithName(name).value(); return platform->ExecutorForDevice(0).value(); } } TEST(DynamicSliceThunkTest, SlicedGemm) { se::StreamExecutor* executor = GpuExecutor(); TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream()); int64_t lhs_length = sizeof(float) * 2 * 4; int64_t rhs_length = sizeof(float) * 3 * 1; int64_t out_length = sizeof(float) * 1 * 1; int64_t offset_length = sizeof(int64_t); std::vector<std::unique_ptr<BufferAllocation>> fake_allocations(4); fake_allocations.push_back( std::make_unique<BufferAllocation>(0, rhs_length, 0)); BufferAllocation::Slice slice_lhs_fake(fake_allocations.back().get(), 0, rhs_length); BufferAllocation alloc_lhs(0, lhs_length, 0); BufferAllocation::Slice slice_lhs(&alloc_lhs, 0, lhs_length); fake_allocations.push_back( std::make_unique<BufferAllocation>(1, rhs_length, 0)); BufferAllocation::Slice slice_rhs(fake_allocations.back().get(), 0, rhs_length); fake_allocations.push_back( std::make_unique<BufferAllocation>(2, out_length, 0)); BufferAllocation::Slice slice_out(fake_allocations.back().get(), 0, out_length); fake_allocations.push_back(std::make_unique<BufferAllocation>( 3, 1024 * 1024, 0)); BufferAllocation::Slice slice_workspace(fake_allocations.back().get(), 0, 1024 * 1024); BufferAllocation alloc_lhs_offset_0(4, offset_length, 0); BufferAllocation::Slice slice_lhs_offset_0(&alloc_lhs_offset_0, 0, offset_length); BufferAllocation alloc_lhs_offset_1(5, offset_length, 0); BufferAllocation::Slice slice_lhs_offset_1(&alloc_lhs_offset_1, 0, offset_length); auto config = GemmConfig::For(ShapeUtil::MakeShape(PrimitiveType::F32, {1, 3}), {}, {1}, ShapeUtil::MakeShape(PrimitiveType::F32, {3, 1}), {}, {0}, ShapeUtil::MakeShape(PrimitiveType::F32, {1, 1}), 1.0, 0.0, 0.0, PrecisionConfig::ALG_UNSET, std::nullopt, se::blas::kDefaultComputePrecision, false, false); ASSERT_TRUE(config.ok()); ThunkSequence 
seq; seq.emplace_back(std::make_unique<GemmThunk>( Thunk::ThunkInfo(), config.value(), slice_lhs_fake, slice_rhs, slice_out, slice_workspace, true)); std::vector<DynamicSliceThunk::Offset> lhs_offsets{slice_lhs_offset_0, slice_lhs_offset_1}; DynamicSliceThunk thunk( Thunk::ThunkInfo(), std::make_unique<ThunkSequence>(std::move(seq)), {slice_lhs, slice_rhs, slice_out, slice_workspace}, std::move(fake_allocations), {lhs_offsets, std::nullopt, std::nullopt, std::nullopt}, {ShapeUtil::MakeShape(PrimitiveType::F32, {2, 4}), std::nullopt, std::nullopt, std::nullopt}, {ShapeUtil::MakeShape(PrimitiveType::F32, {1, 3}), std::nullopt, std::nullopt, std::nullopt}, {sizeof(int64_t), std::nullopt, std::nullopt, std::nullopt}); se::DeviceMemory<float> lhs = executor->AllocateArray<float>(2 * 4); std::vector<float> lhs_arr{1, 2, 3, 4, 5, 6, 7, 8}; TF_ASSERT_OK(stream->Memcpy(&lhs, lhs_arr.data(), lhs_length)); se::DeviceMemory<float> rhs = executor->AllocateArray<float>(3 * 1); std::vector<float> rhs_arr(3, 1); TF_ASSERT_OK(stream->Memcpy(&rhs, rhs_arr.data(), rhs_length)); se::DeviceMemory<float> out = executor->AllocateArray<float>(1 * 1); TF_ASSERT_OK(stream->MemZero(&out, out_length)); se::DeviceMemory<float> workspace = executor->AllocateArray<float>(1024 * 1024); TF_ASSERT_OK(stream->MemZero(&workspace, 1024 * 1024)); se::DeviceMemory<int64_t> lhs_offset_0 = executor->AllocateArray<int64_t>(1); se::DeviceMemory<int64_t> lhs_offset_1 = executor->AllocateArray<int64_t>(1); std::vector<int64_t> lhs_offset_arr{0, 1}; TF_ASSERT_OK( stream->Memcpy(&lhs_offset_0, &lhs_offset_arr[0], offset_length)); TF_ASSERT_OK( stream->Memcpy(&lhs_offset_1, &lhs_offset_arr[1], offset_length)); ServiceExecutableRunOptions run_options; se::StreamExecutorMemoryAllocator allocator(executor); BufferAllocations allocations( {lhs, rhs, out, workspace, lhs_offset_0, lhs_offset_1}, 0, &allocator); Thunk::ExecuteParams params = Thunk::ExecuteParams::Create( run_options, allocations, stream.get(), stream.get(), nullptr, nullptr); Thunk::ExecutableSource source = {"", {}}; TF_ASSERT_OK(thunk.Initialize( {executor, source, &allocations, stream.get(), stream.get()})); TF_ASSERT_OK(thunk.ExecuteOnStream(params)); TF_ASSERT_OK(stream->BlockHostUntilDone()); std::vector<float> dst(1, 0); TF_ASSERT_OK(stream->Memcpy(dst.data(), out, out_length)); ASSERT_EQ(dst, std::vector<float>({9})); } TEST(DynamicSliceThunkTest, MulipleSlicedOperandsGemm) { se::StreamExecutor* executor = GpuExecutor(); TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream()); int64_t length = sizeof(float) * 2 * 4; int64_t out_length = sizeof(float) * 1; int64_t offset_length = sizeof(int64_t); int64_t slice_length = sizeof(float) * 3; std::vector<std::unique_ptr<BufferAllocation>> fake_allocations(4); fake_allocations.push_back(std::make_unique<BufferAllocation>( 0, slice_length, 0)); BufferAllocation::Slice slice_lhs_fake(fake_allocations.back().get(), 0, slice_length); fake_allocations.push_back(std::make_unique<BufferAllocation>( 1, slice_length, 0)); BufferAllocation::Slice slice_rhs_fake(fake_allocations.back().get(), 0, slice_length); BufferAllocation alloc_lhs(0, length, 0); BufferAllocation::Slice slice_lhs(&alloc_lhs, 0, length); BufferAllocation alloc_rhs(1, length, 0); BufferAllocation::Slice slice_rhs(&alloc_rhs, 0, length); fake_allocations.push_back( std::make_unique<BufferAllocation>(2, out_length, 0)); BufferAllocation::Slice slice_out(fake_allocations.back().get(), 0, out_length); fake_allocations.push_back(std::make_unique<BufferAllocation>( 
3, 1024 * 1024, 0)); BufferAllocation::Slice slice_workspace(fake_allocations.back().get(), 0, 1024 * 1024); BufferAllocation alloc_lhs_offset_0(4, offset_length, 0); BufferAllocation::Slice slice_lhs_offset_0(&alloc_lhs_offset_0, 0, offset_length); BufferAllocation alloc_lhs_offset_1(5, offset_length, 0); BufferAllocation::Slice slice_lhs_offset_1(&alloc_lhs_offset_1, 0, offset_length); BufferAllocation alloc_rhs_offset_0(6, offset_length, 0); BufferAllocation::Slice slice_rhs_offset_0(&alloc_rhs_offset_0, 0, offset_length); BufferAllocation alloc_rhs_offset_1(7, offset_length, 0); BufferAllocation::Slice slice_rhs_offset_1(&alloc_rhs_offset_1, 0, offset_length); auto config = GemmConfig::For(ShapeUtil::MakeShape(PrimitiveType::F32, {1, 3}), {}, {1}, ShapeUtil::MakeShape(PrimitiveType::F32, {3, 1}), {}, {0}, ShapeUtil::MakeShape(PrimitiveType::F32, {1, 1}), 1.0, 0.0, 0.0, PrecisionConfig::ALG_UNSET, std::nullopt, se::blas::kDefaultComputePrecision, false, false); ASSERT_TRUE(config.ok()); ThunkSequence seq; seq.emplace_back(std::make_unique<GemmThunk>( Thunk::ThunkInfo(), config.value(), slice_lhs_fake, slice_rhs_fake, slice_out, slice_workspace, true)); std::vector<DynamicSliceThunk::Offset> lhs_offsets{slice_lhs_offset_0, slice_lhs_offset_1}; std::vector<DynamicSliceThunk::Offset> rhs_offsets{slice_rhs_offset_0, slice_rhs_offset_1}; DynamicSliceThunk thunk( Thunk::ThunkInfo(), std::make_unique<ThunkSequence>(std::move(seq)), {slice_lhs, slice_rhs, slice_out, slice_workspace}, std::move(fake_allocations), {lhs_offsets, rhs_offsets, std::nullopt, std::nullopt}, {ShapeUtil::MakeShape(PrimitiveType::F32, {2, 4}), ShapeUtil::MakeShape(PrimitiveType::F32, {8, 1}), std::nullopt, std::nullopt}, {ShapeUtil::MakeShape(PrimitiveType::F32, {1, 3}), ShapeUtil::MakeShape(PrimitiveType::F32, {3, 1}), std::nullopt, std::nullopt}, {sizeof(int64_t), sizeof(int64_t), std::nullopt, std::nullopt}); std::vector<float> arr{1, 2, 3, 4, 5, 6, 7, 8}; se::DeviceMemory<float> lhs = executor->AllocateArray<float>(2 * 4); TF_ASSERT_OK(stream->Memcpy(&lhs, arr.data(), length)); se::DeviceMemory<float> rhs = executor->AllocateArray<float>(8); std::vector<float> rhs_arr(8, 1); TF_ASSERT_OK(stream->Memcpy(&rhs, arr.data(), length)); se::DeviceMemory<float> out = executor->AllocateArray<float>(1); TF_ASSERT_OK(stream->MemZero(&out, out_length)); se::DeviceMemory<float> workspace = executor->AllocateArray<float>(1024 * 1024); TF_ASSERT_OK(stream->MemZero(&workspace, 1024 * 1024)); se::DeviceMemory<int64_t> lhs_offset_0 = executor->AllocateArray<int64_t>(1); se::DeviceMemory<int64_t> lhs_offset_1 = executor->AllocateArray<int64_t>(1); std::vector<int64_t> lhs_offset_arr{0, 1}; TF_ASSERT_OK( stream->Memcpy(&lhs_offset_0, &lhs_offset_arr[0], offset_length)); TF_ASSERT_OK( stream->Memcpy(&lhs_offset_1, &lhs_offset_arr[1], offset_length)); se::DeviceMemory<int64_t> rhs_offset_0 = executor->AllocateArray<int64_t>(1); se::DeviceMemory<int64_t> rhs_offset_1 = executor->AllocateArray<int64_t>(1); std::vector<int64_t> rhs_offset_arr{2, 0}; TF_ASSERT_OK( stream->Memcpy(&rhs_offset_0, &rhs_offset_arr[0], offset_length)); TF_ASSERT_OK( stream->Memcpy(&rhs_offset_1, &rhs_offset_arr[1], offset_length)); ServiceExecutableRunOptions run_options; se::StreamExecutorMemoryAllocator allocator(executor); BufferAllocations allocations({lhs, rhs, out, workspace, lhs_offset_0, lhs_offset_1, rhs_offset_0, rhs_offset_1}, 0, &allocator); Thunk::ExecuteParams params = Thunk::ExecuteParams::Create( run_options, allocations, stream.get(), stream.get(), 
nullptr, nullptr); Thunk::ExecutableSource source = {"", {}}; TF_ASSERT_OK(thunk.Initialize( {executor, source, &allocations, stream.get(), stream.get()})); TF_ASSERT_OK(thunk.ExecuteOnStream(params)); TF_ASSERT_OK(stream->BlockHostUntilDone()); std::vector<float> dst(1, 0); TF_ASSERT_OK(stream->Memcpy(dst.data(), out, out_length)); ASSERT_EQ(dst, std::vector<float>({2 * 3 + 3 * 4 + 4 * 5})); } static absl::Status Memcpy(se::Stream* stream, ffi::AnyBuffer src, ffi::Result<ffi::AnyBuffer> dst) { se::DeviceMemoryBase dst_mem = dst->device_memory(); se::DeviceMemoryBase src_mem = src.device_memory(); return stream->MemcpyD2D(&dst_mem, src_mem, src_mem.size()); } XLA_FFI_DEFINE_HANDLER(kMemcpy, Memcpy, ffi::Ffi::Bind() .Ctx<ffi::Stream>() .Arg<ffi::AnyBuffer>() .Ret<ffi::AnyBuffer>() ); XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "__xla_test$$memcpy", PLATFORM, kMemcpy); TEST(DynamicSliceThunkTest, SlicedMemcpy) { se::StreamExecutor* executor = GpuExecutor(); TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream()); int64_t src_count = 8 * 8 * 10 * 8; int64_t dst_count = 8 * 8; int64_t src_length = sizeof(int32_t) * src_count; int64_t dst_length = sizeof(int32_t) * dst_count; int64_t offset_length = sizeof(int64_t); int64_t slice_length = sizeof(int32_t) * dst_count; std::vector<std::unique_ptr<BufferAllocation>> fake_allocations(2); fake_allocations.push_back(std::make_unique<BufferAllocation>( 0, slice_length, 0)); BufferAllocation::Slice slice_src_fake(fake_allocations.back().get(), 0, slice_length); BufferAllocation alloc_src(0, src_length, 0); BufferAllocation::Slice slice_src(&alloc_src, 0, src_length); fake_allocations.push_back( std::make_unique<BufferAllocation>(1, dst_length, 0)); BufferAllocation::Slice slice_dst(fake_allocations.back().get(), 0, dst_length); BufferAllocation alloc_offset_0(2, offset_length, 0); BufferAllocation::Slice slice_offset_0(&alloc_offset_0, 0, offset_length); BufferAllocation alloc_offset_1(3, offset_length, 0); BufferAllocation::Slice slice_offset_1(&alloc_offset_1, 0, offset_length); BufferAllocation alloc_offset_2(4, offset_length, 0); BufferAllocation::Slice slice_offset_2(&alloc_offset_2, 0, offset_length); BufferAllocation alloc_offset_3(5, offset_length, 0); BufferAllocation::Slice slice_offset_3(&alloc_offset_3, 0, offset_length); auto registration = xla::ffi::FindHandler("__xla_test$$memcpy", PLATFORM); ASSERT_TRUE(registration.ok()); std::vector<std::optional<CustomCallThunk::Slice>> operands{ CustomCallThunk::Slice{slice_src_fake, ShapeUtil::MakeShape(PrimitiveType::S32, {8, 8})}}; std::vector<std::optional<CustomCallThunk::Slice>> results{ CustomCallThunk::Slice{slice_dst, ShapeUtil::MakeShape(PrimitiveType::S32, {8, 8})}}; ThunkSequence seq; TF_ASSERT_OK_AND_ASSIGN( seq.emplace_back(), CustomCallThunk::Create(Thunk::ThunkInfo(), registration->bundle, operands, results, CustomCallThunk::AttributesMap(), nullptr)); std::vector<DynamicSliceThunk::Offset> slice_offsets{ slice_offset_0, slice_offset_1, slice_offset_2, slice_offset_3}; DynamicSliceThunk thunk( Thunk::ThunkInfo(), std::make_unique<ThunkSequence>(std::move(seq)), {slice_src, slice_dst}, std::move(fake_allocations), {slice_offsets, std::nullopt}, {ShapeUtil::MakeShape(PrimitiveType::S32, {8, 8, 10, 8}), std::nullopt}, {ShapeUtil::MakeShape(PrimitiveType::S32, {1, 1, 8, 8}), std::nullopt}, {sizeof(int64_t), std::nullopt}); se::DeviceMemory<int32_t> src = executor->AllocateArray<int32_t>(src_count); std::vector<int32_t> src_arr(src_count, 0); for (unsigned i = 0; i < src_count; 
++i) src_arr[i] = i; TF_ASSERT_OK(stream->Memcpy(&src, src_arr.data(), src_length)); se::DeviceMemory<int32_t> dst = executor->AllocateArray<int32_t>(dst_count); TF_ASSERT_OK(stream->MemZero(&dst, dst_length)); se::DeviceMemory<int64_t> offset_0 = executor->AllocateArray<int64_t>(1); se::DeviceMemory<int64_t> offset_1 = executor->AllocateArray<int64_t>(1); se::DeviceMemory<int64_t> offset_2 = executor->AllocateArray<int64_t>(1); se::DeviceMemory<int64_t> offset_3 = executor->AllocateArray<int64_t>(1); std::vector<int64_t> offset_arr{3, 5, 2, 0}; TF_ASSERT_OK(stream->Memcpy(&offset_0, &offset_arr[0], offset_length)); TF_ASSERT_OK(stream->Memcpy(&offset_1, &offset_arr[1], offset_length)); TF_ASSERT_OK(stream->Memcpy(&offset_2, &offset_arr[2], offset_length)); TF_ASSERT_OK(stream->Memcpy(&offset_3, &offset_arr[3], offset_length)); ServiceExecutableRunOptions run_options; se::StreamExecutorMemoryAllocator allocator(executor); BufferAllocations allocations( {src, dst, offset_0, offset_1, offset_2, offset_3}, 0, &allocator); Thunk::ExecuteParams params = Thunk::ExecuteParams::Create( run_options, allocations, stream.get(), stream.get(), nullptr, nullptr); Thunk::ExecutableSource source = {"", {}}; TF_ASSERT_OK(thunk.Initialize( {executor, source, &allocations, stream.get(), stream.get()})); TF_ASSERT_OK(thunk.ExecuteOnStream(params)); TF_ASSERT_OK(stream->BlockHostUntilDone()); std::vector<int32_t> out(dst_count, 0); TF_ASSERT_OK(stream->Memcpy(out.data(), dst, dst_length)); std::vector<int32_t> ref(dst_count, 0); int64_t offset_val = offset_arr[3] + 8 * (offset_arr[2] + 10 * (offset_arr[1] + 8 * offset_arr[0])); std::copy(src_arr.begin() + offset_val, src_arr.begin() + offset_val + dst_count, ref.begin()); ASSERT_EQ(out, ref); } TEST(DynamicSliceThunkTest, SlicedOutputMemcpy) { se::StreamExecutor* executor = GpuExecutor(); TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream()); int64_t src_count = 8 * 8 * 10 * 2; int64_t dst_count = 2 * 2 * 2 * 2; int64_t slice_count = 2 * 2; int64_t src_length = sizeof(int32_t) * src_count; int64_t dst_length = sizeof(int32_t) * dst_count; int64_t offset_length = sizeof(int64_t); int64_t slice_length = sizeof(int32_t) * slice_count; std::vector<std::unique_ptr<BufferAllocation>> fake_allocations(2); fake_allocations.push_back(std::make_unique<BufferAllocation>( 0, slice_length, 0)); BufferAllocation::Slice slice_src_fake(fake_allocations.back().get(), 0, slice_length); fake_allocations.push_back(std::make_unique<BufferAllocation>( 1, slice_length, 0)); BufferAllocation::Slice slice_dst_fake(fake_allocations.back().get(), 0, slice_length); BufferAllocation alloc_src(0, src_length, 0); BufferAllocation::Slice slice_src(&alloc_src, 0, src_length); BufferAllocation alloc_dst(1, dst_length, 0); BufferAllocation::Slice slice_dst(&alloc_dst, 0, dst_length); BufferAllocation alloc_src_offset_0(2, offset_length, 0); BufferAllocation::Slice slice_src_offset_0(&alloc_src_offset_0, 0, offset_length); BufferAllocation alloc_src_offset_1(3, offset_length, 0); BufferAllocation::Slice slice_src_offset_1(&alloc_src_offset_1, 0, offset_length); BufferAllocation alloc_src_offset_2(4, offset_length, 0); BufferAllocation::Slice slice_src_offset_2(&alloc_src_offset_2, 0, offset_length); BufferAllocation alloc_src_offset_3(5, offset_length, 0); BufferAllocation::Slice slice_src_offset_3(&alloc_src_offset_3, 0, offset_length); BufferAllocation alloc_dst_offset_0(6, offset_length, 0); BufferAllocation::Slice slice_dst_offset_0(&alloc_dst_offset_0, 0, offset_length); 
BufferAllocation alloc_dst_offset_1(7, offset_length, 0); BufferAllocation::Slice slice_dst_offset_1(&alloc_dst_offset_1, 0, offset_length); BufferAllocation alloc_dst_offset_2(8, offset_length, 0); BufferAllocation::Slice slice_dst_offset_2(&alloc_dst_offset_2, 0, offset_length); BufferAllocation alloc_dst_offset_3(9, offset_length, 0); BufferAllocation::Slice slice_dst_offset_3(&alloc_dst_offset_3, 0, offset_length); auto registration = xla::ffi::FindHandler("__xla_test$$memcpy", PLATFORM); ASSERT_TRUE(registration.ok()); std::vector<std::optional<CustomCallThunk::Slice>> operands{ CustomCallThunk::Slice{slice_src_fake, ShapeUtil::MakeShape(PrimitiveType::S32, {2, 2})}}; std::vector<std::optional<CustomCallThunk::Slice>> results{ CustomCallThunk::Slice{slice_dst_fake, ShapeUtil::MakeShape(PrimitiveType::S32, {2, 2})}}; ThunkSequence seq; TF_ASSERT_OK_AND_ASSIGN( seq.emplace_back(), CustomCallThunk::Create(Thunk::ThunkInfo(), registration->bundle, operands, results, CustomCallThunk::AttributesMap(), nullptr)); std::vector<DynamicSliceThunk::Offset> slice_src_offsets{ slice_src_offset_0, slice_src_offset_1, slice_src_offset_2, slice_src_offset_3}; std::vector<DynamicSliceThunk::Offset> slice_dst_offsets{ slice_dst_offset_0, slice_dst_offset_1, slice_dst_offset_2, slice_dst_offset_3}; DynamicSliceThunk thunk( Thunk::ThunkInfo(), std::make_unique<ThunkSequence>(std::move(seq)), {slice_src, slice_dst}, std::move(fake_allocations), {slice_src_offsets, slice_dst_offsets}, {ShapeUtil::MakeShape(PrimitiveType::S32, {8, 8, 10, 2}), ShapeUtil::MakeShape(PrimitiveType::S32, {2, 2, 2, 2})}, {ShapeUtil::MakeShape(PrimitiveType::S32, {1, 1, 2, 2}), ShapeUtil::MakeShape(PrimitiveType::S32, {1, 1, 2, 2})}, {sizeof(int64_t), sizeof(int64_t)}); se::DeviceMemory<int32_t> src = executor->AllocateArray<int32_t>(src_count); std::vector<int32_t> src_arr(src_count, 0); for (unsigned i = 0; i < src_count; ++i) src_arr[i] = i; TF_ASSERT_OK(stream->Memcpy(&src, src_arr.data(), src_length)); se::DeviceMemory<int32_t> dst = executor->AllocateArray<int32_t>(dst_count); TF_ASSERT_OK(stream->MemZero(&dst, dst_length)); se::DeviceMemory<int64_t> src_offset_0 = executor->AllocateArray<int64_t>(1); se::DeviceMemory<int64_t> src_offset_1 = executor->AllocateArray<int64_t>(1); se::DeviceMemory<int64_t> src_offset_2 = executor->AllocateArray<int64_t>(1); se::DeviceMemory<int64_t> src_offset_3 = executor->AllocateArray<int64_t>(1); std::vector<int64_t> src_offset_arr{3, 5, 2, 0}; TF_ASSERT_OK( stream->Memcpy(&src_offset_0, &src_offset_arr[0], offset_length)); TF_ASSERT_OK( stream->Memcpy(&src_offset_1, &src_offset_arr[1], offset_length)); TF_ASSERT_OK( stream->Memcpy(&src_offset_2, &src_offset_arr[2], offset_length)); TF_ASSERT_OK( stream->Memcpy(&src_offset_3, &src_offset_arr[3], offset_length)); se::DeviceMemory<int64_t> dst_offset_0 = executor->AllocateArray<int64_t>(1); se::DeviceMemory<int64_t> dst_offset_1 = executor->AllocateArray<int64_t>(1); se::DeviceMemory<int64_t> dst_offset_2 = executor->AllocateArray<int64_t>(1); se::DeviceMemory<int64_t> dst_offset_3 = executor->AllocateArray<int64_t>(1); std::vector<int64_t> dst_offset_arr{1, 1, 0, 0}; TF_ASSERT_OK( stream->Memcpy(&dst_offset_0, &dst_offset_arr[0], offset_length)); TF_ASSERT_OK( stream->Memcpy(&dst_offset_1, &dst_offset_arr[1], offset_length)); TF_ASSERT_OK( stream->Memcpy(&dst_offset_2, &dst_offset_arr[2], offset_length)); TF_ASSERT_OK( stream->Memcpy(&dst_offset_3, &dst_offset_arr[3], offset_length)); ServiceExecutableRunOptions run_options; 
se::StreamExecutorMemoryAllocator allocator(executor); BufferAllocations allocations( {src, dst, src_offset_0, src_offset_1, src_offset_2, src_offset_3, dst_offset_0, dst_offset_1, dst_offset_2, dst_offset_3}, 0, &allocator); Thunk::ExecuteParams params = Thunk::ExecuteParams::Create( run_options, allocations, stream.get(), stream.get(), nullptr, nullptr); Thunk::ExecutableSource source = {"", {}}; TF_ASSERT_OK(thunk.Initialize( {executor, source, &allocations, stream.get(), stream.get()})); TF_ASSERT_OK(thunk.ExecuteOnStream(params)); TF_ASSERT_OK(stream->BlockHostUntilDone()); std::vector<int32_t> out(dst_count, 0); TF_ASSERT_OK(stream->Memcpy(out.data(), dst, dst_length)); std::vector<int32_t> ref(dst_count, 0); int64_t src_offset_val = src_offset_arr[3] + 2 * (src_offset_arr[2] + 10 * (src_offset_arr[1] + 8 * src_offset_arr[0])); int64_t dst_offset_val = dst_offset_arr[3] + 2 * (dst_offset_arr[2] + 2 * (dst_offset_arr[1] + 2 * dst_offset_arr[0])); std::copy(src_arr.begin() + src_offset_val, src_arr.begin() + src_offset_val + slice_count, ref.begin() + dst_offset_val); ASSERT_EQ(out, ref); } TEST(DynamicSliceThunkTest, SlicedGemmArbitraryArgumentOrder) { se::StreamExecutor* executor = GpuExecutor(); TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream()); int64_t lhs_length = sizeof(float) * 2 * 4; int64_t rhs_length = sizeof(float) * 3 * 1; int64_t out_length = sizeof(float) * 1 * 1; int64_t offset_length = sizeof(int64_t); std::vector<std::unique_ptr<BufferAllocation>> fake_allocations(4); fake_allocations.push_back( std::make_unique<BufferAllocation>(0, rhs_length, 0)); BufferAllocation::Slice slice_lhs_fake(fake_allocations.back().get(), 0, rhs_length); fake_allocations.push_back( std::make_unique<BufferAllocation>(1, rhs_length, 0)); BufferAllocation::Slice slice_rhs_fake(fake_allocations.back().get(), 0, rhs_length); fake_allocations.push_back( std::make_unique<BufferAllocation>(2, out_length, 0)); BufferAllocation::Slice slice_out_fake(fake_allocations.back().get(), 0, out_length); fake_allocations.push_back(std::make_unique<BufferAllocation>( 3, 1024 * 1024, 0)); BufferAllocation::Slice slice_workspace_fake(fake_allocations.back().get(), 0, 1024 * 1024); BufferAllocation alloc_lhs(1, lhs_length, 0); BufferAllocation::Slice slice_lhs(&alloc_lhs, 0, lhs_length); BufferAllocation alloc_rhs(3, rhs_length, 0); BufferAllocation::Slice slice_rhs(&alloc_rhs, 0, rhs_length); BufferAllocation alloc_out(2, out_length, 0); BufferAllocation::Slice slice_out(&alloc_out, 0, out_length); BufferAllocation alloc_workspace(0, 1024 * 1024, 0); BufferAllocation::Slice slice_workspace(&alloc_workspace, 0, 1024 * 1024); BufferAllocation alloc_lhs_offset_0(4, offset_length, 0); BufferAllocation::Slice slice_lhs_offset_0(&alloc_lhs_offset_0, 0, offset_length); BufferAllocation alloc_lhs_offset_1(5, offset_length, 0); BufferAllocation::Slice slice_lhs_offset_1(&alloc_lhs_offset_1, 0, offset_length); auto config = GemmConfig::For(ShapeUtil::MakeShape(PrimitiveType::F32, {1, 3}), {}, {1}, ShapeUtil::MakeShape(PrimitiveType::F32, {3, 1}), {}, {0}, ShapeUtil::MakeShape(PrimitiveType::F32, {1, 1}), 1.0, 0.0, 0.0, PrecisionConfig::ALG_UNSET, std::nullopt, se::blas::kDefaultComputePrecision, false, false); ASSERT_TRUE(config.ok()); ThunkSequence seq; seq.emplace_back(std::make_unique<GemmThunk>( Thunk::ThunkInfo(), config.value(), slice_lhs_fake, slice_rhs_fake, slice_out_fake, slice_workspace_fake, true)); std::vector<DynamicSliceThunk::Offset> lhs_offsets{slice_lhs_offset_0, slice_lhs_offset_1}; 
DynamicSliceThunk thunk( Thunk::ThunkInfo(), std::make_unique<ThunkSequence>(std::move(seq)), {slice_lhs, slice_rhs, slice_out, slice_workspace}, std::move(fake_allocations), {lhs_offsets, std::nullopt, std::nullopt, std::nullopt}, {ShapeUtil::MakeShape(PrimitiveType::F32, {2, 4}), std::nullopt, std::nullopt, std::nullopt}, {ShapeUtil::MakeShape(PrimitiveType::F32, {1, 3}), std::nullopt, std::nullopt, std::nullopt}, {sizeof(int64_t), std::nullopt, std::nullopt, std::nullopt}); se::DeviceMemory<float> lhs = executor->AllocateArray<float>(2 * 4); std::vector<float> lhs_arr{1, 2, 3, 4, 5, 6, 7, 8}; TF_ASSERT_OK(stream->Memcpy(&lhs, lhs_arr.data(), lhs_length)); se::DeviceMemory<float> rhs = executor->AllocateArray<float>(3 * 1); std::vector<float> rhs_arr(3, 1); TF_ASSERT_OK(stream->Memcpy(&rhs, rhs_arr.data(), rhs_length)); se::DeviceMemory<float> out = executor->AllocateArray<float>(1 * 1); TF_ASSERT_OK(stream->MemZero(&out, out_length)); se::DeviceMemory<float> workspace = executor->AllocateArray<float>(1024 * 1024); TF_ASSERT_OK(stream->MemZero(&workspace, 1024 * 1024)); se::DeviceMemory<int64_t> lhs_offset_0 = executor->AllocateArray<int64_t>(1); se::DeviceMemory<int64_t> lhs_offset_1 = executor->AllocateArray<int64_t>(1); std::vector<int64_t> lhs_offset_arr{0, 1}; TF_ASSERT_OK( stream->Memcpy(&lhs_offset_0, &lhs_offset_arr[0], offset_length)); TF_ASSERT_OK( stream->Memcpy(&lhs_offset_1, &lhs_offset_arr[1], offset_length)); ServiceExecutableRunOptions run_options; se::StreamExecutorMemoryAllocator allocator(executor); BufferAllocations allocations( {workspace, lhs, out, rhs, lhs_offset_0, lhs_offset_1}, 0, &allocator); Thunk::ExecuteParams params = Thunk::ExecuteParams::Create( run_options, allocations, stream.get(), stream.get(), nullptr, nullptr); Thunk::ExecutableSource source = {"", {}}; TF_ASSERT_OK(thunk.Initialize( {executor, source, &allocations, stream.get(), stream.get()})); TF_ASSERT_OK(thunk.ExecuteOnStream(params)); TF_ASSERT_OK(stream->BlockHostUntilDone()); std::vector<float> dst(1, 0); TF_ASSERT_OK(stream->Memcpy(dst.data(), out, out_length)); ASSERT_EQ(dst, std::vector<float>({9})); } TEST(DynamicSliceThunkTest, SlicedGemmArbitraryNumberOfArguments) { se::StreamExecutor* executor = GpuExecutor(); TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream()); int64_t lhs_length = sizeof(float) * 2 * 4; int64_t rhs_length = sizeof(float) * 3 * 1; int64_t out_length = sizeof(float) * 1 * 1; int64_t offset_length = sizeof(int64_t); std::vector<std::unique_ptr<BufferAllocation>> fake_allocations(4); fake_allocations.push_back( std::make_unique<BufferAllocation>(0, rhs_length, 0)); BufferAllocation::Slice slice_lhs_fake(fake_allocations.back().get(), 0, rhs_length); fake_allocations.push_back( std::make_unique<BufferAllocation>(1, rhs_length, 0)); BufferAllocation::Slice slice_rhs_fake(fake_allocations.back().get(), 0, rhs_length); fake_allocations.push_back( std::make_unique<BufferAllocation>(2, out_length, 0)); BufferAllocation::Slice slice_out_fake(fake_allocations.back().get(), 0, out_length); fake_allocations.push_back(std::make_unique<BufferAllocation>( 3, 1024 * 1024, 0)); BufferAllocation::Slice slice_workspace_fake(fake_allocations.back().get(), 0, 1024 * 1024); BufferAllocation alloc_lhs(7, lhs_length, 0); BufferAllocation::Slice slice_lhs(&alloc_lhs, 0, lhs_length); BufferAllocation alloc_rhs(3, rhs_length, 0); BufferAllocation::Slice slice_rhs(&alloc_rhs, 0, rhs_length); BufferAllocation alloc_out(2, out_length, 0); BufferAllocation::Slice slice_out(&alloc_out, 0, 
out_length); BufferAllocation alloc_workspace(0, 1024 * 1024, 0); BufferAllocation::Slice slice_workspace(&alloc_workspace, 0, 1024 * 1024); BufferAllocation alloc_lhs_offset_0(4, offset_length, 0); BufferAllocation::Slice slice_lhs_offset_0(&alloc_lhs_offset_0, 0, offset_length); BufferAllocation alloc_lhs_offset_1(5, offset_length, 0); BufferAllocation::Slice slice_lhs_offset_1(&alloc_lhs_offset_1, 0, offset_length); auto config = GemmConfig::For(ShapeUtil::MakeShape(PrimitiveType::F32, {1, 3}), {}, {1}, ShapeUtil::MakeShape(PrimitiveType::F32, {3, 1}), {}, {0}, ShapeUtil::MakeShape(PrimitiveType::F32, {1, 1}), 1.0, 0.0, 0.0, PrecisionConfig::ALG_UNSET, std::nullopt, se::blas::kDefaultComputePrecision, false, false); ASSERT_TRUE(config.ok()); ThunkSequence seq; seq.emplace_back(std::make_unique<GemmThunk>( Thunk::ThunkInfo(), config.value(), slice_lhs_fake, slice_rhs_fake, slice_out_fake, slice_workspace_fake, true)); std::vector<DynamicSliceThunk::Offset> lhs_offsets{slice_lhs_offset_0, slice_lhs_offset_1}; DynamicSliceThunk thunk( Thunk::ThunkInfo(), std::make_unique<ThunkSequence>(std::move(seq)), {slice_lhs, slice_rhs, slice_out, slice_workspace}, std::move(fake_allocations), {lhs_offsets, std::nullopt, std::nullopt, std::nullopt}, {ShapeUtil::MakeShape(PrimitiveType::F32, {2, 4}), std::nullopt, std::nullopt, std::nullopt}, {ShapeUtil::MakeShape(PrimitiveType::F32, {1, 3}), std::nullopt, std::nullopt, std::nullopt}, {sizeof(int64_t), std::nullopt, std::nullopt, std::nullopt}); se::DeviceMemory<float> lhs = executor->AllocateArray<float>(2 * 4); std::vector<float> lhs_arr{1, 2, 3, 4, 5, 6, 7, 8}; TF_ASSERT_OK(stream->Memcpy(&lhs, lhs_arr.data(), lhs_length)); se::DeviceMemory<float> rhs = executor->AllocateArray<float>(3 * 1); std::vector<float> rhs_arr(3, 1); TF_ASSERT_OK(stream->Memcpy(&rhs, rhs_arr.data(), rhs_length)); se::DeviceMemory<float> out = executor->AllocateArray<float>(1 * 1); TF_ASSERT_OK(stream->MemZero(&out, out_length)); se::DeviceMemory<float> workspace = executor->AllocateArray<float>(1024 * 1024); TF_ASSERT_OK(stream->MemZero(&workspace, 1024 * 1024)); se::DeviceMemory<int64_t> lhs_offset_0 = executor->AllocateArray<int64_t>(1); se::DeviceMemory<int64_t> lhs_offset_1 = executor->AllocateArray<int64_t>(1); std::vector<int64_t> lhs_offset_arr{0, 1}; TF_ASSERT_OK( stream->Memcpy(&lhs_offset_0, &lhs_offset_arr[0], offset_length)); TF_ASSERT_OK( stream->Memcpy(&lhs_offset_1, &lhs_offset_arr[1], offset_length)); ServiceExecutableRunOptions run_options; se::StreamExecutorMemoryAllocator allocator(executor); BufferAllocations allocations( {workspace, se::DeviceMemoryBase(), out, rhs, lhs_offset_0, lhs_offset_1, rhs, lhs}, 0, &allocator); Thunk::ExecuteParams params = Thunk::ExecuteParams::Create( run_options, allocations, stream.get(), stream.get(), nullptr, nullptr); Thunk::ExecutableSource source = {"", {}}; TF_ASSERT_OK(thunk.Initialize( {executor, source, &allocations, stream.get(), stream.get()})); TF_ASSERT_OK(thunk.ExecuteOnStream(params)); TF_ASSERT_OK(stream->BlockHostUntilDone()); std::vector<float> dst(1, 0); TF_ASSERT_OK(stream->Memcpy(dst.data(), out, out_length)); ASSERT_EQ(dst, std::vector<float>({9})); } TEST(DynamicSliceThunkTest, SlicedTupledOperandGemm) { se::StreamExecutor* executor = GpuExecutor(); TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream()); int64_t lhs_length = sizeof(float) * 2 * 4; int64_t rhs_length = sizeof(float) * 3 * 1; int64_t out_length = sizeof(float) * 1 * 1; int64_t offset_length = sizeof(int64_t); 
std::vector<std::unique_ptr<BufferAllocation>> fake_allocations(4); fake_allocations.push_back( std::make_unique<BufferAllocation>(0, rhs_length, 0)); BufferAllocation::Slice slice_lhs_fake(fake_allocations.back().get(), 0, rhs_length); BufferAllocation alloc_lhs(0, 3 * lhs_length, 0); BufferAllocation::Slice slice_lhs(&alloc_lhs, lhs_length, lhs_length); fake_allocations.push_back( std::make_unique<BufferAllocation>(1, rhs_length, 0)); BufferAllocation::Slice slice_rhs(fake_allocations.back().get(), 0, rhs_length); fake_allocations.push_back( std::make_unique<BufferAllocation>(2, out_length, 0)); BufferAllocation::Slice slice_out(fake_allocations.back().get(), 0, out_length); fake_allocations.push_back(std::make_unique<BufferAllocation>( 3, 1024 * 1024, 0)); BufferAllocation::Slice slice_workspace(fake_allocations.back().get(), 0, 1024 * 1024); BufferAllocation alloc_lhs_offset_0(4, offset_length, 0); BufferAllocation::Slice slice_lhs_offset_0(&alloc_lhs_offset_0, 0, offset_length); BufferAllocation alloc_lhs_offset_1(5, offset_length, 0); BufferAllocation::Slice slice_lhs_offset_1(&alloc_lhs_offset_1, 0, offset_length); auto config = GemmConfig::For(ShapeUtil::MakeShape(PrimitiveType::F32, {1, 3}), {}, {1}, ShapeUtil::MakeShape(PrimitiveType::F32, {3, 1}), {}, {0}, ShapeUtil::MakeShape(PrimitiveType::F32, {1, 1}), 1.0, 0.0, 0.0, PrecisionConfig::ALG_UNSET, std::nullopt, se::blas::kDefaultComputePrecision, false, false); ASSERT_TRUE(config.ok()); ThunkSequence seq; seq.emplace_back(std::make_unique<GemmThunk>( Thunk::ThunkInfo(), config.value(), slice_lhs_fake, slice_rhs, slice_out, slice_workspace, true)); std::vector<DynamicSliceThunk::Offset> lhs_offsets{slice_lhs_offset_0, slice_lhs_offset_1}; DynamicSliceThunk thunk( Thunk::ThunkInfo(), std::make_unique<ThunkSequence>(std::move(seq)), {slice_lhs, slice_rhs, slice_out, slice_workspace}, std::move(fake_allocations), {lhs_offsets, std::nullopt, std::nullopt, std::nullopt}, {ShapeUtil::MakeShape(PrimitiveType::F32, {2, 4}), std::nullopt, std::nullopt, std::nullopt}, {ShapeUtil::MakeShape(PrimitiveType::F32, {1, 3}), std::nullopt, std::nullopt, std::nullopt}, {sizeof(int64_t), std::nullopt, std::nullopt, std::nullopt}); se::DeviceMemory<float> lhs_whole_buffer = executor->AllocateArray<float>(2 * 4 * 3); TF_ASSERT_OK(stream->MemZero(&lhs_whole_buffer, 2 * 4 * 3)); std::vector<float> lhs_arr{1, 2, 3, 4, 5, 6, 7, 8}; se::DeviceMemoryBase lhs = lhs_whole_buffer.GetByteSlice(lhs_length, lhs_length); TF_ASSERT_OK(stream->Memcpy(&lhs, lhs_arr.data(), lhs_length)); se::DeviceMemory<float> rhs = executor->AllocateArray<float>(3 * 1); std::vector<float> rhs_arr(3, 1); TF_ASSERT_OK(stream->Memcpy(&rhs, rhs_arr.data(), rhs_length)); se::DeviceMemory<float> out = executor->AllocateArray<float>(1 * 1); TF_ASSERT_OK(stream->MemZero(&out, out_length)); se::DeviceMemory<float> workspace = executor->AllocateArray<float>(1024 * 1024); TF_ASSERT_OK(stream->MemZero(&workspace, 1024 * 1024)); se::DeviceMemory<int64_t> lhs_offset_0 = executor->AllocateArray<int64_t>(1); se::DeviceMemory<int64_t> lhs_offset_1 = executor->AllocateArray<int64_t>(1); std::vector<int64_t> lhs_offset_arr{0, 1}; TF_ASSERT_OK( stream->Memcpy(&lhs_offset_0, &lhs_offset_arr[0], offset_length)); TF_ASSERT_OK( stream->Memcpy(&lhs_offset_1, &lhs_offset_arr[1], offset_length)); ServiceExecutableRunOptions run_options; se::StreamExecutorMemoryAllocator allocator(executor); BufferAllocations allocations( {lhs_whole_buffer, rhs, out, workspace, lhs_offset_0, lhs_offset_1}, 0, &allocator); 
Thunk::ExecuteParams params = Thunk::ExecuteParams::Create( run_options, allocations, stream.get(), stream.get(), nullptr, nullptr); Thunk::ExecutableSource source = {"", {}}; TF_ASSERT_OK(thunk.Initialize( {executor, source, &allocations, stream.get(), stream.get()})); TF_ASSERT_OK(thunk.ExecuteOnStream(params)); TF_ASSERT_OK(stream->BlockHostUntilDone()); std::vector<float> dst(1, 0); TF_ASSERT_OK(stream->Memcpy(dst.data(), out, out_length)); ASSERT_EQ(dst, std::vector<float>({9})); } TEST(DynamicSliceThunkTest, SlicedMemcpyOOB) { se::StreamExecutor* executor = GpuExecutor(); TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream()); int64_t src_count = 8 * 8 * 10 * 2; int64_t dst_count = 2 * 2 * 2 * 2; int64_t slice_count = 2 * 2; int64_t src_length = sizeof(int32_t) * src_count; int64_t dst_length = sizeof(int32_t) * dst_count; int64_t offset_length = sizeof(int64_t); int64_t slice_length = sizeof(int32_t) * slice_count; std::vector<std::unique_ptr<BufferAllocation>> fake_allocations(2); fake_allocations.push_back(std::make_unique<BufferAllocation>( 0, slice_length, 0)); BufferAllocation::Slice slice_src_fake(fake_allocations.back().get(), 0, slice_length); fake_allocations.push_back(std::make_unique<BufferAllocation>( 1, slice_length, 0)); BufferAllocation::Slice slice_dst_fake(fake_allocations.back().get(), 0, slice_length); BufferAllocation alloc_src(0, src_length, 0); BufferAllocation::Slice slice_src(&alloc_src, 0, src_length); BufferAllocation alloc_dst(1, dst_length, 0); BufferAllocation::Slice slice_dst(&alloc_dst, 0, dst_length); BufferAllocation alloc_src_offset_0(2, offset_length, 0); BufferAllocation::Slice slice_src_offset_0(&alloc_src_offset_0, 0, offset_length); BufferAllocation alloc_src_offset_1(3, offset_length, 0); BufferAllocation::Slice slice_src_offset_1(&alloc_src_offset_1, 0, offset_length); BufferAllocation alloc_src_offset_2(4, offset_length, 0); BufferAllocation::Slice slice_src_offset_2(&alloc_src_offset_2, 0, offset_length); BufferAllocation alloc_src_offset_3(5, offset_length, 0); BufferAllocation::Slice slice_src_offset_3(&alloc_src_offset_3, 0, offset_length); BufferAllocation alloc_dst_offset_0(6, offset_length, 0); BufferAllocation::Slice slice_dst_offset_0(&alloc_dst_offset_0, 0, offset_length); BufferAllocation alloc_dst_offset_1(7, offset_length, 0); BufferAllocation::Slice slice_dst_offset_1(&alloc_dst_offset_1, 0, offset_length); BufferAllocation alloc_dst_offset_2(8, offset_length, 0); BufferAllocation::Slice slice_dst_offset_2(&alloc_dst_offset_2, 0, offset_length); BufferAllocation alloc_dst_offset_3(9, offset_length, 0); BufferAllocation::Slice slice_dst_offset_3(&alloc_dst_offset_3, 0, offset_length); auto registration = xla::ffi::FindHandler("__xla_test$$memcpy", PLATFORM); ASSERT_TRUE(registration.ok()); std::vector<std::optional<CustomCallThunk::Slice>> operands{ CustomCallThunk::Slice{slice_src_fake, ShapeUtil::MakeShape(PrimitiveType::S32, {2, 2})}}; std::vector<std::optional<CustomCallThunk::Slice>> results{ CustomCallThunk::Slice{slice_dst_fake, ShapeUtil::MakeShape(PrimitiveType::S32, {2, 2})}}; ThunkSequence seq; TF_ASSERT_OK_AND_ASSIGN( seq.emplace_back(), CustomCallThunk::Create(Thunk::ThunkInfo(), registration->bundle, operands, results, CustomCallThunk::AttributesMap(), nullptr)); std::vector<DynamicSliceThunk::Offset> slice_src_offsets{ slice_src_offset_0, slice_src_offset_1, slice_src_offset_2, slice_src_offset_3}; std::vector<DynamicSliceThunk::Offset> slice_dst_offsets{ slice_dst_offset_0, slice_dst_offset_1, 
slice_dst_offset_2, slice_dst_offset_3}; DynamicSliceThunk thunk( Thunk::ThunkInfo(), std::make_unique<ThunkSequence>(std::move(seq)), {slice_src, slice_dst}, std::move(fake_allocations), {slice_src_offsets, slice_dst_offsets}, {ShapeUtil::MakeShape(PrimitiveType::S32, {8, 8, 10, 2}), ShapeUtil::MakeShape(PrimitiveType::S32, {2, 2, 2, 2})}, {ShapeUtil::MakeShape(PrimitiveType::S32, {1, 1, 2, 2}), ShapeUtil::MakeShape(PrimitiveType::S32, {1, 1, 2, 2})}, {sizeof(int64_t), sizeof(int64_t)}); se::DeviceMemory<int32_t> src = executor->AllocateArray<int32_t>(src_count); std::vector<int32_t> src_arr(src_count, 0); for (unsigned i = 0; i < src_count; ++i) src_arr[i] = i; TF_ASSERT_OK(stream->Memcpy(&src, src_arr.data(), src_length)); se::DeviceMemory<int32_t> dst = executor->AllocateArray<int32_t>(dst_count); TF_ASSERT_OK(stream->MemZero(&dst, dst_length)); se::DeviceMemory<int64_t> src_offset_0 = executor->AllocateArray<int64_t>(1); se::DeviceMemory<int64_t> src_offset_1 = executor->AllocateArray<int64_t>(1); se::DeviceMemory<int64_t> src_offset_2 = executor->AllocateArray<int64_t>(1); se::DeviceMemory<int64_t> src_offset_3 = executor->AllocateArray<int64_t>(1); std::vector<int64_t> src_ref_offset_arr{3, 5, 2, 0}; std::vector<int64_t> src_offset_arr{3, 5, 2, -3}; TF_ASSERT_OK( stream->Memcpy(&src_offset_0, &src_offset_arr[0], offset_length)); TF_ASSERT_OK( stream->Memcpy(&src_offset_1, &src_offset_arr[1], offset_length)); TF_ASSERT_OK( stream->Memcpy(&src_offset_2, &src_offset_arr[2], offset_length)); TF_ASSERT_OK( stream->Memcpy(&src_offset_3, &src_offset_arr[3], offset_length)); se::DeviceMemory<int64_t> dst_offset_0 = executor->AllocateArray<int64_t>(1); se::DeviceMemory<int64_t> dst_offset_1 = executor->AllocateArray<int64_t>(1); se::DeviceMemory<int64_t> dst_offset_2 = executor->AllocateArray<int64_t>(1); se::DeviceMemory<int64_t> dst_offset_3 = executor->AllocateArray<int64_t>(1); std::vector<int64_t> dst_ref_offset_arr{1, 1, 0, 0}; std::vector<int64_t> dst_offset_arr{3, 2, 5, -4}; TF_ASSERT_OK( stream->Memcpy(&dst_offset_0, &dst_offset_arr[0], offset_length)); TF_ASSERT_OK( stream->Memcpy(&dst_offset_1, &dst_offset_arr[1], offset_length)); TF_ASSERT_OK( stream->Memcpy(&dst_offset_2, &dst_offset_arr[2], offset_length)); TF_ASSERT_OK( stream->Memcpy(&dst_offset_3, &dst_offset_arr[3], offset_length)); ServiceExecutableRunOptions run_options; se::StreamExecutorMemoryAllocator allocator(executor); BufferAllocations allocations( {src, dst, src_offset_0, src_offset_1, src_offset_2, src_offset_3, dst_offset_0, dst_offset_1, dst_offset_2, dst_offset_3}, 0, &allocator); Thunk::ExecuteParams params = Thunk::ExecuteParams::Create( run_options, allocations, stream.get(), stream.get(), nullptr, nullptr); Thunk::ExecutableSource source = {"", {}}; TF_ASSERT_OK(thunk.Initialize( {executor, source, &allocations, stream.get(), stream.get()})); TF_ASSERT_OK(thunk.ExecuteOnStream(params)); TF_ASSERT_OK(stream->BlockHostUntilDone()); std::vector<int32_t> out(dst_count, 0); TF_ASSERT_OK(stream->Memcpy(out.data(), dst, dst_length)); std::vector<int32_t> ref(dst_count, 0); int64_t src_offset_val = src_ref_offset_arr[3] + 2 * (src_ref_offset_arr[2] + 10 * (src_ref_offset_arr[1] + 8 * src_ref_offset_arr[0])); int64_t dst_offset_val = dst_ref_offset_arr[3] + 2 * (dst_ref_offset_arr[2] + 2 * (dst_ref_offset_arr[1] + 2 * dst_ref_offset_arr[0])); std::copy(src_arr.begin() + src_offset_val, src_arr.begin() + src_offset_val + slice_count, ref.begin() + dst_offset_val); ASSERT_EQ(out, ref); } TEST(DynamicSliceThunkTest, 
SlicedOperandsSameBufferGemm) { se::StreamExecutor* executor = GpuExecutor(); TF_ASSERT_OK_AND_ASSIGN(auto stream, executor->CreateStream()); int64_t lhs_length = sizeof(float) * 2 * 4; int64_t rhs_length = sizeof(float) * 3 * 1; int64_t out_length = sizeof(float) * 1 * 1; int64_t offset_length = sizeof(int64_t); std::vector<std::unique_ptr<BufferAllocation>> fake_allocations(4); fake_allocations.push_back( std::make_unique<BufferAllocation>(0, rhs_length, 0)); BufferAllocation::Slice slice_lhs_fake(fake_allocations.back().get(), 0, rhs_length); fake_allocations.push_back( std::make_unique<BufferAllocation>(1, rhs_length, 0)); BufferAllocation::Slice slice_rhs_fake(fake_allocations.back().get(), 0, rhs_length); fake_allocations.push_back( std::make_unique<BufferAllocation>(2, out_length, 0)); BufferAllocation::Slice slice_out_fake(fake_allocations.back().get(), 0, out_length); fake_allocations.push_back(std::make_unique<BufferAllocation>( 3, 1024 * 1024, 0)); BufferAllocation::Slice slice_workspace_fake(fake_allocations.back().get(), 0, 1024 * 1024); BufferAllocation alloc(0, lhs_length + rhs_length + out_length, 0); BufferAllocation::Slice slice_lhs(&alloc, 0, lhs_length); BufferAllocation::Slice slice_rhs(&alloc, lhs_length, rhs_length); BufferAllocation::Slice slice_out(&alloc, lhs_length + rhs_length, out_length); BufferAllocation alloc_workspace(1, 1024 * 1024, 0); BufferAllocation::Slice slice_workspace(&alloc_workspace, 0, 1024 * 1024); BufferAllocation alloc_lhs_offset_0(2, offset_length, 0); BufferAllocation::Slice slice_lhs_offset_0(&alloc_lhs_offset_0, 0, offset_length); BufferAllocation alloc_lhs_offset_1(3, offset_length, 0); BufferAllocation::Slice slice_lhs_offset_1(&alloc_lhs_offset_1, 0, offset_length); auto config = GemmConfig::For(ShapeUtil::MakeShape(PrimitiveType::F32, {1, 3}), {}, {1}, ShapeUtil::MakeShape(PrimitiveType::F32, {3, 1}), {}, {0}, ShapeUtil::MakeShape(PrimitiveType::F32, {1, 1}), 1.0, 0.0, 0.0, PrecisionConfig::ALG_UNSET, std::nullopt, se::blas::kDefaultComputePrecision, false, false); ASSERT_TRUE(config.ok()); ThunkSequence seq; seq.emplace_back(std::make_unique<GemmThunk>( Thunk::ThunkInfo(), config.value(), slice_lhs_fake, slice_rhs_fake, slice_out_fake, slice_workspace_fake, true)); std::vector<DynamicSliceThunk::Offset> lhs_offsets{slice_lhs_offset_0, slice_lhs_offset_1}; DynamicSliceThunk thunk( Thunk::ThunkInfo(), std::make_unique<ThunkSequence>(std::move(seq)), {slice_lhs, slice_rhs, slice_out, slice_workspace}, std::move(fake_allocations), {lhs_offsets, std::nullopt, std::nullopt, std::nullopt}, {ShapeUtil::MakeShape(PrimitiveType::F32, {2, 4}), std::nullopt, std::nullopt, std::nullopt}, {ShapeUtil::MakeShape(PrimitiveType::F32, {1, 3}), std::nullopt, std::nullopt, std::nullopt}, {sizeof(int64_t), std::nullopt, std::nullopt, std::nullopt}); se::DeviceMemory<float> buffer = executor->AllocateArray<float>(lhs_length + rhs_length + out_length); TF_ASSERT_OK(stream->MemZero(&buffer, lhs_length + rhs_length + out_length)); se::DeviceMemoryBase lhs = buffer.GetByteSlice(0, lhs_length); std::vector<float> lhs_arr{1, 2, 3, 4, 5, 6, 7, 8}; TF_ASSERT_OK(stream->Memcpy(&lhs, lhs_arr.data(), lhs_length)); se::DeviceMemoryBase rhs = buffer.GetByteSlice(lhs_length, rhs_length); std::vector<float> rhs_arr(3, 1); TF_ASSERT_OK(stream->Memcpy(&rhs, rhs_arr.data(), rhs_length)); se::DeviceMemoryBase out = buffer.GetByteSlice(lhs_length + rhs_length, out_length); se::DeviceMemory<float> workspace = executor->AllocateArray<float>(1024 * 1024); 
TF_ASSERT_OK(stream->MemZero(&workspace, 1024 * 1024)); se::DeviceMemory<int64_t> lhs_offset_0 = executor->AllocateArray<int64_t>(1); se::DeviceMemory<int64_t> lhs_offset_1 = executor->AllocateArray<int64_t>(1); std::vector<int64_t> lhs_offset_arr{0, 1}; TF_ASSERT_OK( stream->Memcpy(&lhs_offset_0, &lhs_offset_arr[0], offset_length)); TF_ASSERT_OK( stream->Memcpy(&lhs_offset_1, &lhs_offset_arr[1], offset_length)); ServiceExecutableRunOptions run_options; se::StreamExecutorMemoryAllocator allocator(executor); BufferAllocations allocations({buffer, workspace, lhs_offset_0, lhs_offset_1}, 0, &allocator); Thunk::ExecuteParams params = Thunk::ExecuteParams::Create( run_options, allocations, stream.get(), stream.get(), nullptr, nullptr); Thunk::ExecutableSource source = {"", {}}; TF_ASSERT_OK(thunk.Initialize( {executor, source, &allocations, stream.get(), stream.get()})); TF_ASSERT_OK(thunk.ExecuteOnStream(params)); TF_ASSERT_OK(stream->BlockHostUntilDone()); std::vector<float> dst(1, 0); TF_ASSERT_OK(stream->Memcpy(dst.data(), out, out_length)); ASSERT_EQ(dst, std::vector<float>({9})); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/runtime/dynamic_slice_thunk.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/runtime/dynamic_slice_thunk_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
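Note on the SlicedMemcpyOOB test in the record above: its reference output assumes XLA dynamic-slice semantics, where every runtime start index is clamped into [0, dim_size - slice_size]. That is how src_offset_arr {3, 5, 2, -3} maps to src_ref_offset_arr {3, 5, 2, 0} and dst_offset_arr {3, 2, 5, -4} maps to dst_ref_offset_arr {1, 1, 0, 0}. The sketch below reproduces only that arithmetic; ClampOffsets is a hypothetical standalone helper for illustration, not an API of the thunk or the test.

#include <algorithm>
#include <cstdint>
#include <vector>

// Hypothetical helper: clamp each requested start index into the valid
// range [0, dim - slice_dim], matching the reference offsets in the test.
std::vector<int64_t> ClampOffsets(const std::vector<int64_t>& offsets,
                                  const std::vector<int64_t>& dims,
                                  const std::vector<int64_t>& slice_dims) {
  std::vector<int64_t> clamped(offsets.size());
  for (size_t i = 0; i < offsets.size(); ++i) {
    clamped[i] = std::clamp<int64_t>(offsets[i], 0, dims[i] - slice_dims[i]);
  }
  return clamped;
}

// ClampOffsets({3, 5, 2, -3}, {8, 8, 10, 2}, {1, 1, 2, 2}) -> {3, 5, 2, 0}
// ClampOffsets({3, 2, 5, -4}, {2, 2, 2, 2}, {1, 1, 2, 2}) -> {1, 1, 0, 0}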
3bb723b8-5bd7-4924-90c9-66c1ee244de9
cpp
tensorflow/tensorflow
all_gather_optimizer
third_party/xla/xla/service/gpu/transforms/all_gather_optimizer.cc
third_party/xla/xla/service/gpu/transforms/all_gather_optimizer_test.cc
#include "xla/service/gpu/transforms/all_gather_optimizer.h" #include <cstdint> #include <utility> #include "absl/container/flat_hash_set.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/collective_ops_utils.h" #include "xla/shape_util.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" namespace xla { namespace gpu { absl::StatusOr<bool> AllGatherOptimizer::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { bool changed = false; for (HloComputation* computation : module->MakeNonfusionComputations(execution_threads)) { for (HloInstruction* instruction : computation->MakeInstructionPostOrder()) { if (!HloOpcodeIsBinaryCommutative(instruction->opcode())) { continue; } HloInstruction* left_op = instruction->mutable_operand(0); HloInstruction* right_op = instruction->mutable_operand(1); if (right_op->opcode() != HloOpcode::kAllGather || left_op->opcode() != HloOpcode::kAllGather) { VLOG(2) << "Binary op's operands are not all-gather deduced types."; continue; } auto* left_all_gather = Cast<HloAllGatherInstruction>(left_op); auto* right_all_gather = Cast<HloAllGatherInstruction>(right_op); if (right_all_gather->constrain_layout() != left_all_gather->constrain_layout() || right_all_gather->use_global_device_ids() != left_all_gather->use_global_device_ids() || !ReplicaGroupsEqual(right_all_gather->replica_groups(), left_all_gather->replica_groups())) { VLOG(2) << "The right and left all-gather ops are not compatible " "to merge. "; continue; } if (!ShapeUtil::Equal(left_all_gather->operand(0)->shape(), right_all_gather->operand(0)->shape())) { VLOG(2) << "all-gather operands have different shapes"; continue; } if (right_all_gather->user_count() != 1 || left_all_gather->user_count() != 1) { VLOG(2) << "all-gather user_count > 1 "; continue; } auto index_in_full_shape = computation->AddInstruction(HloInstruction::CreateBinary( right_all_gather->operand(0)->shape(), instruction->opcode(), left_all_gather->mutable_operand(0), right_all_gather->mutable_operand(0))); int64_t all_gather_dimension = Cast<HloAllGatherInstruction>(right_all_gather) ->all_gather_dimension(); auto combined = HloInstruction::CreateAllGather( left_all_gather->shape(), {index_in_full_shape}, all_gather_dimension, left_all_gather->device_list(), false, left_all_gather->channel_id(), Cast<HloAllGatherInstruction>(left_all_gather) ->use_global_device_ids()); TF_RETURN_IF_ERROR(computation->ReplaceWithNewInstruction( instruction, std::move(combined))); changed = true; } } return changed; } } }
#include "xla/service/gpu/transforms/all_gather_optimizer.h" #include <cstddef> #include <cstdint> #include <memory> #include <utility> #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/hlo_module_config.h" #include "xla/tests/hlo_test_base.h" #include "xla/util.h" #include "tsl/platform/statusor.h" #include "tsl/platform/test.h" namespace xla { namespace gpu { namespace { class GpuAllGatherOptimizerTest : public HloTestBase { public: absl::StatusOr<std::unique_ptr<HloModule>> RunPass( absl::string_view hlo_module, int64_t num_replicas, int64_t num_partitions, bool expect_change) { HloModuleConfig config = GetModuleConfigForTest( num_replicas, num_partitions); config.set_use_spmd_partitioning(num_partitions > 1); TF_ASSIGN_OR_RETURN(auto module, ParseAndReturnVerifiedModule(hlo_module, config)); auto changed = AllGatherOptimizer().Run(module.get()); if (!changed.ok()) { return changed.status(); } EXPECT_EQ(changed.value(), expect_change); return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(module)); } template <HloOpcode oc> size_t CollectiveCount(std::unique_ptr<HloModule> &module) { return absl::c_count_if(module->entry_computation()->instructions(), HloPredicateIsOp<oc>); } }; TEST_F(GpuAllGatherOptimizerTest, BranchesOptimized) { absl::string_view hlo_string = R"( HloModule ReduceScatter add { x = bf16[] parameter(0) y = bf16[] parameter(1) ROOT add = bf16[] add(x, y) } ENTRY main { param.1 = bf16[8,128,1024]{2,1,0} parameter(0) param.2 = bf16[8,128,1024]{2,1,0} parameter(1) reduce-scatter.1 = bf16[8,64,1024]{2,1,0} reduce-scatter(param.1), channel_id=8, replica_groups={{0,1},{2,3},{4,5},{6,7}}, use_global_device_ids=true, dimensions={1}, to_apply=add all-gather.1 = bf16[8,128,1024]{2,1,0} all-gather(reduce-scatter.1), channel_id=5, replica_groups={{0,1},{2,3},{4,5},{6,7}}, dimensions={1}, use_global_device_ids=true reduce-scatter.2 = bf16[8,64,1024]{2,1,0} reduce-scatter(param.2), channel_id=9, replica_groups={{0,1},{2,3},{4,5},{6,7}}, use_global_device_ids=true, dimensions={1}, to_apply=add all-gather.2 = bf16[8,128,1024]{2,1,0} all-gather(reduce-scatter.2), channel_id=5, replica_groups={{0,1},{2,3},{4,5},{6,7}}, dimensions={1}, use_global_device_ids=true add.1 = bf16[8,128,1024]{2,1,0} add(all-gather.1, all-gather.2) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, 8, 1, true)); EXPECT_EQ(CollectiveCount<HloOpcode::kAllGather>(module), 3); EXPECT_EQ(CollectiveCount<HloOpcode::kReduceScatter>(module), 2); } TEST_F(GpuAllGatherOptimizerTest, DisbledSPMDPartitioningJAXBug) { absl::string_view hlo_string = R"( HloModule pjit_f, entry_computation_layout={(f32[4,8]{1,0}, f32[4,8]{1,0})->f32[8,8]{1,0}} ENTRY %main.6_spmd (param: f32[4,8], param.1: f32[4,8]) -> f32[8,8] { %param = f32[4,8]{1,0} parameter(0), sharding={devices=[2,1]<=[2]} %all-gather = f32[8,8]{1,0} all-gather(f32[4,8]{1,0} %param), channel_id=1, replica_groups={{0,1}}, dimensions={0}, use_global_device_ids=true, metadata={op_name="pjit(f)/jit(main)/add" source_file="third_party/py/jax/tests/pjit_test.py" source_line=207} %param.1 = f32[4,8]{1,0} parameter(1), sharding={devices=[2,1]<=[2]} %all-gather.1 = f32[8,8]{1,0} all-gather(f32[4,8]{1,0} %param.1), channel_id=2, replica_groups={{0,1}}, dimensions={0}, use_global_device_ids=true, metadata={op_name="pjit(f)/jit(main)/add" source_file="third_party/py/jax/tests/pjit_test.py" source_line=207} ROOT %add.0 = f32[8,8]{1,0} add(f32[8,8]{1,0} 
%all-gather, f32[8,8]{1,0} %all-gather.1), metadata={op_name="pjit(f)/jit(main)/add" source_file="third_party/py/jax/tests/pjit_test.py" source_line=207} } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, 1, 2, true)); EXPECT_EQ(CollectiveCount<HloOpcode::kAllGather>(module), 1); } TEST_F(GpuAllGatherOptimizerTest, MoreThanSingleUserForAllGather) { absl::string_view hlo_string = R"( HloModule ReduceScatter add { x = bf16[] parameter(0) y = bf16[] parameter(1) ROOT add = bf16[] add(x, y) } ENTRY main { param.1 = bf16[8,128,1024]{2,1,0} parameter(0) param.2 = bf16[8,128,1024]{2,1,0} parameter(1) param.3 = bf16[8,128,1024]{2,1,0} parameter(2) reduce-scatter.1 = bf16[8,64,1024]{2,1,0} reduce-scatter(param.1), channel_id=8, replica_groups={{0,1},{2,3},{4,5},{6,7}}, use_global_device_ids=true, dimensions={1}, to_apply=add all-gather.1 = bf16[8,128,1024]{2,1,0} all-gather(reduce-scatter.1), channel_id=5, replica_groups={{0,1},{2,3},{4,5},{6,7}}, dimensions={1}, use_global_device_ids=true reduce-scatter.2 = bf16[8,64,1024]{2,1,0} reduce-scatter(param.2), channel_id=9, replica_groups={{0,1},{2,3},{4,5},{6,7}}, use_global_device_ids=true, dimensions={1}, to_apply=add all-gather.2 = bf16[8,128,1024]{2,1,0} all-gather(reduce-scatter.2), channel_id=5, replica_groups={{0,1},{2,3},{4,5},{6,7}}, dimensions={1}, use_global_device_ids=true reduce-scatter.3 = bf16[8,64,1024]{2,1,0} reduce-scatter(param.3), channel_id=9, replica_groups={{0,1},{2,3},{4,5},{6,7}}, use_global_device_ids=true, dimensions={1}, to_apply=add all-gather.3 = bf16[8,128,1024]{2,1,0} all-gather(reduce-scatter.3), channel_id=5, replica_groups={{0,1},{2,3},{4,5},{6,7}}, dimensions={1}, use_global_device_ids=true add.1 = bf16[8,128,1024]{2,1,0} add(all-gather.1, all-gather.3) add.2 = bf16[8,128,1024]{2,1,0} add(all-gather.1, all-gather.2) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, 8, 1, false)); EXPECT_EQ(CollectiveCount<HloOpcode::kAllGather>(module), 3); EXPECT_EQ(CollectiveCount<HloOpcode::kReduceScatter>(module), 3); } TEST_F(GpuAllGatherOptimizerTest, AllGatherWithOpInBetweenOnRightBranch) { absl::string_view hlo_string = R"( HloModule ReduceScatter add { x = bf16[] parameter(0) y = bf16[] parameter(1) ROOT add = bf16[] add(x, y) } ENTRY main { param.1 = bf16[8,128,1024]{2,1,0} parameter(0) param.2 = bf16[8,128,1024]{2,1,0} parameter(1) param.3 = bf16[8,128,1024]{2,1,0} parameter(2) reduce-scatter.1 = bf16[8,64,1024]{2,1,0} reduce-scatter(param.1), channel_id=8, replica_groups={{0,1},{2,3},{4,5},{6,7}}, use_global_device_ids=true, dimensions={1}, to_apply=add reduce-scatter.2 = bf16[8,64,1024]{2,1,0} reduce-scatter(param.2), channel_id=9, replica_groups={{0,1},{2,3},{4,5},{6,7}}, use_global_device_ids=true, dimensions={1}, to_apply=add add.1 = bf16[8,64,1024]{2,1,0} add(reduce-scatter.1, reduce-scatter.2) all-gather.1 = bf16[8,128,1024]{2,1,0} all-gather(add.1), channel_id=5, replica_groups={{0,1},{2,3},{4,5},{6,7}}, dimensions={1}, use_global_device_ids=true reduce-scatter.3 = bf16[8,64,1024]{2,1,0} reduce-scatter(param.3), channel_id=9, replica_groups={{0,1},{2,3},{4,5},{6,7}}, use_global_device_ids=true, dimensions={1}, to_apply=add all-gather.3 = bf16[8,128,1024]{2,1,0} all-gather(reduce-scatter.3), channel_id=5, replica_groups={{0,1},{2,3},{4,5},{6,7}}, dimensions={1}, use_global_device_ids=true add.2 = bf16[8,128,1024]{2,1,0} add(all-gather.1, all-gather.3) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, 8, 1, true)); EXPECT_EQ(CollectiveCount<HloOpcode::kAllGather>(module), 3); 
EXPECT_EQ(CollectiveCount<HloOpcode::kReduceScatter>(module), 3); } TEST_F(GpuAllGatherOptimizerTest, AllGatherOneSided) { absl::string_view hlo_string = R"( HloModule ReduceScatter add { x = bf16[] parameter(0) y = bf16[] parameter(1) ROOT add = bf16[] add(x, y) } ENTRY main { param.1 = bf16[8,128,1024]{2,1,0} parameter(0) param.2 = bf16[8,128,1024]{2,1,0} parameter(1) param.3 = bf16[8,128,1024]{2,1,0} parameter(2) add.1 = bf16[8,128,1024]{2,1,0} add(param.1, param.2) reduce-scatter = bf16[8,64,1024]{2,1,0} reduce-scatter(param.3), channel_id=9, replica_groups={{0,1},{2,3},{4,5},{6,7}}, use_global_device_ids=true, dimensions={1}, to_apply=add all-gather = bf16[8,128,1024]{2,1,0} all-gather(reduce-scatter), channel_id=5, replica_groups={{0,1},{2,3},{4,5},{6,7}}, dimensions={1}, use_global_device_ids=true add.2 = bf16[8,128,1024]{2,1,0} add(all-gather, add.1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, 8, 1, false)); EXPECT_EQ(CollectiveCount<HloOpcode::kAllGather>(module), 1); EXPECT_EQ(CollectiveCount<HloOpcode::kReduceScatter>(module), 1); } TEST_F(GpuAllGatherOptimizerTest, DifferentOperandShapes) { absl::string_view hlo_string = R"( HloModule TestModule ENTRY main { param.1 = bf16[8,64,128]{2,1,0} parameter(0) param.2 = bf16[8,128,64]{2,1,0} parameter(1) all-gather.1 = bf16[8,128,128]{2,1,0} all-gather(param.1), channel_id=5, replica_groups={{0,1},{2,3},{4,5},{6,7}}, dimensions={1}, use_global_device_ids=true all-gather.2 = bf16[8,128,128]{2,1,0} all-gather(param.2), channel_id=5, replica_groups={{0,1},{2,3},{4,5},{6,7}}, dimensions={2}, use_global_device_ids=true add.1 = bf16[8,128,128]{2,1,0} add(all-gather.1, all-gather.2) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, 8, 1, false)); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/all_gather_optimizer.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/all_gather_optimizer_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
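Note on the all_gather_optimizer record above: the rewrite being exercised sinks a commutative binary op below two compatible, single-use all-gathers over identically shaped operands, so add(all-gather(a), all-gather(b)) becomes all-gather(add(a, b)). The pass only replaces the binary op; the now-dead original all-gathers are left for a later DCE pass, which is why BranchesOptimized still counts three all-gather instructions after a successful rewrite. A minimal driver in the spirit of the test fixture is sketched below; RunAllGatherOptimizer is an illustrative wrapper name, not part of the files above.

//   before:  ag0 = all-gather(a)   ag1 = all-gather(b)   out = add(ag0, ag1)
//   after:   sum = add(a, b)       out = all-gather(sum)   (ag0/ag1 now dead)
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/transforms/all_gather_optimizer.h"

// Illustrative wrapper: returns true when at least one
// add(all-gather, all-gather) pair was rewritten into all-gather(add),
// using the same single-argument Run(...) overload the test fixture calls.
absl::StatusOr<bool> RunAllGatherOptimizer(xla::HloModule* module) {
  return xla::gpu::AllGatherOptimizer().Run(module);
}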
11330dda-2940-4ffb-b703-255d358d435f
cpp
tensorflow/tensorflow
cudnn_fused_mha_rewriter
third_party/xla/xla/service/gpu/transforms/cudnn_fused_mha_rewriter.cc
third_party/xla/xla/service/gpu/transforms/cudnn_fused_mha_rewriter_test.cc
#include "xla/service/gpu/transforms/cudnn_fused_mha_rewriter.h" #include <algorithm> #include <cstdint> #include <numeric> #include <optional> #include <queue> #include <string> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_format.h" #include "absl/strings/str_join.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/permutation_util.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/cublas_cudnn.h" #include "xla/service/gpu/matmul_utils.h" #include "xla/service/gpu/stream_executor_util.h" #include "xla/service/pattern_matcher.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/status_macros.h" #include "xla/stream_executor/device_description.h" #include "xla/stream_executor/dnn.h" #include "xla/types.h" #include "xla/util.h" #include "xla/xla.pb.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" #if GOOGLE_CUDA #include "third_party/gpus/cuda/include/cuda.h" #endif namespace xla { namespace gpu { namespace { namespace m = match; struct MatchFwdResult { HloInstruction* matched_bmm_1 = nullptr; HloInstruction* matched_bmm_2 = nullptr; HloInstruction* matched_bias = nullptr; HloInstruction* matched_scale = nullptr; HloInstruction* matched_softmax_input = nullptr; HloInstruction* matched_reduce_sum = nullptr; double matched_dropout_rate = 0.0; bool need_canonicalization = false; bool is_training = false; bool is_causal_mask = false; bool has_match = false; std::string matched_custom_call_name; }; struct MatchBwdResult { HloInstruction* matched_bmm_1_grad_1 = nullptr; HloInstruction* matched_bmm_1_grad_2 = nullptr; HloInstruction* matched_bmm_2_grad_1 = nullptr; HloInstruction* matched_bmm_2_grad_2 = nullptr; HloInstruction* matched_dbias = nullptr; bool bmm_1_grad_1_need_canonicalization = false; bool bmm_1_grad_2_need_canonicalization = false; bool bmm_2_grad_1_need_canonicalization = false; bool bmm_2_grad_2_need_canonicalization = false; bool has_match = false; std::string matched_custom_call_name; }; template <typename Pattern> auto OptionalReshape(Pattern pattern) { auto shared = m::SharedSubpattern(pattern); return m::AnyOf<HloInstruction>(m::Reshape(shared), shared); } template <typename Pattern> auto OptionalConvert(Pattern pattern) { auto shared = m::SharedSubpattern(pattern); return m::AnyOf<HloInstruction>(m::Convert(shared), shared); } template <typename Pattern> auto OptionalBitcast(Pattern pattern) { auto shared = m::SharedSubpattern(pattern); return m::AnyOf<HloInstruction>(m::Bitcast(shared), shared); } template <typename Pattern> auto OptionalBroadcast(Pattern pattern) { auto shared = m::SharedSubpattern(pattern); return m::AnyOf<HloInstruction>(m::Broadcast(shared), shared); } bool IsBatchedMatmul(const HloInstruction* instr) { if (instr->opcode() != HloOpcode::kDot) return false; if (Cast<HloDotInstruction>(instr)->sparse_operands()) return false; const DotDimensionNumbers& dot_dims = instr->dot_dimension_numbers(); bool is_batch_dot = !dot_dims.lhs_batch_dimensions().empty() || !dot_dims.rhs_batch_dimensions().empty(); return is_batch_dot; } bool 
IsSharingOperandWithFwdMha(HloInstruction* gemm) { for (int64_t i = 0; i < gemm->operands().size(); i++) { std::queue<HloInstruction*> visit_list; visit_list.push(gemm->mutable_operand(i)); while (!visit_list.empty()) { HloInstruction* current_instr = visit_list.front(); for (auto user : current_instr->users()) { switch (user->opcode()) { case HloOpcode::kBitcast: case HloOpcode::kReshape: case HloOpcode::kTranspose: { visit_list.push(user); break; } case HloOpcode::kCustomCall: { if (IsFwdCustomCallTofMHA(*user)) { return true; } } break; default: break; } } visit_list.pop(); } } return false; } bool IsFirstFwdMatmul(HloInstruction* gemm) { return IsBatchedMatmul(gemm) && !IsFwdCustomCallTofMHA(*gemm->operand(0)) && !IsFwdCustomCallTofMHA(*gemm->operand(1)) && !IsSharingOperandWithFwdMha(gemm); } bool IsScalar(const HloInstruction* instr) { return ShapeUtil::IsEffectiveScalar(instr->shape()); } bool IsReduceMax(const HloInstruction* instr) { return instr->opcode() == HloOpcode::kReduce && instr->to_apply()->root_instruction()->opcode() == HloOpcode::kMaximum; } bool IsReduceSum(const HloInstruction* instr) { return instr->opcode() == HloOpcode::kReduce && instr->to_apply()->root_instruction()->opcode() == HloOpcode::kAdd; } auto GetUnfusedReduceMaxSumSoftmaxPattern( HloInstruction** softmax_input = nullptr, HloInstruction** softmax_reduce_sum = nullptr, HloInstruction** softmax_reduce_sum_bcast = nullptr) { auto unfused_softmax_max_subpattern = m::SharedSubpattern( m::Subtract( m::Op(), m::Broadcast(OptionalConvert( m::Op() .WithPredicate(IsReduceMax) .WithOneUse() .WithOperand(0, OptionalBitcast(OptionalConvert( m::Op(softmax_input).WithNumUser(2))))))) .WithOneUse()); auto unfused_softmax_sum_subpattern = m::SharedSubpattern(m::Divide( OptionalBitcast(m::Exp(unfused_softmax_max_subpattern)), m::Broadcast( softmax_reduce_sum_bcast, OptionalConvert( m::Op(softmax_reduce_sum) .WithOperand(0, OptionalBitcast(OptionalConvert( m::Exp(unfused_softmax_max_subpattern)))) .WithPredicate(IsReduceSum) .WithAtMostNumUser(2))) .WithAtMostNumUser(2))); return unfused_softmax_sum_subpattern; } std::optional<double> GetConstantValue(const HloInstruction* inst) { if (!IsScalar(inst)) { return std::nullopt; } switch (inst->shape().element_type()) { case F16: return static_cast<float>(inst->literal().GetFirstElement<half>()); case BF16: return static_cast<float>(inst->literal().GetFirstElement<bfloat16>()); case F32: return inst->literal().GetFirstElement<float>(); case F64: return inst->literal().GetFirstElement<double>(); default: return std::nullopt; } } double GetDropoutRateFromHlo(HloInstruction* dropout) { std::optional<double> dropout_rate_inv; dropout_rate_inv = GetConstantValue(dropout); if (!dropout_rate_inv.has_value()) { return 0.0; } return (1.0 - (1.0 / *dropout_rate_inv)); } bool IsComputeCapabilityAndCudnnSupported( stream_executor::CudaComputeCapability cc, stream_executor::dnn::VersionInfo cudnn_version, stream_executor::dnn::VersionInfo supported_cudnn_version) { if (cc.IsAtLeastAmpere() && cudnn_version >= supported_cudnn_version) { return true; } VLOG(2) << absl::StrFormat( "CudnnFusedMHARewriter did not run. 
Unsupported compute " "capability(%s; major should be >= 8, minor should be 0) or cudnn version" "(%s; should be >= %s)", cc.ToString(), cudnn_version.ToString(), supported_cudnn_version.ToString()); return false; } bool IsSupportedPrimitiveType(const HloInstruction* bmm) { PrimitiveType dtype = bmm->shape().element_type(); return dtype == BF16 || dtype == F16; } std::vector<int64_t> GetDimensionVector(absl::Span<const int64_t> dimensions, absl::Span<const int64_t> dim_nums) { std::vector<int64_t> vec(dim_nums.size()); for (int i = 0; i < dim_nums.size(); i++) { vec[i] = dimensions.at(dim_nums.at(i)); } return vec; } struct QKVLayout { int64_t batch; int64_t num_heads; int64_t seqlen_q; int64_t seqlen_kv; int64_t hidden_dim; }; absl::StatusOr<std::optional<QKVLayout>> GetQKVLayout( HloInstruction* bmm_1, HloInstruction* bmm_2, bool need_canonicalization) { const DotDimensionNumbers& bmm1_dnums = bmm_1->dot_dimension_numbers(); TF_ASSIGN_OR_RETURN( std::vector<int64_t> bmm1_s_q_dims, GetNonContractingDims(bmm_1->operand(0)->shape(), bmm1_dnums.lhs_batch_dimensions(), bmm1_dnums.lhs_contracting_dimensions())); TF_ASSIGN_OR_RETURN( std::vector<int64_t> bmm1_s_kv_dims, GetNonContractingDims(bmm_1->operand(1)->shape(), bmm1_dnums.rhs_batch_dimensions(), bmm1_dnums.rhs_contracting_dimensions())); std::vector<int64_t> bmm1_bh = GetDimensionVector(bmm_1->operand(0)->shape().dimensions(), bmm1_dnums.lhs_batch_dimensions()); std::vector<int64_t> bmm1_s_q = GetDimensionVector( bmm_1->operand(0)->shape().dimensions(), bmm1_s_q_dims); std::vector<int64_t> bmm1_s_kv = GetDimensionVector( bmm_1->operand(1)->shape().dimensions(), bmm1_s_kv_dims); std::vector<int64_t> bmm1_d = GetDimensionVector(bmm_1->operand(0)->shape().dimensions(), bmm1_dnums.lhs_contracting_dimensions()); TF_RET_CHECK(bmm1_bh.size() == 2); TF_RET_CHECK(bmm1_s_q.size() == 1); TF_RET_CHECK(bmm1_s_kv.size() == 1); TF_RET_CHECK(bmm1_d.size() == 1); const DotDimensionNumbers& bmm2_dnums = bmm_2->dot_dimension_numbers(); TF_ASSIGN_OR_RETURN( std::vector<int64_t> bmm2_lhs_non_contracting_dims, GetNonContractingDims(bmm_2->operand(0)->shape(), bmm2_dnums.lhs_batch_dimensions(), bmm2_dnums.lhs_contracting_dimensions())); TF_ASSIGN_OR_RETURN( std::vector<int64_t> bmm2_rhs_non_contracting_dims, GetNonContractingDims(bmm_2->operand(1)->shape(), bmm2_dnums.rhs_batch_dimensions(), bmm2_dnums.rhs_contracting_dimensions())); std::vector<int64_t> bmm2_bh = GetDimensionVector(bmm_2->operand(0)->shape().dimensions(), bmm2_dnums.lhs_batch_dimensions()); std::vector<int64_t> bmm2_s_kv = GetDimensionVector(bmm_2->operand(0)->shape().dimensions(), bmm2_dnums.lhs_contracting_dimensions()); std::vector<int64_t> bmm2_s_q = need_canonicalization ? GetDimensionVector(bmm_2->operand(1)->shape().dimensions(), bmm2_rhs_non_contracting_dims) : GetDimensionVector(bmm_2->operand(0)->shape().dimensions(), bmm2_lhs_non_contracting_dims); std::vector<int64_t> bmm2_d = need_canonicalization ? 
GetDimensionVector(bmm_2->operand(0)->shape().dimensions(), bmm2_lhs_non_contracting_dims) : GetDimensionVector(bmm_2->operand(1)->shape().dimensions(), bmm2_rhs_non_contracting_dims); TF_RET_CHECK(bmm2_bh.size() == 2); TF_RET_CHECK(bmm2_s_q.size() == 1); TF_RET_CHECK(bmm2_s_kv.size() == 1); TF_RET_CHECK(bmm2_d.size() == 1); if (bmm1_bh[0] != bmm2_bh[0] || bmm1_bh[1] != bmm2_bh[1] || bmm1_s_q[0] != bmm2_s_q[0] || bmm1_s_kv[0] != bmm2_s_kv[0] || bmm1_d[0] != bmm2_d[0]) { return std::nullopt; } QKVLayout qkv_layout; qkv_layout.batch = bmm1_bh[0]; qkv_layout.num_heads = bmm1_bh[1]; qkv_layout.seqlen_q = bmm1_s_q[0]; qkv_layout.seqlen_kv = bmm1_s_kv[0]; qkv_layout.hidden_dim = bmm1_d[0]; return qkv_layout; } absl::StatusOr<bool> IsFlashAttention( QKVLayout qkv_layout, bool is_training, stream_executor::CudaComputeCapability cc, stream_executor::dnn::VersionInfo cudnn_version) { int64_t s_q = qkv_layout.seqlen_q; int64_t s_kv = qkv_layout.seqlen_kv; int64_t hidden_dim = qkv_layout.hidden_dim; bool is_seqlen_supported = (!is_training || (s_q % 2 == 0 && s_kv % 2 == 0)); bool is_hidden_dim_supported = hidden_dim <= 128 && hidden_dim % 8 == 0; bool is_flash_attention = is_seqlen_supported && is_hidden_dim_supported; if (!is_flash_attention) return false; if ((is_training && (s_q < 64 || s_kv < 64)) && !IsComputeCapabilityAndCudnnSupported( cc, cudnn_version, stream_executor::dnn::VersionInfo(9, 0, 0))) { VLOG(2) << "Flash attention training with seq < 64 not supported cuDNN < " "9.0.0."; return false; } if ((hidden_dim != 64 && hidden_dim != 128) && !IsComputeCapabilityAndCudnnSupported( cc, cudnn_version, stream_executor::dnn::VersionInfo(8, 9, 6))) { VLOG(2) << "Flash attention head dim != 64 or 128 not supported with cuDNN " "< 8.9.6."; return false; } if ((is_training && s_kv % 64 != 0) && !IsComputeCapabilityAndCudnnSupported( cc, cudnn_version, stream_executor::dnn::VersionInfo(8, 9, 5))) { VLOG(2) << "Flash attention training with seq kv % 64 != 0 not supported " "with cuDNN < 8.9.5."; return false; } if (!IsComputeCapabilityAndCudnnSupported( cc, cudnn_version, stream_executor::dnn::VersionInfo(8, 9, 4))) { VLOG(2) << "Require cuDNN 8.9.4 to run flash attention."; return false; } return is_flash_attention; } bool IsCausalMaskPattern(HloInstruction* mask) { auto causal_mask = m::Select(m::Compare(m::Iota(), m::Iota()), m::Broadcast(m::Constant()), m::Broadcast(m::Constant())); auto causal_mask_pattern_fwd_remat = m::Broadcast(OptionalBitcast(causal_mask)); auto causal_mask_pattern_bwd = m::Broadcast(m::Convert(OptionalBitcast( m::Minimum(m::Op(), m::Broadcast(OptionalBitcast(causal_mask)))))); HloInstruction* param = nullptr; HloInstruction* gte = nullptr; auto causal_mask_pattern_fwd = m::Broadcast( OptionalBitcast(m::GetTupleElement(&gte, m::Parameter(&param)))); auto causal_mask_pattern = m::AnyOf<HloInstruction>( causal_mask_pattern_fwd_remat, causal_mask_pattern_fwd, causal_mask_pattern_bwd); if (Match(mask, causal_mask_pattern)) { if (param != nullptr && param->parent()->IsWhileBodyComputation()) { auto while_instr = param->parent()->WhileCallInstruction(); auto mask_index = gte->tuple_index(); auto actual_mask = while_instr->mutable_operand(0)->mutable_operand(mask_index); auto causal_mask_pattern_fwd = OptionalBitcast(m::Convert(m::MinimumAnyOrder( m::Op(), OptionalBitcast(m::MinimumAnyOrder( m::Op(), m::Broadcast(OptionalBitcast(causal_mask))))))); return Match(actual_mask, causal_mask_pattern_fwd); } return true; } return false; } MatchFwdResult 
MatchSoftmaxDropoutBmm(MatchFwdResult previous_result, int64_t bmm2_operand_position, HloInstruction* instr) { MatchFwdResult match_result = previous_result; HloInstruction* softmax_reduce_sum; HloInstruction* softmax_reduce_sum_bcast; HloInstruction* bmm_2; HloInstruction* softmax_input; HloInstruction* dropout = nullptr; auto dropout_softmax_pattern_form_1 = m::Select( m::Op(), OptionalConvert(m::MultiplyAnyOrder( OptionalBitcast(OptionalReshape( OptionalConvert(GetUnfusedReduceMaxSumSoftmaxPattern( &softmax_input, &softmax_reduce_sum, &softmax_reduce_sum_bcast)))), m::Broadcast( OptionalConvert(m::Constant(&dropout).WithPredicate(IsScalar))))), m::Op()); auto dropout_softmax_pattern_form_2 = OptionalBitcast(OptionalBitcast(OptionalConvert(m::MultiplyAnyOrder( OptionalReshape(OptionalConvert(GetUnfusedReduceMaxSumSoftmaxPattern( &softmax_input, &softmax_reduce_sum, &softmax_reduce_sum_bcast))), m::Broadcast( OptionalConvert(OptionalBitcast(OptionalReshape(m::Select( m::Op(), m::Broadcast(m::Constant(&dropout).WithPredicate(IsScalar)), m::Op()))))))))); auto dropout_softmax_pattern_form_3 = m::MultiplyAnyOrder( m::MultiplyAnyOrder( OptionalConvert(GetUnfusedReduceMaxSumSoftmaxPattern( &softmax_input, &softmax_reduce_sum, &softmax_reduce_sum_bcast)), m::Op()), m::Broadcast(m::Constant(&dropout).WithPredicate(IsScalar))); auto softmax_dropout_bmm2_pattern = m::Op(&bmm_2) .WithPredicate(IsBatchedMatmul) .WithOperand(bmm2_operand_position, m::AnyOf<HloInstruction>( OptionalBitcast(OptionalConvert( GetUnfusedReduceMaxSumSoftmaxPattern( &softmax_input, &softmax_reduce_sum, &softmax_reduce_sum_bcast))), dropout_softmax_pattern_form_1, dropout_softmax_pattern_form_2, dropout_softmax_pattern_form_3)); if (!Match(instr, softmax_dropout_bmm2_pattern) || !IsSupportedPrimitiveType(bmm_2)) { match_result.has_match = false; return match_result; } if (softmax_reduce_sum->users()[0]->opcode() == HloOpcode::kConvert) { softmax_reduce_sum = softmax_reduce_sum->users()[0]; } match_result.is_training = softmax_reduce_sum->user_count() == 2 && softmax_reduce_sum_bcast->user_count() == 2; match_result.matched_bmm_2 = bmm_2; if (dropout) { match_result.matched_dropout_rate = GetDropoutRateFromHlo(dropout); } match_result.matched_softmax_input = softmax_input; match_result.matched_reduce_sum = softmax_reduce_sum; match_result.has_match = true; return match_result; } MatchFwdResult MatchBmm1UnfusedBiasSoftmaxBmm2(MatchFwdResult previous_result, HloInstruction* softmax_input, bool has_dropout) { MatchFwdResult match_result = previous_result; HloInstruction* bmm_1; HloInstruction* bias = nullptr; HloInstruction* scale = nullptr; auto first_bmm_pattern = m::SharedSubpattern(m::Op(&bmm_1).WithPredicate(IsBatchedMatmul)); auto unfused_scaled_bmm_subpattern = m::MultiplyAnyOrder( OptionalConvert(first_bmm_pattern.WithOneUse()), OptionalConvert( m::Broadcast(m::Constant(&scale).WithPredicate(IsScalar)))); if (Match(softmax_input, OptionalConvert(OptionalBitcast(m::AnyOf<HloInstruction>( first_bmm_pattern, unfused_scaled_bmm_subpattern))))) { match_result.matched_bmm_1 = bmm_1; match_result.matched_scale = scale; match_result.matched_custom_call_name = has_dropout ? 
kCudnnfMHASoftmaxDropoutCallTarget : kCudnnfMHASoftmaxCallTarget; match_result.has_match = true; } else if (Match(softmax_input, OptionalBitcast(m::AddAnyOrder( OptionalConvert(OptionalBitcast(m::AnyOf<HloInstruction>( unfused_scaled_bmm_subpattern.WithOneUse(), first_bmm_pattern.WithOneUse()))), m::Op(&bias))))) { match_result.matched_bmm_1 = bmm_1; match_result.matched_scale = scale; match_result.matched_custom_call_name = has_dropout ? kCudnnfMHAScaleBiasSoftmaxDropoutCallTarget : kCudnnfMHAScaleBiasSoftmaxCallTarget; match_result.is_causal_mask |= IsCausalMaskPattern(bias); if (!match_result.is_causal_mask && bias->opcode() == HloOpcode::kBroadcast) { auto dims = Cast<HloBroadcastInstruction>(bias)->dimensions(); if (dims == std::vector<int64_t>{2, 3} || dims == std::vector<int64_t>{0, 2, 3} || dims == std::vector<int64_t>{1, 2, 3}) { HloInstruction* bias_bc = bias->mutable_operand(0); std::vector<int64_t> bitcast_dims(bias->shape().rank(), 1); for (int dim : dims) { bitcast_dims[dim] = bias->shape().dimensions()[dim]; } bias = bias_bc->AddInstruction(HloInstruction::CreateBitcast( ShapeUtil::MakeShape(bias->shape().element_type(), bitcast_dims), bias_bc)); } } match_result.matched_bias = bias; match_result.has_match = true; } else { match_result.has_match = false; } return match_result; } MatchFwdResult MatchFwdMHAPatternsForCanonicalization(HloInstruction* instr) { MatchFwdResult match_result; for (auto bmm2_operand_pos : {0, 1}) { if (bmm2_operand_pos == 1) { match_result.need_canonicalization = true; } bool has_dropout = false; match_result = MatchSoftmaxDropoutBmm(match_result, bmm2_operand_pos, instr); if (!match_result.has_match) { continue; } has_dropout = match_result.matched_dropout_rate > 0.0; match_result = MatchBmm1UnfusedBiasSoftmaxBmm2( match_result, match_result.matched_softmax_input, has_dropout); if (match_result.has_match) { return match_result; } } match_result.need_canonicalization = false; return match_result; } bool IsBmm2GradGemm2(HloInstruction* instr) { return (instr->user_count() == 1) || (instr->user_count() == 2); } MatchBwdResult MatchBmm1GradGemm1(MatchBwdResult previous_result, HloInstruction* bmm_1) { MatchBwdResult match_result = previous_result; match_result.has_match = false; const HloInstruction* q_tensor = bmm_1->operand(0); for (int64_t i = 0; i < q_tensor->user_count(); i++) { HloInstruction* q_tensor_user_i = q_tensor->users()[i]; if (IsBatchedMatmul(q_tensor_user_i) && q_tensor_user_i != bmm_1) { match_result.matched_bmm_1_grad_1 = q_tensor_user_i; if (match_result.matched_bmm_1_grad_1->operand_index(q_tensor) != 1) { match_result.bmm_1_grad_1_need_canonicalization = true; } match_result.has_match = true; } } return match_result; } MatchBwdResult MatchBmm1GradGemm2(MatchBwdResult previous_result, HloInstruction* fwd_fmha_call) { HloInstruction* bmm_1_grad_2 = nullptr; MatchBwdResult match_result = previous_result; match_result.has_match = false; int64_t d_s_index = match_result.bmm_1_grad_1_need_canonicalization ? 
1 : 0; HloInstruction* d_s_user_0 = match_result.matched_bmm_1_grad_1; HloInstruction* d_s = d_s_user_0->mutable_operand(d_s_index); if (d_s->opcode() == HloOpcode::kBitcast && d_s->user_count() == 1) { d_s = d_s->mutable_operand(0); } auto bmm_1_grad_2_it = std::find_if( d_s->users().begin(), d_s->users().end(), [&](HloInstruction* instr) { return instr != match_result.matched_bmm_1_grad_1 && instr->opcode() == HloOpcode::kDot; }); if (bmm_1_grad_2_it != d_s->users().end()) { bmm_1_grad_2 = *bmm_1_grad_2_it; } else { return match_result; } match_result.matched_bmm_1_grad_2 = bmm_1_grad_2; if (match_result.matched_bmm_1_grad_2->operand_index(d_s) != 0) { match_result.bmm_1_grad_2_need_canonicalization = true; } match_result.has_match = true; return match_result; } MatchBwdResult MatchBmm2GradGemm1(HloInstruction* fwd_fmha_call) { HloInstruction* bmm_2_grad_1 = nullptr; MatchBwdResult matched_result; int64_t activation_out_gte_index = 1; if (fwd_fmha_call->user_count() < 2 || fwd_fmha_call->users()[activation_out_gte_index]->opcode() != HloOpcode::kGetTupleElement || fwd_fmha_call->users()[activation_out_gte_index]->user_count() > 1 || !IsBatchedMatmul( fwd_fmha_call->users()[activation_out_gte_index]->users()[0])) { matched_result.has_match = false; return matched_result; } bmm_2_grad_1 = fwd_fmha_call->users()[activation_out_gte_index]->users()[0]; matched_result.matched_bmm_2_grad_1 = bmm_2_grad_1; if (bmm_2_grad_1->operand_index( fwd_fmha_call->users()[activation_out_gte_index]) != 0) { matched_result.bmm_2_grad_1_need_canonicalization = true; } matched_result.has_match = true; return matched_result; } MatchBwdResult MatchBmm2GradGemm2(MatchBwdResult previous_result, HloInstruction* fwd_fmha_call, bool v_transposed) { MatchBwdResult match_result = previous_result; match_result.has_match = false; const HloInstruction* v_tensor = v_transposed ? 
fwd_fmha_call->operand(2)->operand(0) : fwd_fmha_call->operand(2); for (int64_t i = 0; i < v_tensor->user_count(); i++) { HloInstruction* v_tensor_user_i = v_tensor->users()[i]; if (IsBatchedMatmul(v_tensor_user_i) && IsBmm2GradGemm2(v_tensor_user_i)) { match_result.matched_bmm_2_grad_2 = v_tensor_user_i; if (match_result.matched_bmm_2_grad_2->operand_index(v_tensor) != 1) { match_result.bmm_2_grad_2_need_canonicalization = true; } match_result.has_match = true; } } return match_result; } MatchBwdResult MatchDbias(MatchBwdResult previous_result, HloInstruction* d_intermediate, const absl::flat_hash_set<HloInstruction*> users) { MatchBwdResult match_result = previous_result; auto user_count = d_intermediate->user_count(); HloInstruction* dbias_user = nullptr; HloInstruction* dbias = nullptr; for (auto user : d_intermediate->users()) { if (users.contains(user)) { user_count -= 1; } else { dbias_user = user; } } auto ConsumeExtraConvert = [](HloInstruction* instr) { Match(instr->users()[0], m::Convert(&instr, m::Op()).WithOneUse()); return true; }; match_result.has_match = user_count == 1 && Match(dbias_user, m::Reduce(&dbias, m::Op(), m::Op()).WithOneUse()) && dbias->shape().rank() == 3 && ConsumeExtraConvert(dbias); if (match_result.has_match) { auto reduce_dim = dbias->dimensions(); if (reduce_dim.size() == 1 && reduce_dim[0] == 0) { match_result.matched_dbias = dbias; } else { match_result.has_match = false; } } return match_result; } MatchBwdResult MatchBwdBmmSoftmaxDropoutBmm(MatchBwdResult previous_result, HloInstruction* fwd_fmha_call) { MatchBwdResult match_result = previous_result; bool is_bmm1_grad1_canonicalized = match_result.bmm_1_grad_1_need_canonicalization; match_result.has_match = false; bool has_scale = false; bool has_dropout = false; auto bwd_dropout_pattern_form_1 = m::SharedSubpattern( OptionalBitcast(OptionalReshape(OptionalConvert(m::Select( m::Op(), m::Op().WithPredicate([&](const HloInstruction* instr) { return instr == match_result.matched_bmm_2_grad_2; }), m::Broadcast( OptionalConvert(m::Constant().WithPredicate(IsScalar)))))))); auto bwd_dropout_pattern_form_2 = m::SharedSubpattern(OptionalBitcast(m::MultiplyAnyOrder( OptionalConvert( m::Op().WithPredicate([&](const HloInstruction* instr) { return instr == match_result.matched_bmm_2_grad_2; })), m::Broadcast(OptionalConvert(OptionalBitcast(OptionalReshape( m::Select(m::Op(), m::Broadcast(OptionalConvert( m::Constant().WithPredicate(IsScalar))), m::Op())))))))); auto bwd_dropout_pattern_form_3 = OptionalConvert(m::MultiplyAnyOrder( m::MultiplyAnyOrder( m::Op().WithPredicate([&](const HloInstruction* instr) { return instr == match_result.matched_bmm_2_grad_2; }), m::Broadcast(m::Constant().WithPredicate(IsScalar))), m::Op())); auto bwd_dropout_pattern = m::AnyOf<HloInstruction>( bwd_dropout_pattern_form_1, bwd_dropout_pattern_form_2, bwd_dropout_pattern_form_3); HloInstruction* bwd_softmax_input = nullptr; HloInstruction* exp_1; HloInstruction* exp_2; HloInstruction* d_softmax; auto bwd_softmax_pattern = OptionalBitcast(OptionalConvert( m::MultiplyAnyOrder( &d_softmax, m::AddAnyOrder( m::Divide().WithOneUse(), m::Broadcast(OptionalBitcast(OptionalConvert( m::Negate( OptionalBitcast( m::Op() .WithPredicate(IsReduceSum) .WithOneUse() .WithOperand( 0, OptionalBitcast( m::MultiplyAnyOrder( m::MultiplyAnyOrder( m::Op(&bwd_softmax_input), m::Broadcast()) .WithOneUse(), m::Exp(&exp_2, m::Op())) .WithOneUse())))) .WithOneUse())))), m::Exp(&exp_1, m::Op())) .WithAtMostNumUser(3))); HloInstruction* bwd_scale_input = 
nullptr; HloInstruction* bwd_scale = nullptr; auto bwd_scale_pattern = m::MultiplyAnyOrder(&bwd_scale, m::Op(&bwd_scale_input), m::Broadcast(m::Constant().WithPredicate(IsScalar))) .WithNumUser(2); int intermediate_input_pos = is_bmm1_grad1_canonicalized ? 1 : 0; HloInstruction* intermediate_input = match_result.matched_bmm_1_grad_1->mutable_operand( intermediate_input_pos); has_scale = Match(intermediate_input, bwd_scale_pattern); if (has_scale) { intermediate_input = bwd_scale_input; } if (!Match(intermediate_input, bwd_softmax_pattern) || exp_1 != exp_2) { return match_result; } has_dropout = Match(bwd_softmax_input, bwd_dropout_pattern); if (!has_dropout && !Match(bwd_softmax_input, OptionalConvert((OptionalBitcast( m::Op().WithPredicate([&](const HloInstruction* instr) { return instr == match_result.matched_bmm_2_grad_2; })))))) { return match_result; } if (has_dropout) { if (fwd_fmha_call->custom_call_target() == kCudnnfMHAScaleBiasSoftmaxDropoutCallTarget) match_result.matched_custom_call_name = kCudnnfMHAScaleBiasSoftmaxDropoutBackwardCallTarget; if (fwd_fmha_call->custom_call_target() == kCudnnfMHASoftmaxDropoutCallTarget) match_result.matched_custom_call_name = kCudnnfMHASoftmaxDropoutBackwardCallTarget; } else { if (fwd_fmha_call->custom_call_target() == kCudnnfMHAScaleBiasSoftmaxCallTarget) match_result.matched_custom_call_name = kCudnnfMHAScaleBiasSoftmaxBackwardCallTarget; if (fwd_fmha_call->custom_call_target() == kCudnnfMHASoftmaxCallTarget) match_result.matched_custom_call_name = kCudnnfMHASoftmaxBackwardCallTarget; } HloInstruction* dS = d_softmax; if (dS->users()[0]->opcode() == HloOpcode::kConvert) { dS = dS->users()[0]; } if (has_scale) { if (dS->user_count() == 1) { match_result.has_match = true; } else if (dS->user_count() == 2) { match_result = MatchDbias(match_result, dS, {bwd_scale}); } else { match_result.has_match = false; } } else { if (dS->user_count() == 2) { match_result.has_match = true; } else if (dS->user_count() == 3) { match_result = MatchDbias(match_result, dS, {match_result.matched_bmm_1_grad_1, match_result.matched_bmm_1_grad_2}); } else { match_result.has_match = false; } } return match_result; } MatchBwdResult MatchBackwardBmms(HloInstruction* fwd_fmha_call, HloInstruction* bmm_1, bool v_transposed) { MatchBwdResult matched_result = MatchBmm2GradGemm1(fwd_fmha_call); if (!matched_result.has_match) { return matched_result; } matched_result = MatchBmm2GradGemm2(matched_result, fwd_fmha_call, v_transposed); if (!matched_result.has_match) { return matched_result; } matched_result = MatchBmm1GradGemm1(matched_result, bmm_1); if (!matched_result.has_match) { return matched_result; } matched_result = MatchBmm1GradGemm2(matched_result, fwd_fmha_call); if (!matched_result.has_match) { return matched_result; } return matched_result; } MatchBwdResult MatchBwdMHAPatternsForCanonicalization( HloInstruction* fwd_fmha_call, HloInstruction* bmm_1, bool v_transposed) { MatchBwdResult match_result = MatchBackwardBmms(fwd_fmha_call, bmm_1, v_transposed); if (!match_result.has_match) { return match_result; } match_result = MatchBwdBmmSoftmaxDropoutBmm(match_result, fwd_fmha_call); return match_result; } absl::StatusOr<bool> IsMHABlockSupported( HloInstruction* bmm_1, HloInstruction* bmm_2, bool need_canonicalization, bool is_training, bool is_causal_mask, std::string& custom_call_name, const DebugOptions& debug_options, stream_executor::CudaComputeCapability cc, stream_executor::dnn::VersionInfo cudnn_version) { if (MHACallHasDropout(custom_call_name) && 
!debug_options.xla_gpu_fused_attention_use_cudnn_rng()) { VLOG(3) << "Using CUDNN RNG for fused attention dropout is not enabled.\n"; return false; } if (!IsSupportedPrimitiveType(bmm_1) || !IsSupportedPrimitiveType(bmm_2)) { if (VLOG_IS_ON(2)) { VLOG(2) << "Unsupported primitive type for cuDNN MHA fusion:\n" << bmm_1->ToString() << "\nOR\n" << bmm_2->ToString() << "\n" << "BF16 and F16 are the supported Dtypes."; } return false; } if (bmm_1->shape().rank() != 4 || bmm_2->shape().rank() != 4) { if (VLOG_IS_ON(2)) { VLOG(2) << "Unsupported bmm rank for cuDNN MHA fusion:\n" << bmm_1->ToString() << "\nOR\n" << bmm_2->ToString() << "\n" << "Only bmm with rank 4 is supported."; } return false; } TF_ASSIGN_OR_RETURN(std::optional<QKVLayout> qkv_layout, GetQKVLayout(bmm_1, bmm_2, need_canonicalization)); if (!qkv_layout.has_value()) { VLOG(2) << "bmm1 and bmm2 have different qkv layout."; return false; } TF_ASSIGN_OR_RETURN( bool is_flash_attention, IsFlashAttention(qkv_layout.value(), is_training, cc, cudnn_version)); if (is_flash_attention) { if (is_causal_mask) { if (custom_call_name == kCudnnfMHAScaleBiasSoftmaxDropoutCallTarget) { custom_call_name = kCudnnfMHASoftmaxDropoutCallTarget; } else if (custom_call_name == kCudnnfMHAScaleBiasSoftmaxCallTarget) { custom_call_name = kCudnnfMHASoftmaxCallTarget; } } } return is_flash_attention; } absl::StatusOr<HloInstruction*> CanonicalizeBatchedGemmForcuDNNFMHA( HloInstruction* bmm, HloComputation* comp) { if (VLOG_IS_ON(3)) { VLOG(3) << "Before FMHA Dot Cannonicalization: \n" << comp->parent()->ToString(); } HloInstruction* lhs_bmm = bmm->mutable_operand(0); HloInstruction* rhs_bmm = bmm->mutable_operand(1); const DotDimensionNumbers& dnums = bmm->dot_dimension_numbers(); int64_t rank = bmm->shape().dimensions_size(); std::vector<int64_t> perm(rank); std::iota(perm.begin(), perm.end(), 0); std::swap(perm[rank - 1], perm[rank - 2]); DotDimensionNumbers new_dnums = dnums; std::swap(*new_dnums.mutable_lhs_contracting_dimensions(), *new_dnums.mutable_rhs_contracting_dimensions()); std::swap(*new_dnums.mutable_lhs_batch_dimensions(), *new_dnums.mutable_rhs_batch_dimensions()); auto original_bmm_shape = bmm->shape(); HloInstruction* new_dot = comp->AddInstruction(HloInstruction::CreateDot( ShapeUtil::MakeShape(original_bmm_shape.element_type(), Permute(original_bmm_shape.dimensions(), perm)), rhs_bmm, lhs_bmm, new_dnums, bmm->precision_config())); TF_RETURN_IF_ERROR(comp->ReplaceWithNewInstruction( bmm, HloInstruction::CreateTranspose(original_bmm_shape, new_dot, perm))); if (VLOG_IS_ON(2)) { VLOG(2) << "After FMHA Dot Cannonicalization: \n" << comp->parent()->ToString(); } return new_dot; } absl::StatusOr<HloInstruction*> ChangeCheckedDimToFastest( HloComputation* comp, HloInstruction* bmm, bool is_lhs, bool should_contracting_be_fastest) { const DotDimensionNumbers& dot_dims_bmm = bmm->dot_dimension_numbers(); DotDimensionNumbers new_dot_dims_bmm = dot_dims_bmm; int64_t bmm_operand = is_lhs ? 0 : 1; absl::Span<const int64_t> contracting_dims = is_lhs ? dot_dims_bmm.lhs_contracting_dimensions() : dot_dims_bmm.rhs_contracting_dimensions(); absl::Span<const int64_t> batch_dims = is_lhs ? dot_dims_bmm.lhs_batch_dimensions() : dot_dims_bmm.rhs_batch_dimensions(); absl::Span<const int64_t> lhs_minor_to_major_bmm = bmm->operand(0)->shape().layout().minor_to_major(); absl::Span<const int64_t> rhs_minor_to_major_bmm = bmm->operand(1)->shape().layout().minor_to_major(); absl::Span<const int64_t>& minor_to_major_to_check = is_lhs ? 
lhs_minor_to_major_bmm : rhs_minor_to_major_bmm; CHECK_EQ(contracting_dims.size(), 1); TF_ASSIGN_OR_RETURN(std::vector<int64_t> non_contracting_dims, GetNonContractingDims(bmm->operand(bmm_operand)->shape(), batch_dims, contracting_dims)); CHECK_EQ(non_contracting_dims.size(), 1); HloInstruction* operand_bmm = bmm->mutable_operand(bmm_operand); int64_t hidden_dim = should_contracting_be_fastest ? contracting_dims[0] : non_contracting_dims[0]; int64_t minor_dim = minor_to_major_to_check[0]; if (minor_dim != hidden_dim) { std::vector<int64_t> perm(bmm->shape().dimensions_size()); std::iota(perm.begin(), perm.end(), 0); std::swap(perm[hidden_dim], perm[minor_dim]); if (is_lhs) { new_dot_dims_bmm.set_lhs_contracting_dimensions(0, non_contracting_dims[0]); } else { new_dot_dims_bmm.set_rhs_contracting_dimensions(0, non_contracting_dims[0]); } operand_bmm = comp->AddInstruction( HloInstruction::CreateTranspose( ShapeUtil::MakeShapeWithDenseLayout( bmm->shape().element_type(), Permute(operand_bmm->shape().dimensions(), perm), minor_to_major_to_check), operand_bmm, perm), &operand_bmm->metadata()); *((DynCast<HloDotInstruction>(bmm))->mutable_dot_dimension_numbers()) = new_dot_dims_bmm; } return operand_bmm; } absl::StatusOr<HloInstruction*> FuseFwdMultiHeadedAttentionBlock( HloComputation* comp, HloInstruction* bmm_1, HloInstruction* bmm_2, HloInstruction* bias, HloInstruction* scale, HloInstruction* reduce_sum, HloInstruction* softmax_input, double dropout_rate, std::string& custom_call_name, stream_executor::CudaComputeCapability cc, bool is_training, bool& changed, bool& v_transposed, bool is_causal_mask) { double scale_value = 1.0; HloInstruction* lhs_bmm1; HloInstruction* rhs_bmm1; HloInstruction* rhs_bmm2; DotDimensionNumbers orig_bmm1_dot_dim = bmm_1->dot_dimension_numbers(); DotDimensionNumbers orig_bmm2_dot_dim = bmm_2->dot_dimension_numbers(); TF_ASSIGN_OR_RETURN(rhs_bmm1, ChangeCheckedDimToFastest( comp, bmm_1, false , true )); TF_ASSIGN_OR_RETURN(lhs_bmm1, ChangeCheckedDimToFastest( comp, bmm_1, true , true )); TF_ASSIGN_OR_RETURN(rhs_bmm2, ChangeCheckedDimToFastest( comp, bmm_2, false , false )); if (rhs_bmm2 != bmm_2->mutable_operand(1)) { v_transposed = true; } GpuBackendConfig gpu_config; CudnnfMHABackendConfig& fmha_config = *gpu_config.mutable_cudnn_fmha_backend_config(); *fmha_config.mutable_bmm1_dot_dimension_numbers() = bmm_1->dot_dimension_numbers(); *fmha_config.mutable_bmm2_dot_dimension_numbers() = bmm_2->dot_dimension_numbers(); TF_RET_CHECK((dropout_rate >= 0.0 && dropout_rate <= 1.0)); *((DynCast<HloDotInstruction>(bmm_1))->mutable_dot_dimension_numbers()) = orig_bmm1_dot_dim; *((DynCast<HloDotInstruction>(bmm_2))->mutable_dot_dimension_numbers()) = orig_bmm2_dot_dim; if (scale != nullptr) { std::optional<double> value; value = GetConstantValue(scale); TF_RET_CHECK(value.has_value()); scale_value = (double)*value; } fmha_config.set_fmha_scale(scale_value); fmha_config.set_dropout_rate(dropout_rate); fmha_config.set_seed(42); *fmha_config.mutable_intermediate_tensor_shape() = bmm_1->shape().ToProto(); { auto* algorithm = fmha_config.mutable_algorithm(); algorithm->set_algo_id(0); algorithm->set_math_type(se::dnn::AlgorithmProto::TENSOR_OP_MATH); std::vector<int64_t> knob_ids = {17, 24}; std::vector<int64_t> knob_vals = {1, 0}; for (int i = 0; i < knob_ids.size(); ++i) { (*algorithm->mutable_tuning_knobs())[knob_ids[i]] = knob_vals[i]; } algorithm->set_is_cudnn_frontend(true); algorithm->mutable_workspace_size()->set_value(0); } fmha_config.set_mask_type(is_causal_mask ? 
CudnnfMHABackendConfig::CAUSAL : CudnnfMHABackendConfig::NO_MASK); fmha_config.set_sliding_window_length(0); const Shape& output_shape = bmm_2->shape(); Shape call_shape; HloInstruction* activation_output = nullptr; std::vector<Shape> output_shapes = {output_shape}; if (is_training) { activation_output = bmm_2->mutable_operand(0); if (activation_output->user_count() < 2 && activation_output->opcode() == HloOpcode::kBitcast) { HloInstruction* producer = activation_output->mutable_operand(0); TF_RET_CHECK(producer->user_count() == 2); HloInstruction* bmm2_grad2_user = producer->users()[0] == activation_output ? producer->users()[1] : producer->users()[0]; if (IsBatchedMatmul(bmm2_grad2_user)) { activation_output = producer; } else if (bmm2_grad2_user->opcode() == HloOpcode::kTranspose) { activation_output = bmm2_grad2_user; } else { return Internal("Unexpected activation patterns"); } } TF_RET_CHECK(reduce_sum != nullptr); output_shapes.push_back( ShapeUtil::MakeShape(F32, reduce_sum->shape().dimensions())); } output_shapes.push_back(ShapeUtil::MakeShape(U8, {0})); call_shape = ShapeUtil::MakeTupleShape(output_shapes); std::vector<HloInstruction*> operands = {lhs_bmm1, rhs_bmm1, rhs_bmm2}; if (!is_causal_mask && bias != nullptr) { HloInstruction* original_bias; HloInstruction* original_broadcast; if (Match(bias, m::Broadcast( &original_broadcast, m::Convert( m::Op(&original_bias) .WithPredicate([](const HloInstruction* instr) { return instr->shape().element_type() == F16 || instr->shape().element_type() == BF16; })) .WithPredicate([](const HloInstruction* instr) { return instr->shape().element_type() == F32 || instr->shape().element_type() == F64; })))) { absl::Span<const int64_t> original_bcast_dims = (DynCast<HloBroadcastInstruction>(original_broadcast))->dimensions(); absl::Span<const int64_t> original_broadcast_shape_dims = original_broadcast->shape().dimensions(); int64_t starting_index = original_broadcast_shape_dims.size() == 5 && original_broadcast_shape_dims[0] == 1 ? 
1 : 0; std::vector<int64_t> bcast_dimensions; for (auto& dim : original_bcast_dims) { bcast_dimensions.push_back(dim - starting_index); } const Shape& bcast_shape = bmm_1->shape(); bias = comp->AddInstruction(HloInstruction::CreateBroadcast( bcast_shape, original_bias, bcast_dimensions)); } operands.push_back(bias); } HloInstruction* fmha_call = comp->AddInstruction(HloInstruction::CreateCustomCall( call_shape, operands, absl::string_view(custom_call_name))); TF_RETURN_IF_ERROR(fmha_call->set_backend_config(gpu_config)); TF_RETURN_IF_ERROR(SetFMHAInstructionName(bmm_1->GetModule(), fmha_call)); TF_RETURN_IF_ERROR(comp->ReplaceWithNewInstruction( bmm_2, HloInstruction::CreateGetTupleElement(bmm_2->shape(), fmha_call, 0))); if (activation_output) { HloInstruction* activation_gte = comp->AddInstruction(HloInstruction::CreateGetTupleElement( activation_output->shape(), fmha_call, 1)); TF_RETURN_IF_ERROR(comp->ReplaceInstructionWithDifferentShape( activation_output, activation_gte, false, false, false) .status()); } if (VLOG_IS_ON(2)) { VLOG(2) << "After CudnnFusedMHARewriter: \n" << comp->parent()->ToString(); } changed = true; return fmha_call; } absl::StatusOr<bool> FuseBwdMultiHeadedAttentionBlock( HloComputation* comp, HloInstruction* bmm_1_grad_1, HloInstruction* bmm_1_grad_2, HloInstruction* bmm_2_grad_1, HloInstruction* bmm_2_grad_2, HloInstruction* fwd_fmha_call, HloInstruction* dbias, HloInstruction* bias, std::string& bwd_custom_call_name) { HloInstruction* rhs_bmm1_grad_gemm1; HloInstruction* lhs_bmm1_grad_gemm2; HloInstruction* rhs_bmm2_grad_gemm2; HloInstruction* d_output_grad; DotDimensionNumbers orig_bmm1_grad1_config = bmm_1_grad_1->dot_dimension_numbers(); DotDimensionNumbers orig_bmm1_grad2_config = bmm_1_grad_2->dot_dimension_numbers(); DotDimensionNumbers orig_bmm2_grad1_config = bmm_2_grad_1->dot_dimension_numbers(); DotDimensionNumbers orig_bmm2_grad2_config = bmm_2_grad_2->dot_dimension_numbers(); TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config, fwd_fmha_call->backend_config<GpuBackendConfig>()); const CudnnfMHABackendConfig& fwd_config = gpu_config.cudnn_fmha_backend_config(); bool is_causal_mask = fwd_config.mask_type() == CudnnfMHABackendConfig::CAUSAL; CudnnfMHABackendConfig bwd_fmha_config; TF_ASSIGN_OR_RETURN( rhs_bmm1_grad_gemm1, ChangeCheckedDimToFastest(comp, bmm_1_grad_1, false , false )); TF_ASSIGN_OR_RETURN( lhs_bmm1_grad_gemm2, ChangeCheckedDimToFastest(comp, bmm_1_grad_2, false , false )); HloInstruction* fwd_act; int64_t fwd_act_index = 1; fwd_act = comp->AddInstruction(HloInstruction::CreateGetTupleElement( fwd_fmha_call->shape().tuple_shapes(fwd_act_index), fwd_fmha_call, fwd_act_index)); TF_ASSIGN_OR_RETURN( rhs_bmm2_grad_gemm2, ChangeCheckedDimToFastest(comp, bmm_2_grad_2, false , true )); TF_ASSIGN_OR_RETURN( d_output_grad, ChangeCheckedDimToFastest(comp, bmm_2_grad_2, true , true )); TF_ASSIGN_OR_RETURN( HloInstruction * bmm_2_grad_1_rhs, ChangeCheckedDimToFastest(comp, bmm_2_grad_1, false , false )); (void)bmm_2_grad_1_rhs; std::vector<HloInstruction*> operands = { rhs_bmm1_grad_gemm1, lhs_bmm1_grad_gemm2, rhs_bmm2_grad_gemm2, fwd_act, d_output_grad}; if (!is_causal_mask && bias) { operands.push_back(bias); } HloInstruction* fwd_output; for (auto user : fwd_fmha_call->users()) { if (user->opcode() == HloOpcode::kGetTupleElement && user->tuple_index() == 0) { fwd_output = user; } } TF_RET_CHECK(fwd_output != nullptr); TF_RET_CHECK(fwd_output->shape() == d_output_grad->shape()); operands.push_back(fwd_output); 
*bwd_fmha_config.mutable_bmm1_grad_gemm1_dot_dimension_numbers() = bmm_1_grad_1->dot_dimension_numbers(); *bwd_fmha_config.mutable_bmm1_grad_gemm2_dot_dimension_numbers() = bmm_1_grad_2->dot_dimension_numbers(); *bwd_fmha_config.mutable_bmm2_grad_gemm1_dot_dimension_numbers() = bmm_2_grad_1->dot_dimension_numbers(); *bwd_fmha_config.mutable_bmm2_grad_gemm2_dot_dimension_numbers() = bmm_2_grad_2->dot_dimension_numbers(); *((DynCast<HloDotInstruction>(bmm_1_grad_1)) ->mutable_dot_dimension_numbers()) = orig_bmm1_grad1_config; *((DynCast<HloDotInstruction>(bmm_1_grad_2)) ->mutable_dot_dimension_numbers()) = orig_bmm1_grad2_config; *((DynCast<HloDotInstruction>(bmm_2_grad_1)) ->mutable_dot_dimension_numbers()) = orig_bmm2_grad1_config; *((DynCast<HloDotInstruction>(bmm_2_grad_2)) ->mutable_dot_dimension_numbers()) = orig_bmm2_grad2_config; bwd_fmha_config.set_fmha_scale(fwd_config.fmha_scale()); bwd_fmha_config.set_dropout_rate(fwd_config.dropout_rate()); bwd_fmha_config.set_seed(fwd_config.seed()); bwd_fmha_config.set_mask_type(is_causal_mask ? CudnnfMHABackendConfig::CAUSAL : CudnnfMHABackendConfig::NO_MASK); bwd_fmha_config.set_sliding_window_length(0); *bwd_fmha_config.mutable_intermediate_tensor_shape() = fwd_config.intermediate_tensor_shape(); { auto* algorithm = bwd_fmha_config.mutable_algorithm(); algorithm->set_algo_id(0); algorithm->set_math_type(se::dnn::AlgorithmProto::TENSOR_OP_MATH); std::vector<int64_t> knob_ids = {17, 24}; std::vector<int64_t> knob_vals = {1, 0}; for (int i = 0; i < knob_ids.size(); ++i) { (*algorithm->mutable_tuning_knobs())[knob_ids[i]] = knob_vals[i]; } algorithm->set_is_cudnn_frontend(true); algorithm->mutable_workspace_size()->set_value(0); } std::vector<Shape> output_shapes = { bmm_1_grad_2->shape(), bmm_1_grad_1->shape(), bmm_2_grad_1->shape()}; if (dbias) { std::vector<int64_t> dbias_shape_vector = SpanToVector(dbias->shape().dimensions()); dbias_shape_vector.insert(dbias_shape_vector.begin(), 1); Shape cudnn_dbias_shape = ShapeUtil::MakeShape(dbias->shape().element_type(), dbias_shape_vector); output_shapes.push_back(cudnn_dbias_shape); } output_shapes.push_back(ShapeUtil::MakeShape(U8, {0})); Shape call_shape = ShapeUtil::MakeTupleShape(output_shapes); HloInstruction* fmha_bwd_call = comp->AddInstruction(HloInstruction::CreateCustomCall( call_shape, operands, absl::string_view(bwd_custom_call_name))); GpuBackendConfig bwd_gpu_config; *bwd_gpu_config.mutable_cudnn_fmha_backend_config() = bwd_fmha_config; TF_RETURN_IF_ERROR(fmha_bwd_call->set_backend_config(bwd_gpu_config)); TF_RETURN_IF_ERROR( SetFMHAInstructionName(bmm_1_grad_1->GetModule(), fmha_bwd_call)); TF_RETURN_IF_ERROR(comp->ReplaceWithNewInstruction( bmm_1_grad_2, HloInstruction::CreateGetTupleElement(bmm_1_grad_2->shape(), fmha_bwd_call, 0))); TF_RETURN_IF_ERROR(comp->ReplaceWithNewInstruction( bmm_1_grad_1, HloInstruction::CreateGetTupleElement(bmm_1_grad_1->shape(), fmha_bwd_call, 1))); TF_RETURN_IF_ERROR(comp->ReplaceWithNewInstruction( bmm_2_grad_1, HloInstruction::CreateGetTupleElement(bmm_2_grad_1->shape(), fmha_bwd_call, 2))); if (dbias) { Shape original_shape = dbias->shape(); HloInstruction* dbias_user = dbias->users()[0]; HloInstruction* cudnn_dbias_output = comp->AddInstruction(HloInstruction::CreateGetTupleElement( output_shapes[3], fmha_bwd_call, 3)); HloInstruction* reshape_dbias = comp->AddInstruction( HloInstruction::CreateReshape(original_shape, cudnn_dbias_output)); TF_RETURN_IF_ERROR(dbias_user->ReplaceOperandWith( dbias_user->operand_index(dbias), reshape_dbias)); 
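// dbias itself is rewired to the raw cuDNN output, which carries an extra
// leading dimension of size 1, hence the different-shape replacement below.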
TF_RETURN_IF_ERROR( comp->ReplaceInstructionWithDifferentShape(dbias, cudnn_dbias_output)); } return true; } absl::Status RestoreFwdGraph( HloComputation* comp, HloInstruction* fwd_fmha_call, HloInstruction* bmm2, HloInstruction* activation, HloInstruction* original_bmm2_producer0, HloInstruction* original_bmm2_producer1, std::vector<HloInstruction*>& original_activation_producers, bool bmm_2_need_canonicalization) { HloInstruction* output_gte = fwd_fmha_call->users()[0]; HloInstruction* activation_gte = fwd_fmha_call->users()[1]; std::string suffix = "fmha_no_match_clone"; HloInstruction* cloned_activation = comp->AddInstruction(activation->CloneWithNewOperands( activation->shape(), original_activation_producers, suffix)); HloInstruction* lhs = activation == original_bmm2_producer0 ? cloned_activation : original_bmm2_producer0; HloInstruction* rhs = activation == original_bmm2_producer0 ? original_bmm2_producer1 : cloned_activation; HloInstruction* cloned_bmm2 = comp->AddInstruction( bmm2->CloneWithNewOperands(bmm2->shape(), {lhs, rhs}, suffix)); if (bmm_2_need_canonicalization) { TF_RET_CHECK(output_gte->users()[0]->opcode() == HloOpcode::kTranspose); TF_RETURN_IF_ERROR( comp->ReplaceInstruction(output_gte->users()[0], cloned_bmm2)); } else { TF_RETURN_IF_ERROR(comp->ReplaceInstruction(output_gte, cloned_bmm2)); } TF_RETURN_IF_ERROR( comp->ReplaceInstruction(activation_gte, cloned_activation)); return absl::OkStatus(); } } absl::StatusOr<bool> CudnnFusedMHARewriter::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { bool any_changed = false; absl::flat_hash_set<HloInstruction*> matched_bmm1; for (HloComputation* comp : module->MakeNonfusionComputations(execution_threads)) { const DebugOptions& debug_options = comp->parent()->config().debug_options(); const se::dnn::VersionInfo cudnn_version = GetDnnVersionInfoOrDefault(stream_executor_, cudnn_version_); #if !defined(GOOGLE_CUDA) || CUDA_VERSION < 12000 return false; #endif if (!debug_options.xla_gpu_enable_cudnn_fmha() || !IsComputeCapabilityAndCudnnSupported( compute_capability_, cudnn_version, stream_executor::dnn::VersionInfo(9, 0, 0))) { return false; } for (HloInstruction* instr : comp->MakeInstructionPostOrder()) { bool v_transposed = false; bool changed = false; MatchFwdResult matched_result = MatchFwdMHAPatternsForCanonicalization(instr); if (!matched_result.has_match) { continue; } TF_ASSIGN_OR_RETURN( bool is_mha_module_supported, IsMHABlockSupported( matched_result.matched_bmm_1, matched_result.matched_bmm_2, matched_result.need_canonicalization, matched_result.is_training, matched_result.is_causal_mask, matched_result.matched_custom_call_name, debug_options, compute_capability_, cudnn_version)); if (!is_mha_module_supported) continue; HloInstruction* activation = matched_result.need_canonicalization ? matched_result.matched_bmm_2->mutable_operand(1) : matched_result.matched_bmm_2->mutable_operand(0); if (!matched_result.is_training && activation->user_count() > 1) { VLOG(2) << "Activation: " << activation->ToString() << " cannot have more than 1 users in non-training mode. 
Skipping."; continue; } HloInstruction* original_bmm2_producer0 = matched_result.matched_bmm_2->mutable_operand(0); HloInstruction* original_bmm2_producer1 = matched_result.matched_bmm_2->mutable_operand(1); HloInstruction* original_bmm2 = matched_result.matched_bmm_2; std::vector<HloInstruction*> original_activation_producers; for (HloInstruction* operand : activation->mutable_operands()) { original_activation_producers.push_back(operand); } if (!matched_bmm1.insert(matched_result.matched_bmm_1).second) { continue; } if (matched_result.need_canonicalization) { TF_ASSIGN_OR_RETURN(matched_result.matched_bmm_2, CanonicalizeBatchedGemmForcuDNNFMHA( matched_result.matched_bmm_2, comp)); } TF_ASSIGN_OR_RETURN( HloInstruction * fwd_fmha_call, FuseFwdMultiHeadedAttentionBlock( comp, matched_result.matched_bmm_1, matched_result.matched_bmm_2, matched_result.matched_bias, matched_result.matched_scale, matched_result.matched_reduce_sum, matched_result.matched_softmax_input, matched_result.matched_dropout_rate, matched_result.matched_custom_call_name, compute_capability_, matched_result.is_training, changed, v_transposed, matched_result.is_causal_mask)); any_changed |= changed; if (matched_result.is_training) { MatchBwdResult matched_bwd_result = MatchBwdMHAPatternsForCanonicalization( fwd_fmha_call, matched_result.matched_bmm_1, v_transposed); if (!matched_bwd_result.has_match) { VLOG(2) << "Backward pattern not matching, skipping."; TF_RETURN_IF_ERROR( RestoreFwdGraph(comp, fwd_fmha_call, original_bmm2, activation, original_bmm2_producer0, original_bmm2_producer1, original_activation_producers, matched_result.need_canonicalization)); continue; } if (matched_bwd_result.matched_dbias && !(compute_capability_.IsAtLeastHopper() && cudnn_version >= stream_executor::dnn::VersionInfo(9, 0, 0))) { VLOG(2) << "Flash attention dbias requires cudnn 9.0.0 + hopper."; TF_RETURN_IF_ERROR( RestoreFwdGraph(comp, fwd_fmha_call, original_bmm2, activation, original_bmm2_producer0, original_bmm2_producer1, original_activation_producers, matched_result.need_canonicalization)); continue; } if (matched_bwd_result.bmm_1_grad_1_need_canonicalization) { TF_ASSIGN_OR_RETURN( matched_bwd_result.matched_bmm_1_grad_1, CanonicalizeBatchedGemmForcuDNNFMHA( matched_bwd_result.matched_bmm_1_grad_1, comp)); } if (matched_bwd_result.bmm_1_grad_2_need_canonicalization) { TF_ASSIGN_OR_RETURN( matched_bwd_result.matched_bmm_1_grad_2, CanonicalizeBatchedGemmForcuDNNFMHA( matched_bwd_result.matched_bmm_1_grad_2, comp)); } if (matched_bwd_result.bmm_2_grad_1_need_canonicalization) { TF_ASSIGN_OR_RETURN( matched_bwd_result.matched_bmm_2_grad_1, CanonicalizeBatchedGemmForcuDNNFMHA( matched_bwd_result.matched_bmm_2_grad_1, comp)); } if (matched_bwd_result.bmm_2_grad_2_need_canonicalization) { TF_ASSIGN_OR_RETURN( matched_bwd_result.matched_bmm_2_grad_2, CanonicalizeBatchedGemmForcuDNNFMHA( matched_bwd_result.matched_bmm_2_grad_2, comp)); } TF_ASSIGN_OR_RETURN( changed, FuseBwdMultiHeadedAttentionBlock( comp, matched_bwd_result.matched_bmm_1_grad_1, matched_bwd_result.matched_bmm_1_grad_2, matched_bwd_result.matched_bmm_2_grad_1, matched_bwd_result.matched_bmm_2_grad_2, fwd_fmha_call, matched_bwd_result.matched_dbias, matched_result.matched_bias, matched_bwd_result.matched_custom_call_name)); any_changed |= changed; } } } return any_changed; } } }
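// Unit tests for CudnnFusedMHARewriter: each case parses a hand-written HLO
// module, runs the rewriter pass, and checks whether the attention pattern is
// (or is not) rewritten into a cuDNN fMHA custom call with the expected
// backend config.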
#include "xla/service/gpu/transforms/cudnn_fused_mha_rewriter.h" #include <cstddef> #include <memory> #include <optional> #include <utility> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/algorithm/container.h" #include "absl/strings/string_view.h" #include "xla/error_spec.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/algebraic_simplifier.h" #include "xla/service/computation_layout.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/cublas_cudnn.h" #include "xla/service/gpu/transforms/cudnn_fused_mha_transpose_fusion.h" #include "xla/service/hlo_cse.h" #include "xla/service/hlo_dce.h" #include "xla/service/hlo_module_config.h" #include "xla/service/hlo_parser.h" #include "xla/service/hlo_verifier.h" #include "xla/service/layout_normalization.h" #include "xla/service/pattern_matcher.h" #include "xla/service/pattern_matcher_gmock.h" #include "xla/service/reshape_decomposer.h" #include "xla/stream_executor/device_description.h" #include "xla/stream_executor/dnn.h" #include "xla/test_helpers.h" #include "xla/tests/hlo_test_base.h" #include "xla/tsl/lib/core/status_test_util.h" #include "xla/util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/statusor.h" #if GOOGLE_CUDA #include "third_party/gpus/cuda/include/cuda.h" #include "third_party/gpus/cudnn/cudnn.h" #endif namespace xla { namespace gpu { namespace { namespace m = xla::match; class CudnnFusedMhaRewriterTestHloTest : public HloTestBase { public: se::CudaComputeCapability GetCudaComputeCapability() { return se::CudaComputeCapability(8, 0); } se::CudaComputeCapability GetRealCudaComputeCapability() { return backend() .default_stream_executor() ->GetDeviceDescription() .cuda_compute_capability(); } se::dnn::VersionInfo GetCudnnVersion() { return se::dnn::VersionInfo(9, 0, 0); } CudnnFusedMhaRewriterTestHloTest() : HloTestBase(false, false, {}) { #if !defined(GOOGLE_CUDA) || CUDA_VERSION < 12000 skip_reason_ = "cuDNN fused MHA requires CUDA 12 or later."; return; #endif } protected: size_t CountFusedAttentionCall(HloModule* module, bool is_backward = false) { return absl::c_count_if(module->entry_computation()->instructions(), [&](const HloInstruction* instr) { if (is_backward) { return IsBwdCustomCallTofMHA(*instr); } else { return IsFwdCustomCallTofMHA(*instr); } }); } DebugOptions GetDebugOptionsForTest() override { auto debug_options = HloTestBase::GetDebugOptionsForTest(); debug_options.set_xla_gpu_enable_cudnn_fmha(true); debug_options.set_xla_gpu_fused_attention_use_cudnn_rng(true); return debug_options; } HloModuleConfig GetModuleConfig() { DebugOptions debug_options = GetDebugOptionsForTest(); HloModuleConfig config_with_fmha; config_with_fmha.set_debug_options(debug_options); return config_with_fmha; } std::optional<absl::string_view> skip_reason_; }; constexpr absl::string_view hlo_BF16Bmm1SoftmaxBmm2Pattern_k_hidden_not_most_minor = R"( HloModule fmha_test, entry_computation_layout={(bf16[16,16,256,64]{3,2,1,0},bf16[16,16,256,64]{3,2,1,0},bf16[16,16,256,64]{3,2,1,0})->bf16[16,16,256,64]{3,2,1,0}} region_0.7 { Arg_0.8 = bf16[] parameter(0) Arg_1.9 = bf16[] parameter(1) ROOT maximum = bf16[] maximum(Arg_0.8, Arg_1.9) } region_1.19 { Arg_0.20 = f32[] parameter(0) Arg_1.21 = f32[] parameter(1) ROOT add = f32[] add(Arg_0.20, Arg_1.21) } ENTRY main.6 { Arg_2.3 = bf16[16,16,256,64]{3,2,1,0} parameter(2) Arg_0.1 = bf16[16,16,256,64]{3,2,1,0} parameter(0) Arg_1.2 = bf16[16,16,256,64]{2,3,1,0} parameter(1) dot.0 = bf16[16,16,256,256]{3,2,1,0} dot(Arg_0.1, Arg_1.2), 
lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3}, metadata={} constant = bf16[] constant(-inf) reduce.11 = bf16[16,16,256]{2,1,0} reduce(dot.0, constant), dimensions={3}, to_apply=region_0.7 broadcast.3 = bf16[16,16,256,256]{3,2,1,0} broadcast(reduce.11), dimensions={0,1,2} subtract.1 = bf16[16,16,256,256]{3,2,1,0} subtract(dot.0, broadcast.3) exponential.1 = bf16[16,16,256,256]{3,2,1,0} exponential(subtract.1) convert.1 = f32[16,16,256,256]{3,2,1,0} convert(exponential.1) constant.1 = f32[] constant(0) reduce.23 = f32[16,16,256]{2,1,0} reduce(convert.1, constant.1), dimensions={3}, to_apply=region_1.19 convert.2 = bf16[16,16,256]{2,1,0} convert(reduce.23) broadcast.4 = bf16[16,16,256,256]{3,2,1,0} broadcast(convert.2), dimensions={0,1,2} divide = bf16[16,16,256,256]{3,2,1,0} divide(exponential.1, broadcast.4) ROOT dot.1 = bf16[16,16,256,64]{3,2,1,0} dot(divide, Arg_2.3), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}, metadata={} })"; TEST_F(CudnnFusedMhaRewriterTestHloTest, BF16Bmm1SoftmaxBmm2Pattern_bmm1_rhs_contracting_dim_not_most_minor) { if (skip_reason_) GTEST_SKIP() << *skip_reason_; TF_ASSERT_OK_AND_ASSIGN( auto m, ParseAndReturnVerifiedModule( hlo_BF16Bmm1SoftmaxBmm2Pattern_k_hidden_not_most_minor)); CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(), GetCudnnVersion()}; TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&fusedMhaRewriter, m.get())); EXPECT_TRUE(result); const HloInstruction* fmha; SCOPED_TRACE(m->ToString()); EXPECT_THAT( m->entry_computation()->root_instruction(), GmockMatch(m::GetTupleElement( m::CustomCall(&fmha, {kCudnnfMHASoftmaxCallTarget}), 0) .WithShape(BF16, {16, 16, 256, 64}))); TF_ASSERT_OK_AND_ASSIGN(auto gpu_config, fmha->backend_config<GpuBackendConfig>()); const CudnnfMHABackendConfig& config = gpu_config.cudnn_fmha_backend_config(); EXPECT_EQ(config.bmm1_dot_dimension_numbers().rhs_contracting_dimensions()[0], 2); } constexpr absl::string_view hlo_BF16Bmm1SoftmaxBmm2Pattern_q_hidden_not_most_minor = R"( HloModule fmha_test, entry_computation_layout={(bf16[16,16,256,64]{3,2,1,0},bf16[16,16,256,64]{3,2,1,0},bf16[16,16,256,64]{3,2,1,0})->bf16[16,16,256,64]{3,2,1,0}} region_0.7 { Arg_0.8 = bf16[] parameter(0) Arg_1.9 = bf16[] parameter(1) ROOT maximum = bf16[] maximum(Arg_0.8, Arg_1.9) } region_1.19 { Arg_0.20 = f32[] parameter(0) Arg_1.21 = f32[] parameter(1) ROOT add = f32[] add(Arg_0.20, Arg_1.21) } ENTRY main.6 { Arg_2.3 = bf16[16,16,256,64]{3,2,1,0} parameter(2) Arg_0.1 = bf16[16,16,256,64]{2,3,1,0} parameter(0) Arg_1.2 = bf16[16,16,256,64]{2,3,1,0} parameter(1) dot.0 = bf16[16,16,256,256]{3,2,1,0} dot(Arg_0.1, Arg_1.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3}, metadata={} constant = bf16[] constant(-inf) reduce.11 = bf16[16,16,256]{2,1,0} reduce(dot.0, constant), dimensions={3}, to_apply=region_0.7 broadcast.3 = bf16[16,16,256,256]{3,2,1,0} broadcast(reduce.11), dimensions={0,1,2} subtract.1 = bf16[16,16,256,256]{3,2,1,0} subtract(dot.0, broadcast.3) exponential.1 = bf16[16,16,256,256]{3,2,1,0} exponential(subtract.1) convert.1 = f32[16,16,256,256]{3,2,1,0} convert(exponential.1) constant.1 = f32[] constant(0) reduce.23 = f32[16,16,256]{2,1,0} reduce(convert.1, constant.1), dimensions={3}, to_apply=region_1.19 convert.2 = bf16[16,16,256]{2,1,0} convert(reduce.23) broadcast.4 = bf16[16,16,256,256]{3,2,1,0} broadcast(convert.2), dimensions={0,1,2} divide = bf16[16,16,256,256]{3,2,1,0} 
divide(exponential.1, broadcast.4) ROOT dot.1 = bf16[16,16,256,64]{3,2,1,0} dot(divide, Arg_2.3), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}, metadata={} })"; TEST_F(CudnnFusedMhaRewriterTestHloTest, BF16Bmm1SoftmaxBmm2Pattern_bmm1_lhs_contracting_dim_not_most_minor) { if (skip_reason_) GTEST_SKIP() << *skip_reason_; TF_ASSERT_OK_AND_ASSIGN( auto m, ParseAndReturnVerifiedModule( hlo_BF16Bmm1SoftmaxBmm2Pattern_q_hidden_not_most_minor)); CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(), GetCudnnVersion()}; TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&fusedMhaRewriter, m.get())); EXPECT_TRUE(result); const HloInstruction* fmha; SCOPED_TRACE(m->ToString()); EXPECT_THAT( m->entry_computation()->root_instruction(), GmockMatch(m::GetTupleElement( m::CustomCall(&fmha, {kCudnnfMHASoftmaxCallTarget}), 0) .WithShape(BF16, {16, 16, 256, 64}))); TF_ASSERT_OK_AND_ASSIGN(auto gpu_config, fmha->backend_config<GpuBackendConfig>()); const CudnnfMHABackendConfig& config = gpu_config.cudnn_fmha_backend_config(); EXPECT_EQ(config.bmm1_dot_dimension_numbers().lhs_contracting_dimensions()[0], 2); EXPECT_EQ(config.bmm1_dot_dimension_numbers().rhs_contracting_dimensions()[0], 2); } constexpr absl::string_view hlo_BF16Bmm1SoftmaxBmm2Pattern_v_hidden_dim_not_most_minor = R"( HloModule fmha_test, entry_computation_layout={(bf16[16,16,256,64]{3,2,1,0},bf16[16,16,256,64]{3,2,1,0},bf16[16,16,256,64]{3,2,1,0})->bf16[16,16,256,64]{3,2,1,0}} region_0.7 { Arg_0.8 = bf16[] parameter(0) Arg_1.9 = bf16[] parameter(1) ROOT maximum = bf16[] maximum(Arg_0.8, Arg_1.9) } region_1.19 { Arg_0.20 = f32[] parameter(0) Arg_1.21 = f32[] parameter(1) ROOT add = f32[] add(Arg_0.20, Arg_1.21) } ENTRY main.6 { Arg_2.3 = bf16[16,16,256,64]{2,3,1,0} parameter(2) Arg_0.1 = bf16[16,16,256,64]{2,3,1,0} parameter(0) Arg_1.2 = bf16[16,16,256,64]{2,3,1,0} parameter(1) dot.0 = bf16[16,16,256,256]{3,2,1,0} dot(Arg_0.1, Arg_1.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3}, metadata={} constant = bf16[] constant(-inf) reduce.11 = bf16[16,16,256]{2,1,0} reduce(dot.0, constant), dimensions={3}, to_apply=region_0.7 broadcast.3 = bf16[16,16,256,256]{3,2,1,0} broadcast(reduce.11), dimensions={0,1,2} subtract.1 = bf16[16,16,256,256]{3,2,1,0} subtract(dot.0, broadcast.3) exponential.1 = bf16[16,16,256,256]{3,2,1,0} exponential(subtract.1) convert.1 = f32[16,16,256,256]{3,2,1,0} convert(exponential.1) constant.1 = f32[] constant(0) reduce.23 = f32[16,16,256]{2,1,0} reduce(convert.1, constant.1), dimensions={3}, to_apply=region_1.19 convert.2 = bf16[16,16,256]{2,1,0} convert(reduce.23) broadcast.4 = bf16[16,16,256,256]{3,2,1,0} broadcast(convert.2), dimensions={0,1,2} divide = bf16[16,16,256,256]{3,2,1,0} divide(exponential.1, broadcast.4) ROOT dot.1 = bf16[16,16,256,64]{3,2,1,0} dot(divide, Arg_2.3), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}, metadata={} })"; TEST_F(CudnnFusedMhaRewriterTestHloTest, BF16Bmm1SoftmaxBmm2Pattern_bmm2_non_contracting_dim_not_most_minor) { if (skip_reason_) GTEST_SKIP() << *skip_reason_; TF_ASSERT_OK_AND_ASSIGN( auto m, ParseAndReturnVerifiedModule( hlo_BF16Bmm1SoftmaxBmm2Pattern_v_hidden_dim_not_most_minor)); CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(), GetCudnnVersion()}; TF_ASSERT_OK_AND_ASSIGN(bool result, RunHloPass(&fusedMhaRewriter, m.get())); EXPECT_TRUE(result); const HloInstruction* fmha; SCOPED_TRACE(m->ToString()); 
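// Fusion should still succeed with the non-default V layout; the expectations
// below verify how the bmm2 contracting dimensions end up in the backend
// config.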
EXPECT_THAT( m->entry_computation()->root_instruction(), GmockMatch(m::GetTupleElement( m::CustomCall(&fmha, {kCudnnfMHASoftmaxCallTarget}), 0) .WithShape(BF16, {16, 16, 256, 64}))); TF_ASSERT_OK_AND_ASSIGN(auto gpu_config, fmha->backend_config<GpuBackendConfig>()); const CudnnfMHABackendConfig& config = gpu_config.cudnn_fmha_backend_config(); EXPECT_EQ(config.bmm2_dot_dimension_numbers().lhs_contracting_dimensions()[0], 3); EXPECT_EQ(config.bmm2_dot_dimension_numbers().rhs_contracting_dimensions()[0], 3); } TEST_F(CudnnFusedMhaRewriterTestHloTest, BF16Bmm1CombinedMaskBiasSoftmaxBmm2) { if (skip_reason_) GTEST_SKIP() << *skip_reason_; const char* module_str = R"( HloModule jit__unnamed_wrapped_function_, entry_computation_layout={(bf16[16,256,16,64]{3,2,1,0},bf16[16,256,16,64]{3,2,1,0},bf16[16,256,16,64]{3,2,1,0},bf16[1,16,256,256]{3,2,1,0},pred[16,1,256,256]{3,2,1,0})->bf16[16,256,16,64]{3,2,1,0}} region_0.32.clone { Arg_0.0 = f32[] parameter(0) Arg_1.0 = f32[] parameter(1) ROOT maximum.1 = f32[] maximum(Arg_0.0, Arg_1.0) } region_1.44 { Arg_0.45 = f32[] parameter(0) Arg_1.46 = f32[] parameter(1) ROOT add = f32[] add(Arg_0.45, Arg_1.46) } ENTRY main.61 { Arg_2.3 = bf16[16,256,16,64]{3,2,1,0} parameter(2), sharding={replicated} transpose.5 = bf16[16,16,64,256]{3,2,1,0} transpose(Arg_2.3), dimensions={0,2,3,1} Arg_0.1 = bf16[16,256,16,64]{3,2,1,0} parameter(0), sharding={replicated} transpose.6 = bf16[16,16,256,64]{3,2,1,0} transpose(Arg_0.1), dimensions={0,2,1,3} Arg_1.2 = bf16[16,256,16,64]{3,2,1,0} parameter(1), sharding={replicated} transpose.7 = bf16[16,16,64,256]{3,2,1,0} transpose(Arg_1.2), dimensions={0,2,3,1} Arg_4.5 = pred[16,1,256,256]{3,2,1,0} parameter(4), sharding={replicated} bitcast.35 = pred[16,256,256]{2,1,0} bitcast(Arg_4.5) convert.49 = s32[16,256,256]{2,1,0} convert(bitcast.35) constant.5 = s32[] constant(0) broadcast.10 = s32[16,256,256]{2,1,0} broadcast(constant.5), dimensions={} compare = pred[16,256,256]{2,1,0} compare(convert.49, broadcast.10), direction=GT constant.7 = bf16[] constant(0) broadcast.12 = bf16[16,256,256]{2,1,0} broadcast(constant.7), dimensions={} constant.9 = bf16[] constant(-9.999e+09) broadcast.13 = bf16[16,256,256]{2,1,0} broadcast(constant.9), dimensions={} select = bf16[16,256,256]{2,1,0} select(compare, broadcast.12, broadcast.13) convert.51 = f32[16,256,256]{2,1,0} convert(select) broadcast.14 = f32[16,16,256,256]{3,2,1,0} broadcast(convert.51), dimensions={0,2,3} Arg_3.4 = bf16[1,16,256,256]{3,2,1,0} parameter(3), sharding={replicated} bitcast.52 = bf16[16,256,256]{2,1,0} bitcast(Arg_3.4) convert.52 = f32[16,256,256]{2,1,0} convert(bitcast.52) broadcast.15 = f32[16,16,256,256]{3,2,1,0} broadcast(convert.52), dimensions={1,2,3} add.1 = f32[16,16,256,256]{3,2,1,0} add(broadcast.14, broadcast.15) dot.2 = bf16[16,16,256,256]{3,2,1,0} dot(transpose.6, transpose.7), lhs_contracting_dims={3}, rhs_contracting_dims={2}, lhs_batch_dims={0,1}, rhs_batch_dims={0,1} convert.55 = f32[16,16,256,256]{3,2,1,0} convert(dot.2) add.18 = f32[16,16,256,256]{3,2,1,0} add(convert.55, add.1) constant.11 = f32[] constant(-inf) reduce.36 = f32[16,16,256]{2,1,0} reduce(add.18, constant.11), dimensions={3}, to_apply=region_0.32.clone broadcast.17 = f32[16,16,256,256]{3,2,1,0} broadcast(reduce.36), dimensions={0,1,2} subtract.1 = f32[16,16,256,256]{3,2,1,0} subtract(add.18, broadcast.17) exponential.1 = f32[16,16,256,256]{3,2,1,0} exponential(subtract.1) constant.14 = f32[] constant(0) reduce.48 = f32[16,16,256]{2,1,0} reduce(exponential.1, constant.14), dimensions={3}, 
to_apply=region_1.44 broadcast.18 = f32[16,16,256,256]{3,2,1,0} broadcast(reduce.48), dimensions={0,1,2} divide = f32[16,16,256,256]{3,2,1,0} divide(exponential.1, broadcast.18) convert.68 = bf16[16,16,256,256]{3,2,1,0} convert(divide) dot.1 = bf16[16,16,64,256]{3,2,1,0} dot(transpose.5, convert.68), lhs_contracting_dims={3}, rhs_contracting_dims={3}, lhs_batch_dims={0,1}, rhs_batch_dims={0,1} ROOT transpose.8 = bf16[16,256,16,64]{3,2,1,0} transpose(dot.1), dimensions={0,3,1,2} } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(), GetCudnnVersion()}; TF_ASSERT_OK(RunHloPass(&fusedMhaRewriter, m.get()).status()); const HloInstruction* fmha; SCOPED_TRACE(m->ToString()); EXPECT_THAT( m->entry_computation()->root_instruction(), GmockMatch( m::Transpose( m::Transpose(m::GetTupleElement( m::CustomCall(&fmha, {kCudnnfMHAScaleBiasSoftmaxCallTarget}), 0))) .WithShape(BF16, {16, 256, 16, 64}))); TF_ASSERT_OK_AND_ASSIGN(auto gpu_config, fmha->backend_config<GpuBackendConfig>()); EXPECT_EQ(fmha->operands().size(), 4); } TEST_F(CudnnFusedMhaRewriterTestHloTest, F16Bmm1UnfusedSoftmaxBmm2) { if (skip_reason_) GTEST_SKIP() << *skip_reason_; const char* module_str = R"( HloModule jit__unnamed_wrapped_function_, entry_computation_layout={(f16[2,6,40,64]{3,2,1,0},f16[2,6,64,40]{3,2,1,0},f16[2,6,40,64]{3,2,1,0})->f16[2,6,40,64]{3,2,1,0}} region_0.7 { Arg_0.8 = f16[] parameter(0) Arg_1.9 = f16[] parameter(1) ROOT maximum = f16[] maximum(Arg_0.8, Arg_1.9) } region_1.19 { Arg_0.20 = f32[] parameter(0) Arg_1.21 = f32[] parameter(1) ROOT add = f32[] add(Arg_0.20, Arg_1.21) } ENTRY main.31 { Arg_0.1 = f16[2,6,40,64]{3,2,1,0} parameter(0), sharding={replicated} Arg_1.2 = f16[2,6,64,40]{3,2,1,0} parameter(1), sharding={replicated} dot = f16[2,6,40,40]{3,2,1,0} dot(Arg_0.1, Arg_1.2), lhs_contracting_dims={3}, rhs_contracting_dims={2}, lhs_batch_dims={0,1}, rhs_batch_dims={0,1} constant = f16[] constant(-inf) reduce.11 = f16[2,6,40]{2,1,0} reduce(dot, constant), dimensions={3}, to_apply=region_0.7 broadcast.3 = f16[2,6,40,40]{3,2,1,0} broadcast(reduce.11), dimensions={0,1,2} subtract.1 = f16[2,6,40,40]{3,2,1,0} subtract(dot, broadcast.3) exponential.1 = f16[2,6,40,40]{3,2,1,0} exponential(subtract.1) convert.1 = f32[2,6,40,40]{3,2,1,0} convert(exponential.1) constant.1 = f32[] constant(0) reduce.23 = f32[2,6,40]{2,1,0} reduce(convert.1, constant.1), dimensions={3}, to_apply=region_1.19 convert.2 = f16[2,6,40]{2,1,0} convert(reduce.23) broadcast.4 = f16[2,6,40,40]{3,2,1,0} broadcast(convert.2), dimensions={0,1,2} divide = f16[2,6,40,40]{3,2,1,0} divide(exponential.1, broadcast.4) Arg_2.3 = f16[2,6,40,64]{3,2,1,0} parameter(2), sharding={replicated} ROOT dot.1 = f16[2,6,40,64]{3,2,1,0} dot(divide, Arg_2.3), lhs_contracting_dims={3}, rhs_contracting_dims={2}, lhs_batch_dims={0,1}, rhs_batch_dims={0,1} })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(), GetCudnnVersion()}; TF_ASSERT_OK(RunHloPass(&fusedMhaRewriter, m.get()).status()); const HloInstruction* fmha; SCOPED_TRACE(m->ToString()); EXPECT_THAT( m->entry_computation()->root_instruction(), GmockMatch(m::GetTupleElement( m::CustomCall(&fmha, {kCudnnfMHASoftmaxCallTarget}), 0) .WithShape(F16, {2, 6, 40, 64}))); TF_ASSERT_OK_AND_ASSIGN(auto gpu_config, fmha->backend_config<GpuBackendConfig>()); const CudnnfMHABackendConfig& config = gpu_config.cudnn_fmha_backend_config(); 
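// No scale, bias, or dropout appears in this pattern, so the backend config
// should hold the defaults (scale 1.0, dropout 0.0) and the fused call should
// take only the three Q/K/V operands.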
EXPECT_FLOAT_EQ(config.fmha_scale(), 1.0); EXPECT_FLOAT_EQ(config.dropout_rate(), 0.0); EXPECT_EQ(fmha->operands().size(), 3); } TEST_F(CudnnFusedMhaRewriterTestHloTest, BF16Bmm1ConvertedMaskAddedAfterFirstGemmSoftmaxBmm2) { if (skip_reason_) GTEST_SKIP() << *skip_reason_; const char* module_str = R"( HloModule jit__unnamed_wrapped_function_, entry_computation_layout={(bf16[16,256,16,64]{3,2,1,0},bf16[16,256,16,64]{3,2,1,0},bf16[16,256,16,64]{3,2,1,0},pred[16,1,256,256]{3,2,1,0})->bf16[16,256,16,64]{3,2,1,0}} region_0.27.clone { Arg_0.0 = f32[] parameter(0) Arg_1.0 = f32[] parameter(1) ROOT maximum.1 = f32[] maximum(Arg_0.0, Arg_1.0) } region_1.39 { Arg_0.40 = f32[] parameter(0) Arg_1.41 = f32[] parameter(1) ROOT add = f32[] add(Arg_0.40, Arg_1.41) } ENTRY main.56 { Arg_2.3 = bf16[16,256,16,64]{3,2,1,0} parameter(2), sharding={replicated} transpose.5 = bf16[16,16,64,256]{3,2,1,0} transpose(Arg_2.3), dimensions={0,2,3,1} Arg_0.1 = bf16[16,256,16,64]{3,2,1,0} parameter(0), sharding={replicated} transpose.6 = bf16[16,16,256,64]{3,2,1,0} transpose(Arg_0.1), dimensions={0,2,1,3} Arg_1.2 = bf16[16,256,16,64]{3,2,1,0} parameter(1), sharding={replicated} transpose.7 = bf16[16,16,64,256]{3,2,1,0} transpose(Arg_1.2), dimensions={0,2,3,1} dot = bf16[16,16,256,256]{3,2,1,0} dot(transpose.6, transpose.7), lhs_contracting_dims={3}, rhs_contracting_dims={2}, lhs_batch_dims={0,1}, rhs_batch_dims={0,1} convert.47 = f32[16,16,256,256]{3,2,1,0} convert(dot) Arg_3.4 = pred[16,1,256,256]{3,2,1,0} parameter(3), sharding={replicated} bitcast.37 = pred[16,256,256]{2,1,0} bitcast(Arg_3.4) convert.42 = s32[16,256,256]{2,1,0} convert(bitcast.37) constant.6 = s32[] constant(0) broadcast.9 = s32[16,256,256]{2,1,0} broadcast(constant.6), dimensions={} compare = pred[16,256,256]{2,1,0} compare(convert.42, broadcast.9), direction=GT constant.8 = bf16[] constant(0) broadcast.11 = bf16[16,256,256]{2,1,0} broadcast(constant.8), dimensions={} constant.10 = bf16[] constant(-9.999e+09) broadcast.12 = bf16[16,256,256]{2,1,0} broadcast(constant.10), dimensions={} select = bf16[16,256,256]{2,1,0} select(compare, broadcast.11, broadcast.12) convert.48 = f32[16,256,256]{2,1,0} convert(select) broadcast.14 = f32[16,16,256,256]{3,2,1,0} broadcast(convert.48), dimensions={0,2,3} add.2 = f32[16,16,256,256]{3,2,1,0} add(convert.47, broadcast.14) constant.13 = f32[] constant(-inf) reduce.31 = f32[16,16,256]{2,1,0} reduce(add.2, constant.13), dimensions={3}, to_apply=region_0.27.clone broadcast.16 = f32[16,16,256,256]{3,2,1,0} broadcast(reduce.31), dimensions={0,1,2} subtract.1 = f32[16,16,256,256]{3,2,1,0} subtract(add.2, broadcast.16) exponential.1 = f32[16,16,256,256]{3,2,1,0} exponential(subtract.1) constant.14 = f32[] constant(0) reduce.43 = f32[16,16,256]{2,1,0} reduce(exponential.1, constant.14), dimensions={3}, to_apply=region_1.39 broadcast.17 = f32[16,16,256,256]{3,2,1,0} broadcast(reduce.43), dimensions={0,1,2} divide = f32[16,16,256,256]{3,2,1,0} divide(exponential.1, broadcast.17) convert.63 = bf16[16,16,256,256]{3,2,1,0} convert(divide) dot.1 = bf16[16,16,64,256]{3,2,1,0} dot(transpose.5, convert.63), lhs_contracting_dims={3}, rhs_contracting_dims={3}, lhs_batch_dims={0,1}, rhs_batch_dims={0,1} ROOT transpose.8 = bf16[16,256,16,64]{3,2,1,0} transpose(dot.1), dimensions={0,3,1,2} } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(), GetCudnnVersion()}; TF_ASSERT_OK(RunHloPass(&fusedMhaRewriter, m.get()).status()); const 
HloInstruction* fmha; SCOPED_TRACE(m->ToString()); EXPECT_THAT( m->entry_computation()->root_instruction(), GmockMatch( m::Transpose( m::Transpose(m::GetTupleElement( m::CustomCall(&fmha, {kCudnnfMHAScaleBiasSoftmaxCallTarget}), 0))) .WithShape(BF16, {16, 256, 16, 64}))); TF_ASSERT_OK_AND_ASSIGN(auto gpu_config, fmha->backend_config<GpuBackendConfig>()); EXPECT_EQ(fmha->operands().size(), 4); } TEST_F(CudnnFusedMhaRewriterTestHloTest, BF16Bmm1Bmm2Pattern_bmm1_contracting_dim_not_equal_64) { if (skip_reason_) GTEST_SKIP() << *skip_reason_; const char* module_str = R"( HloModule fmha_test, entry_computation_layout={(bf16[16,16,256,32]{3,2,1,0},bf16[16,16,256,32]{3,2,1,0},bf16[16,16,256,64]{3,2,1,0})->bf16[16,16,256,64]{3,2,1,0}} ENTRY main.6 { Arg_2.3 = bf16[16,16,256,64]{3,2,1,0} parameter(2) Arg_0.1 = bf16[16,16,256,32]{3,2,1,0} parameter(0) Arg_1.2 = bf16[16,16,256,32]{3,2,1,0} parameter(1) dot.0 = bf16[16,16,256,256]{3,2,1,0} dot(Arg_0.1, Arg_1.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3}, metadata={} ROOT dot.1 = bf16[16,16,256,64]{3,2,1,0} dot(dot.0, Arg_2.3), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}, metadata={} } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(), GetCudnnVersion()}; TF_ASSERT_OK(RunHloPass(&fusedMhaRewriter, m.get()).status()); const HloInstruction* fmha; SCOPED_TRACE(m->ToString()); EXPECT_THAT(m->entry_computation()->root_instruction(), GmockMatch(m::Dot(&fmha, m::Dot(m::Parameter(0), m::Parameter(1)), m::Parameter(2)) .WithShape(BF16, {16, 16, 256, 64}))); } TEST_F(CudnnFusedMhaRewriterTestHloTest, BF16Bmm1Bmm2Pattern_bmm2_rhs_non_contracting_dim_not_equal_64) { if (skip_reason_) GTEST_SKIP() << *skip_reason_; const char* module_str = R"( HloModule fmha_test, entry_computation_layout={(bf16[16,16,256,64]{3,2,1,0},bf16[16,16,256,64]{3,2,1,0},bf16[16,16,256,32]{3,2,1,0})->bf16[16,16,256,32]{3,2,1,0}} ENTRY main.6 { Arg_2.3 = bf16[16,16,256,32]{3,2,1,0} parameter(2) Arg_0.1 = bf16[16,16,256,64]{3,2,1,0} parameter(0) Arg_1.2 = bf16[16,16,256,64]{3,2,1,0} parameter(1) dot.0 = bf16[16,16,256,256]{3,2,1,0} dot(Arg_0.1, Arg_1.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3}, metadata={} ROOT dot.1 = bf16[16,16,256,32]{3,2,1,0} dot(dot.0, Arg_2.3), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2}, metadata={} } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(), GetCudnnVersion()}; TF_ASSERT_OK(RunHloPass(&fusedMhaRewriter, m.get()).status()); const HloInstruction* fmha; SCOPED_TRACE(m->ToString()); EXPECT_THAT(m->entry_computation()->root_instruction(), GmockMatch(m::Dot(&fmha, m::Op(), m::Parameter(2)) .WithShape(BF16, {16, 16, 256, 32}))); } TEST_F(CudnnFusedMhaRewriterTestHloTest, BF16Bmm1Bmm2PatternUncanonicalized_bmm1_contracting_dim_not_equal_64) { if (skip_reason_) GTEST_SKIP() << *skip_reason_; const char* module_str = R"( HloModule fmha_test, entry_computation_layout={(bf16[16,16,256,32]{3,2,1,0},bf16[16,16,256,32]{3,2,1,0},bf16[16,16,256,64]{3,2,1,0})->bf16[16,16,64,256]{3,2,1,0}} ENTRY main.6 { Arg_2.3 = bf16[16,16,256,64]{3,2,1,0} parameter(2) Arg_0.1 = bf16[16,16,256,32]{3,2,1,0} parameter(0) Arg_1.2 = bf16[16,16,256,32]{3,2,1,0} parameter(1) dot.0 = 
bf16[16,16,256,256]{3,2,1,0} dot(Arg_0.1, Arg_1.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3}, metadata={} ROOT dot.1 = bf16[16,16,64,256]{3,2,1,0} dot(Arg_2.3, dot.0), lhs_batch_dims={0,1}, lhs_contracting_dims={2}, rhs_batch_dims={0,1}, rhs_contracting_dims={3}, metadata={} } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(), GetCudnnVersion()}; TF_ASSERT_OK(RunHloPass(&fusedMhaRewriter, m.get()).status()); const HloInstruction* fmha; SCOPED_TRACE(m->ToString()); EXPECT_THAT(m->entry_computation()->root_instruction(), GmockMatch(m::Dot(&fmha, m::Parameter(2), m::Op()) .WithShape(BF16, {16, 16, 64, 256}))); } TEST_F(CudnnFusedMhaRewriterTestHloTest, BF16Bmm1BiasSoftmaxDropoutBmm2) { if (skip_reason_) GTEST_SKIP() << *skip_reason_; const char* module_str = R"( HloModule jit__unnamed_wrapped_function_, entry_computation_layout={(bf16[16,256,16,64]{3,2,1,0},bf16[16,256,16,64]{3,2,1,0},bf16[16,256,16,64]{3,2,1,0},bf16[1,16,256,256]{3,2,1,0})->bf16[16,256,16,64]{3,2,1,0}} region_0.34 { Arg_0.35 = bf16[] parameter(0) Arg_1.36 = bf16[] parameter(1) ROOT maximum.37 = bf16[] maximum(Arg_0.35, Arg_1.36) } region_1.46 { Arg_0.47 = f32[] parameter(0) Arg_1.48 = f32[] parameter(1) ROOT add.49 = f32[] add(Arg_0.47, Arg_1.48) } ENTRY main.82 { Arg_2.3 = bf16[16,256,16,64]{3,2,1,0} parameter(2), sharding={replicated} copy = bf16[16,256,16,64]{1,3,2,0} copy(Arg_2.3), sharding={replicated} transpose.2 = bf16[16,16,64,256]{3,2,1,0} transpose(copy), dimensions={0,2,3,1} Arg_0.1 = bf16[16,256,16,64]{3,2,1,0} parameter(0), sharding={replicated} copy.1 = bf16[16,256,16,64]{3,1,2,0} copy(Arg_0.1), sharding={replicated} transpose = bf16[16,16,256,64]{3,2,1,0} transpose(copy.1), dimensions={0,2,1,3} Arg_1.2 = bf16[16,256,16,64]{3,2,1,0} parameter(1), sharding={replicated} copy.2 = bf16[16,256,16,64]{1,3,2,0} copy(Arg_1.2), sharding={replicated} transpose.1 = bf16[16,16,64,256]{3,2,1,0} transpose(copy.2), dimensions={0,2,3,1} dot = bf16[16,16,256,256]{3,2,1,0} dot(transpose, transpose.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} Arg_3.4 = bf16[1,16,256,256]{3,2,1,0} parameter(3), sharding={replicated} reshape.31 = bf16[16,256,256]{2,1,0} reshape(Arg_3.4) broadcast.32 = bf16[16,16,256,256]{3,2,1,0} broadcast(reshape.31), dimensions={1,2,3} add.33 = bf16[16,16,256,256]{3,2,1,0} add(dot, broadcast.32) constant.21 = bf16[] constant(-inf) reduce.38 = bf16[16,16,256]{2,1,0} reduce(add.33, constant.21), dimensions={3}, to_apply=region_0.34 broadcast.42 = bf16[16,16,256,256]{3,2,1,0} broadcast(reduce.38), dimensions={0,1,2} subtract.43 = bf16[16,16,256,256]{3,2,1,0} subtract(add.33, broadcast.42) exponential.44 = bf16[16,16,256,256]{3,2,1,0} exponential(subtract.43) convert.45 = f32[16,16,256,256]{3,2,1,0} convert(exponential.44) constant.9 = f32[] constant(0) reduce.50 = f32[16,16,256]{2,1,0} reduce(convert.45, constant.9), dimensions={3}, to_apply=region_1.46 convert.1 = bf16[16,16,256]{2,1,0} convert(reduce.50) broadcast.55 = bf16[16,16,256,256]{3,2,1,0} broadcast(convert.1), dimensions={0,1,2} divide.56 = bf16[16,16,256,256]{3,2,1,0} divide(exponential.44, broadcast.55) constant.18 = u32[1]{0} constant({255383827}) constant.17 = u32[1]{0} constant({267815257}) constant.2 = u32[1]{0} constant({0}) constant.19 = u32[1]{0} constant({3213575472}) custom-call.26 = (u32[1]{0}, u32[1]{0}) custom-call(constant.18, constant.17, 
constant.2, constant.19), custom_call_target="cu_threefry2x32", operand_layout_constraints={u32[1]{0}, u32[1]{0}, u32[1]{0}, u32[1]{0}}, api_version=API_VERSION_STATUS_RETURNING, backend_config="\001\000\000\000\000\000\000\000" get-tuple-element.27 = u32[1]{0} get-tuple-element(custom-call.26), index=0 reshape.58 = u32[] reshape(get-tuple-element.27) broadcast.62 = u32[32768]{0} broadcast(reshape.58), dimensions={} get-tuple-element.28 = u32[1]{0} get-tuple-element(custom-call.26), index=1 reshape.59 = u32[] reshape(get-tuple-element.28) broadcast.63 = u32[32768]{0} broadcast(reshape.59), dimensions={} iota.57 = u32[65536]{0} iota(), iota_dimension=0 slice.60 = u32[32768]{0} slice(iota.57), slice={[0:32768]} slice.61 = u32[32768]{0} slice(iota.57), slice={[32768:65536]} custom-call.64 = (u32[32768]{0}, u32[32768]{0}) custom-call(broadcast.62, broadcast.63, slice.60, slice.61), custom_call_target="cu_threefry2x32", operand_layout_constraints={u32[32768]{0}, u32[32768]{0}, u32[32768]{0}, u32[32768]{0}}, api_version=API_VERSION_STATUS_RETURNING, backend_config="\000\200\000\000\000\000\000\000" get-tuple-element.65 = u32[32768]{0} get-tuple-element(custom-call.64), index=0 get-tuple-element.66 = u32[32768]{0} get-tuple-element(custom-call.64), index=1 concatenate.67 = u32[65536]{0} concatenate(get-tuple-element.65, get-tuple-element.66), dimensions={0} constant.15 = u32[] constant(9) broadcast.3 = u32[65536]{0} broadcast(constant.15), dimensions={} shift-right-logical.0 = u32[65536]{0} shift-right-logical(concatenate.67, broadcast.3) constant.13 = u32[] constant(1065353216) broadcast.11 = u32[65536]{0} broadcast(constant.13), dimensions={} or.0 = u32[65536]{0} or(shift-right-logical.0, broadcast.11) bitcast-convert.0 = f32[65536]{0} bitcast-convert(or.0) constant.3 = f32[] constant(-1) broadcast.17 = f32[65536]{0} broadcast(constant.3), dimensions={} add.1 = f32[65536]{0} add(bitcast-convert.0, broadcast.17) broadcast.18 = f32[65536]{0} broadcast(constant.9), dimensions={} maximum.0 = f32[65536]{0} maximum(add.1, broadcast.18) constant.7 = f32[] constant(0.9) broadcast.19 = f32[65536]{0} broadcast(constant.7), dimensions={} compare.0 = pred[65536]{0} compare(maximum.0, broadcast.19), direction=LT constant = bf16[] constant(1.109) broadcast.20 = bf16[65536]{0} broadcast(constant), dimensions={} constant.4 = bf16[] constant(0) broadcast.21 = bf16[65536]{0} broadcast(constant.4), dimensions={} select.1 = bf16[65536]{0} select(compare.0, broadcast.20, broadcast.21) reshape.19 = bf16[16,16,256]{2,1,0} reshape(select.1) broadcast.9 = bf16[16,16,256,256]{3,2,1,0} broadcast(reshape.19), dimensions={0,1,3} multiply.79 = bf16[16,16,256,256]{3,2,1,0} multiply(divide.56, broadcast.9) dot.1 = bf16[16,16,64,256]{3,2,1,0} dot(transpose.2, multiply.79), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3} transpose.81 = bf16[16,256,16,64]{1,3,2,0} transpose(dot.1), dimensions={0,3,1,2} ROOT copy.3 = bf16[16,256,16,64]{3,2,1,0} copy(transpose.81) } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(), GetCudnnVersion()}; TF_ASSERT_OK(RunHloPass(&fusedMhaRewriter, m.get()).status()); const HloInstruction* fmha; SCOPED_TRACE(m->ToString()); EXPECT_THAT( m->entry_computation()->root_instruction(), GmockMatch( m::Copy(m::Transpose(m::Transpose(m::GetTupleElement( m::CustomCall( &fmha, {kCudnnfMHAScaleBiasSoftmaxDropoutCallTarget}), 0)))) .WithShape(BF16, {16, 256, 16, 64}))); 
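// The dropout in this pattern keeps elements with probability 0.9 (the
// threshold of the compare above), so the recorded dropout rate should be
// about 0.1, with Q, K, V and the bias as the four operands.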
TF_ASSERT_OK_AND_ASSIGN(auto gpu_config, fmha->backend_config<GpuBackendConfig>()); const CudnnfMHABackendConfig& config = gpu_config.cudnn_fmha_backend_config(); EXPECT_EQ(fmha->operands().size(), 4); EXPECT_NEAR(config.dropout_rate(), 0.1, 1e-2); } TEST_F(CudnnFusedMhaRewriterTestHloTest, BF16Bmm1ScaleBiasSoftmaxDropoutForm2Bmm2) { if (skip_reason_) GTEST_SKIP() << *skip_reason_; const char* module_str = R"( HloModule jit__unnamed_wrapped_function_, entry_computation_layout={(bf16[32,40,60,64]{3,2,1,0},bf16[32,40,60,64]{3,2,1,0},bf16[32,40,60,64]{3,2,1,0})->bf16[32,40,60,64]{3,2,1,0}}, allow_spmd_sharding_propagation_to_output={true} region_0.29 { Arg_0.30 = bf16[] parameter(0) Arg_1.31 = bf16[] parameter(1) ROOT maximum.32 = bf16[] maximum(Arg_0.30, Arg_1.31) } region_1.41 { Arg_0.42 = f32[] parameter(0) Arg_1.43 = f32[] parameter(1) ROOT add.44 = f32[] add(Arg_0.42, Arg_1.43) } ENTRY main.79 { Arg_2.3 = bf16[32,40,60,64]{3,2,1,0} parameter(2), sharding={replicated} copy = bf16[32,40,60,64]{1,3,2,0} copy(Arg_2.3), sharding={replicated} transpose.2 = bf16[32,60,64,40]{3,2,1,0} transpose(copy), dimensions={0,2,3,1} constant.19 = u32[1]{0} constant({2718843009}) constant.18 = u32[1]{0} constant({1272950319}) constant.2 = u32[1]{0} constant({0}) constant.20 = u32[1]{0} constant({2711844646}) custom-call.54 = (u32[1]{0}, u32[1]{0}) custom-call(constant.19, constant.18, constant.2, constant.20), custom_call_target="cu_threefry2x32", operand_layout_constraints={u32[1]{0}, u32[1]{0}, u32[1]{0}, u32[1]{0}}, api_version=API_VERSION_STATUS_RETURNING, backend_config="\001\000\000\000\000\000\000\000" get-tuple-element.55 = u32[1]{0} get-tuple-element(custom-call.54), index=0 reshape.58 = u32[] reshape(get-tuple-element.55) broadcast.62 = u32[1536000]{0} broadcast(reshape.58), dimensions={} get-tuple-element.56 = u32[1]{0} get-tuple-element(custom-call.54), index=1 reshape.59 = u32[] reshape(get-tuple-element.56) broadcast.63 = u32[1536000]{0} broadcast(reshape.59), dimensions={} iota.57 = u32[3072000]{0} iota(), iota_dimension=0 slice.60 = u32[1536000]{0} slice(iota.57), slice={[0:1536000]} slice.61 = u32[1536000]{0} slice(iota.57), slice={[1536000:3072000]} custom-call.64 = (u32[1536000]{0}, u32[1536000]{0}) custom-call(broadcast.62, broadcast.63, slice.60, slice.61), custom_call_target="cu_threefry2x32", operand_layout_constraints={u32[1536000]{0}, u32[1536000]{0}, u32[1536000]{0}, u32[1536000]{0}}, api_version=API_VERSION_STATUS_RETURNING, backend_config="\000p\027\000\000\000\000\000" get-tuple-element.65 = u32[1536000]{0} get-tuple-element(custom-call.64), index=0 get-tuple-element.66 = u32[1536000]{0} get-tuple-element(custom-call.64), index=1 concatenate.67 = u32[3072000]{0} concatenate(get-tuple-element.65, get-tuple-element.66), dimensions={0} constant.16 = u32[] constant(9) broadcast.2 = u32[3072000]{0} broadcast(constant.16), dimensions={} shift-right-logical.0 = u32[3072000]{0} shift-right-logical(concatenate.67, broadcast.2) constant.14 = u32[] constant(1065353216) broadcast.6 = u32[3072000]{0} broadcast(constant.14), dimensions={} or.0 = u32[3072000]{0} or(shift-right-logical.0, broadcast.6) bitcast-convert.0 = f32[3072000]{0} bitcast-convert(or.0) constant.3 = f32[] constant(-1) broadcast.8 = f32[3072000]{0} broadcast(constant.3), dimensions={} add.1 = f32[3072000]{0} add(bitcast-convert.0, broadcast.8) constant.10 = f32[] constant(0) broadcast.10 = f32[3072000]{0} broadcast(constant.10), dimensions={} maximum.0 = f32[3072000]{0} maximum(add.1, broadcast.10) constant.8 = f32[] 
constant(0.9) broadcast.12 = f32[3072000]{0} broadcast(constant.8), dimensions={} compare.0 = pred[3072000]{0} compare(maximum.0, broadcast.12), direction=LT reshape.18 = pred[32,60,40,40]{3,2,1,0} reshape(compare.0) Arg_0.1 = bf16[32,40,60,64]{3,2,1,0} parameter(0), sharding={replicated} copy.1 = bf16[32,40,60,64]{3,1,2,0} copy(Arg_0.1), sharding={replicated} transpose = bf16[32,60,40,64]{3,2,1,0} transpose(copy.1), dimensions={0,2,1,3} Arg_1.2 = bf16[32,40,60,64]{3,2,1,0} parameter(1), sharding={replicated} copy.2 = bf16[32,40,60,64]{1,3,2,0} copy(Arg_1.2), sharding={replicated} transpose.1 = bf16[32,60,64,40]{3,2,1,0} transpose(copy.2), dimensions={0,2,3,1} dot = bf16[32,60,40,40]{3,2,1,0} dot(transpose, transpose.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} constant.25 = bf16[] constant(1) broadcast.26 = bf16[32,60,40,40]{3,2,1,0} broadcast(constant.25), dimensions={} add.28 = bf16[32,60,40,40]{3,2,1,0} add(dot, broadcast.26) constant.24 = bf16[] constant(-inf) reduce.33 = bf16[32,60,40]{2,1,0} reduce(add.28, constant.24), dimensions={3}, to_apply=region_0.29 broadcast.37 = bf16[32,60,40,40]{3,2,1,0} broadcast(reduce.33), dimensions={0,1,2} subtract.38 = bf16[32,60,40,40]{3,2,1,0} subtract(add.28, broadcast.37) exponential.39 = bf16[32,60,40,40]{3,2,1,0} exponential(subtract.38) convert.40 = f32[32,60,40,40]{3,2,1,0} convert(exponential.39) reduce.45 = f32[32,60,40]{2,1,0} reduce(convert.40, constant.10), dimensions={3}, to_apply=region_1.41 convert.0 = bf16[32,60,40]{2,1,0} convert(reduce.45) broadcast.50 = bf16[32,60,40,40]{3,2,1,0} broadcast(convert.0), dimensions={0,1,2} divide.51 = bf16[32,60,40,40]{3,2,1,0} divide(exponential.39, broadcast.50) constant = bf16[] constant(1.109) broadcast.1 = bf16[32,60,40,40]{3,2,1,0} broadcast(constant), dimensions={} multiply = bf16[32,60,40,40]{3,2,1,0} multiply(divide.51, broadcast.1) constant.4 = bf16[] constant(0) broadcast.5 = bf16[32,60,40,40]{3,2,1,0} broadcast(constant.4), dimensions={} select.76 = bf16[32,60,40,40]{3,2,1,0} select(reshape.18, multiply, broadcast.5) dot.1 = bf16[32,60,64,40]{3,2,1,0} dot(transpose.2, select.76), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3} transpose.78 = bf16[32,40,60,64]{1,3,2,0} transpose(dot.1), dimensions={0,3,1,2} ROOT copy.3 = bf16[32,40,60,64]{3,2,1,0} copy(transpose.78) } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(), GetCudnnVersion()}; TF_ASSERT_OK(RunHloPass(&fusedMhaRewriter, m.get()).status()); const HloInstruction* fmha; SCOPED_TRACE(m->ToString()); EXPECT_THAT( m->entry_computation()->root_instruction(), GmockMatch( m::Copy(m::Transpose(m::Transpose(m::GetTupleElement( m::CustomCall( &fmha, {kCudnnfMHAScaleBiasSoftmaxDropoutCallTarget}), 0)))) .WithShape(BF16, {32, 40, 60, 64}))); TF_ASSERT_OK_AND_ASSIGN(auto gpu_config, fmha->backend_config<GpuBackendConfig>()); const CudnnfMHABackendConfig& config = gpu_config.cudnn_fmha_backend_config(); EXPECT_NEAR(config.dropout_rate(), 0.1, 1e-2); EXPECT_EQ(fmha->operands().size(), 4); } TEST_F(CudnnFusedMhaRewriterTestHloTest, BF16TrainingBmm1Bmm2) { if (skip_reason_) GTEST_SKIP() << *skip_reason_; const char* module_str = R"( HloModule jit__unnamed_wrapped_function_, entry_computation_layout={(bf16[16,256,16,64]{3,2,1,0},bf16[16,256,16,64]{3,2,1,0},bf16[16,256,16,64]{3,2,1,0},bf16[16,256,16,64]{3,2,1,0})->(bf16[16,256,16,64]{3,2,1,0}, 
bf16[16,256,16,64]{3,2,1,0}, bf16[16,256,16,64]{3,2,1,0}, bf16[16,256,16,64]{3,2,1,0})} ENTRY main.17 { Arg_2.3 = bf16[16,256,16,64]{3,2,1,0} parameter(2), sharding={replicated} copy = bf16[16,256,16,64]{1,3,2,0} copy(Arg_2.3), sharding={replicated} transpose.2 = bf16[16,16,64,256]{3,2,1,0} transpose(copy), dimensions={0,2,3,1} Arg_0.1 = bf16[16,256,16,64]{3,2,1,0} parameter(0), sharding={replicated} copy.1 = bf16[16,256,16,64]{3,1,2,0} copy(Arg_0.1), sharding={replicated} transpose = bf16[16,16,256,64]{3,2,1,0} transpose(copy.1), dimensions={0,2,1,3} Arg_1.2 = bf16[16,256,16,64]{3,2,1,0} parameter(1), sharding={replicated} copy.2 = bf16[16,256,16,64]{1,3,2,0} copy(Arg_1.2), sharding={replicated} transpose.1 = bf16[16,16,64,256]{3,2,1,0} transpose(copy.2), dimensions={0,2,3,1} dot = bf16[16,16,256,256]{3,2,1,0} dot(transpose, transpose.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} dot.1 = bf16[16,16,64,256]{3,2,1,0} dot(transpose.2, dot), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3} transpose.7 = bf16[16,256,16,64]{1,3,2,0} transpose(dot.1), dimensions={0,3,1,2} Arg_3.4 = bf16[16,256,16,64]{3,2,1,0} parameter(3), sharding={replicated} copy.3 = bf16[16,256,16,64]{3,1,2,0} copy(Arg_3.4), sharding={replicated} transpose.4 = bf16[16,16,256,64]{3,2,1,0} transpose(copy.3), dimensions={0,2,1,3} dot.2 = bf16[16,16,256,256]{3,2,1,0} dot(transpose.4, transpose.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} copy.4 = bf16[16,256,16,64]{3,1,2,0} copy(Arg_1.2), sharding={replicated} transpose.12 = bf16[16,16,256,64]{3,2,1,0} transpose(copy.4), dimensions={0,2,1,3} dot.4 = bf16[16,16,256,64]{3,2,1,0} dot(dot.2, transpose.12), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} transpose.15 = bf16[16,256,16,64]{3,1,2,0} transpose(dot.4), dimensions={0,2,1,3} dot.3 = bf16[16,16,256,64]{3,2,1,0} dot(dot.2, transpose), lhs_batch_dims={0,1}, lhs_contracting_dims={2}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} transpose.13 = bf16[16,256,16,64]{3,1,2,0} transpose(dot.3), dimensions={0,2,1,3} copy.5 = bf16[16,256,16,64]{1,3,2,0} copy(Arg_3.4), sharding={replicated} transpose.8 = bf16[16,16,64,256]{3,2,1,0} transpose(copy.5), dimensions={0,2,3,1} dot.10 = bf16[16,16,64,256]{3,2,1,0} dot(transpose.8, dot), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} transpose.11 = bf16[16,256,16,64]{1,3,2,0} transpose(dot.10), dimensions={0,3,1,2} tuple.16 = (bf16[16,256,16,64]{1,3,2,0}, bf16[16,256,16,64]{3,1,2,0}, bf16[16,256,16,64]{3,1,2,0}, bf16[16,256,16,64]{1,3,2,0}) tuple(transpose.7, transpose.15, transpose.13, transpose.11) get-tuple-element = bf16[16,256,16,64]{1,3,2,0} get-tuple-element(tuple.16), index=0 copy.6 = bf16[16,256,16,64]{3,2,1,0} copy(get-tuple-element) get-tuple-element.1 = bf16[16,256,16,64]{3,1,2,0} get-tuple-element(tuple.16), index=1 copy.7 = bf16[16,256,16,64]{3,2,1,0} copy(get-tuple-element.1) get-tuple-element.2 = bf16[16,256,16,64]{3,1,2,0} get-tuple-element(tuple.16), index=2 copy.8 = bf16[16,256,16,64]{3,2,1,0} copy(get-tuple-element.2) get-tuple-element.3 = bf16[16,256,16,64]{1,3,2,0} get-tuple-element(tuple.16), index=3 copy.9 = bf16[16,256,16,64]{3,2,1,0} copy(get-tuple-element.3) ROOT tuple = (bf16[16,256,16,64]{3,2,1,0}, bf16[16,256,16,64]{3,2,1,0}, bf16[16,256,16,64]{3,2,1,0}, bf16[16,256,16,64]{3,2,1,0}) tuple(copy.6, copy.7, copy.8, copy.9) 
} )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(), GetCudnnVersion()}; const auto status = RunHloPass(&fusedMhaRewriter, m.get()); const bool changed = status.value(); EXPECT_EQ(changed, false); } TEST_F(CudnnFusedMhaRewriterTestHloTest, BF16MiniT5xTest) { if (skip_reason_) GTEST_SKIP() << *skip_reason_; const char* module_str = R"( HloModule jit__lambda_, entry_computation_layout={(bf16[12,512,32,64]{3,2,1,0},bf16[12,512,2,32,64]{4,3,2,1,0},f32[12,512]{1,0},f32[12,512]{1,0})->(bf16[], bf16[12,512,32,64]{3,2,1,0}, bf16[12,512,2,32,64]{4,3,2,1,0})}, allow_spmd_sharding_propagation_to_output={true,true,true} region_0.51 { Arg_0.52 = bf16[] parameter(0) Arg_1.53 = bf16[] parameter(1) ROOT maximum.54 = bf16[] maximum(Arg_0.52, Arg_1.53) } region_1.63 { Arg_0.64 = f32[] parameter(0) Arg_1.65 = f32[] parameter(1) ROOT add.66 = f32[] add(Arg_0.64, Arg_1.65) } region_3.99 { Arg_0.100 = bf16[] parameter(0) Arg_1.101 = bf16[] parameter(1) ROOT add.102 = bf16[] add(Arg_0.100, Arg_1.101) } ENTRY main.129 { Arg_1.2 = bf16[12,512,2,32,64]{4,3,2,1,0} parameter(1), sharding={replicated} copy = bf16[12,512,2,32,64]{1,4,3,0,2} copy(Arg_1.2), sharding={replicated} slice.42 = bf16[12,512,1,32,64]{1,4,3,0,2} slice(copy), slice={[0:12], [0:512], [1:2], [0:32], [0:64]} reshape.44 = bf16[12,512,32,64]{1,3,2,0} reshape(slice.42) transpose.5 = bf16[12,32,64,512]{3,2,1,0} transpose(reshape.44), dimensions={0,2,3,1} Arg_0.1 = bf16[12,512,32,64]{3,2,1,0} parameter(0), sharding={replicated} copy.1 = bf16[12,512,32,64]{3,1,2,0} copy(Arg_0.1), sharding={replicated} constant.5 = bf16[] constant(0.125) broadcast.6 = bf16[12,512,32,64]{3,1,2,0} broadcast(constant.5), dimensions={} multiply.45 = bf16[12,512,32,64]{3,1,2,0} multiply(copy.1, broadcast.6) transpose = bf16[12,32,512,64]{3,2,1,0} transpose(multiply.45), dimensions={0,2,1,3} copy.2 = bf16[12,512,2,32,64]{1,4,3,0,2} copy(Arg_1.2), sharding={replicated} slice.41 = bf16[12,512,1,32,64]{1,4,3,0,2} slice(copy.2), slice={[0:12], [0:512], [0:1], [0:32], [0:64]} reshape.43 = bf16[12,512,32,64]{1,3,2,0} reshape(slice.41) transpose.1 = bf16[12,32,64,512]{3,2,1,0} transpose(reshape.43), dimensions={0,2,3,1} dot = bf16[12,32,512,512]{3,2,1,0} dot(transpose, transpose.1), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} Arg_2.3 = f32[12,512]{1,0} parameter(2), sharding={replicated} constant.14 = f32[] constant(0) broadcast.19 = f32[12,512]{1,0} broadcast(constant.14), dimensions={} compare.24 = pred[12,512]{1,0} compare(Arg_2.3, broadcast.19), direction=GT broadcast.30 = pred[12,512,512]{2,1,0} broadcast(compare.24), dimensions={0,1} Arg_3.4 = f32[12,512]{1,0} parameter(3), sharding={replicated} compare.25 = pred[12,512]{1,0} compare(Arg_3.4, broadcast.19), direction=GT broadcast.33 = pred[12,512,512]{2,1,0} broadcast(compare.25), dimensions={0,2} and.34 = pred[12,512,512]{2,1,0} and(broadcast.30, broadcast.33) convert.4 = s32[12,512,512]{2,1,0} convert(and.34) constant.16 = s32[] constant(0) broadcast.21 = s32[12,512,512]{2,1,0} broadcast(constant.16), dimensions={} compare.0 = pred[12,512,512]{2,1,0} compare(convert.4, broadcast.21), direction=GT constant.20 = bf16[] constant(0) broadcast.22 = bf16[12,512,512]{2,1,0} broadcast(constant.20), dimensions={} constant.11 = bf16[] constant(-9.999e+09) broadcast.23 = bf16[12,512,512]{2,1,0} broadcast(constant.11), dimensions={} select.0 = bf16[12,512,512]{2,1,0} select(compare.0, 
broadcast.22, broadcast.23) broadcast.49 = bf16[12,32,512,512]{3,2,1,0} broadcast(select.0), dimensions={0,2,3} add.50 = bf16[12,32,512,512]{3,2,1,0} add(dot, broadcast.49) constant.22 = bf16[] constant(-inf) reduce.55 = bf16[12,32,512]{2,1,0} reduce(add.50, constant.22), dimensions={3}, to_apply=region_0.51 broadcast.59 = bf16[12,32,512,512]{3,2,1,0} broadcast(reduce.55), dimensions={0,1,2} subtract.60 = bf16[12,32,512,512]{3,2,1,0} subtract(add.50, broadcast.59) exponential.61 = bf16[12,32,512,512]{3,2,1,0} exponential(subtract.60) convert.62 = f32[12,32,512,512]{3,2,1,0} convert(exponential.61) reduce.67 = f32[12,32,512]{2,1,0} reduce(convert.62, constant.14), dimensions={3}, to_apply=region_1.63 convert.5 = bf16[12,32,512]{2,1,0} convert(reduce.67) broadcast.72 = bf16[12,32,512,512]{3,2,1,0} broadcast(convert.5), dimensions={0,1,2} divide.73 = bf16[12,32,512,512]{3,2,1,0} divide(exponential.61, broadcast.72) dot.1 = bf16[12,32,64,512]{3,2,1,0} dot(transpose.5, divide.73), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3} convert.6 = f32[12,32,64,512]{3,2,1,0} convert(dot.1) reduce.83 = f32[] reduce(convert.6, constant.14), dimensions={0,3,1,2}, to_apply=region_1.63 convert.84 = bf16[] convert(reduce.83) constant.2 = bf16[] constant(0.0007935) multiply.86 = bf16[] multiply(convert.84, constant.2) broadcast.9 = bf16[12,32,512,64]{3,2,1,0} broadcast(constant.2), dimensions={} dot.2 = bf16[12,32,512,512]{3,2,1,0} dot(broadcast.9, transpose.5), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} divide.109 = bf16[12,32,512,512]{3,2,1,0} divide(dot.2, broadcast.72) constant.10 = bf16[] constant(1) broadcast.24 = bf16[12,32,512]{2,1,0} broadcast(constant.10), dimensions={} multiply.4 = bf16[12,32,512]{2,1,0} multiply(convert.5, convert.5) divide.0 = bf16[12,32,512]{2,1,0} divide(broadcast.24, multiply.4) broadcast.96 = bf16[12,32,512,512]{3,2,1,0} broadcast(divide.0), dimensions={0,1,2} multiply.97 = bf16[12,32,512,512]{3,2,1,0} multiply(dot.2, broadcast.96) multiply.98 = bf16[12,32,512,512]{3,2,1,0} multiply(multiply.97, exponential.61) reduce.103 = bf16[12,32,512]{2,1,0} reduce(multiply.98, constant.20), dimensions={3}, to_apply=region_3.99 negate.0 = bf16[12,32,512]{2,1,0} negate(reduce.103) broadcast.10 = bf16[12,32,512,512]{3,2,1,0} broadcast(negate.0), dimensions={0,1,2} add.118 = bf16[12,32,512,512]{3,2,1,0} add(divide.109, broadcast.10) multiply.119 = bf16[12,32,512,512]{3,2,1,0} multiply(add.118, exponential.61) transpose.9 = bf16[12,32,512,64]{2,3,1,0} transpose(reshape.43), dimensions={0,2,1,3} copy.3 = bf16[12,32,512,64]{3,2,1,0} copy(transpose.9) dot.4 = bf16[12,32,512,64]{3,2,1,0} dot(multiply.119, copy.3), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} broadcast.12 = bf16[12,32,512,64]{3,2,1,0} broadcast(constant.5), dimensions={} multiply.3 = bf16[12,32,512,64]{3,2,1,0} multiply(dot.4, broadcast.12) transpose.11 = bf16[12,512,32,64]{3,1,2,0} transpose(multiply.3), dimensions={0,2,1,3} broadcast.7 = bf16[12,32,64,512]{3,2,1,0} broadcast(constant.2), dimensions={} dot.90 = bf16[12,32,64,512]{3,2,1,0} dot(broadcast.7, divide.73), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} transpose.91 = bf16[12,512,32,64]{1,3,2,0} transpose(dot.90), dimensions={0,3,1,2} reshape.92 = bf16[12,512,1,32,64]{1,4,3,0,2} reshape(transpose.91) pad.93 = bf16[12,512,2,32,64]{1,4,3,0,2} pad(reshape.92, constant.20), 
padding=0_0x0_0x1_0x0_0x0_0 dot.3 = bf16[12,32,512,64]{3,2,1,0} dot(multiply.119, transpose), lhs_batch_dims={0,1}, lhs_contracting_dims={2}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} copy.4 = bf16[12,32,512,64]{2,3,1,0} copy(dot.3) transpose.121 = bf16[12,512,32,64]{1,3,2,0} transpose(copy.4), dimensions={0,2,1,3} reshape.124 = bf16[12,512,1,32,64]{1,4,3,0,2} reshape(transpose.121) pad.125 = bf16[12,512,2,32,64]{1,4,3,0,2} pad(reshape.124, constant.20), padding=0_0x0_0x0_1x0_0x0_0 add.126 = bf16[12,512,2,32,64]{1,4,3,0,2} add(pad.93, pad.125) tuple.128 = (bf16[], bf16[12,512,32,64]{3,1,2,0}, bf16[12,512,2,32,64]{1,4,3,0,2}) tuple(multiply.86, transpose.11, add.126) get-tuple-element = bf16[] get-tuple-element(tuple.128), index=0 get-tuple-element.1 = bf16[12,512,32,64]{3,1,2,0} get-tuple-element(tuple.128), index=1 copy.5 = bf16[12,512,32,64]{3,2,1,0} copy(get-tuple-element.1) get-tuple-element.2 = bf16[12,512,2,32,64]{1,4,3,0,2} get-tuple-element(tuple.128), index=2 copy.6 = bf16[12,512,2,32,64]{4,3,2,1,0} copy(get-tuple-element.2) ROOT tuple = (bf16[], bf16[12,512,32,64]{3,2,1,0}, bf16[12,512,2,32,64]{4,3,2,1,0}) tuple(get-tuple-element, copy.5, copy.6) } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); AlgebraicSimplifierOptions alg_sim_options; alg_sim_options.set_supports_non_canonical_dots(false); alg_sim_options.set_is_layout_sensitive(true); alg_sim_options.set_enable_conv_operand_swap(false); AlgebraicSimplifier alge_simp{alg_sim_options}; ReshapeDecomposer reshape_decomposer; LayoutNormalization layout_normalizer; HloCSE cse{true}; TF_ASSERT_OK(RunHloPass(&reshape_decomposer, m.get()).status()); TF_ASSERT_OK(RunHloPass(&layout_normalizer, m.get()).status()); TF_ASSERT_OK(RunHloPass(&cse, m.get()).status()); TF_ASSERT_OK(RunHloPass(&alge_simp, m.get()).status()); CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(), GetCudnnVersion()}; TF_ASSERT_OK(RunHloPass(&fusedMhaRewriter, m.get()).status()); CudnnFusedMHATransposeFusion fmha_transpose_fusion; HloDCE dce; TF_ASSERT_OK(RunHloPass(&alge_simp, m.get()).status()); TF_ASSERT_OK(RunHloPass(&fmha_transpose_fusion, m.get()).status()); TF_ASSERT_OK(RunHloPass(&dce, m.get()).status()); EXPECT_EQ(CountFusedAttentionCall(m.get()), 1); EXPECT_EQ(CountFusedAttentionCall(m.get(), true), 1); } TEST_F(CudnnFusedMhaRewriterTestHloTest, ActivationHasMoreThan1UserShouldNotLower) { if (skip_reason_) GTEST_SKIP() << *skip_reason_; const char* module_str = R"( HloModule test %region_50.2457 (Arg_0.2458: bf16[], Arg_1.2459: bf16[]) -> bf16[] { %Arg_0.2458 = bf16[] parameter(0) %Arg_1.2459 = bf16[] parameter(1) ROOT %maximum.2 = bf16[] maximum(bf16[] %Arg_0.2458, bf16[] %Arg_1.2459) } %region_36.2316 (Arg_0.2317: f32[], Arg_1.2318: f32[]) -> f32[] { %Arg_0.2317 = f32[] parameter(0) %Arg_1.2318 = f32[] parameter(1) ROOT %add.342 = f32[] add(f32[] %Arg_0.2317, f32[] %Arg_1.2318) } ENTRY main { %transpose.482 = bf16[4,5,64]{2,1,0} parameter(0) %transpose.484 = bf16[4,64,5]{2,1,0} parameter(1) %dot.20 = bf16[4,5,5]{2,1,0} dot(bf16[4,5,64]{2,1,0} %transpose.482, bf16[4,64,5]{2,1,0} %transpose.484), lhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_batch_dims={0}, rhs_contracting_dims={1} %constant.2515 = bf16[] constant(0.125) %broadcast.789 = bf16[4,5,5]{2,1,0} broadcast(bf16[] %constant.2515), dimensions={} %multiply.267 = bf16[4,5,5]{2,1,0} multiply(bf16[4,5,5]{2,1,0} %dot.20, bf16[4,5,5]{2,1,0} %broadcast.789) %constant.287 = f32[] constant(-1) %broadcast.792 = bf16[4,5,5]{2,1,0} parameter(3) %add.348 = 
bf16[4,5,5]{2,1,0} add(bf16[4,5,5]{2,1,0} %multiply.267, bf16[4,5,5]{2,1,0} %broadcast.792) %constant.2510 = bf16[] constant(-inf) %reduce.2550 = bf16[4,5]{1,0} reduce(bf16[4,5,5]{2,1,0} %add.348, bf16[] %constant.2510), dimensions={2}, to_apply=%region_50.2457 %broadcast.793 = bf16[4,5,5]{2,1,0} broadcast(bf16[4,5]{1,0} %reduce.2550), dimensions={0,1} %subtract.81 = bf16[4,5,5]{2,1,0} subtract(bf16[4,5,5]{2,1,0} %add.348, bf16[4,5,5]{2,1,0} %broadcast.793) %exponential.21 = bf16[4,5,5]{2,1,0} exponential(bf16[4,5,5]{2,1,0} %subtract.81) %convert.180 = f32[4,5,5]{2,1,0} convert(bf16[4,5,5]{2,1,0} %exponential.21) %constant.2509 = f32[] constant(0) %reduce.2558 = f32[4,5]{1,0} reduce(f32[4,5,5]{2,1,0} %convert.180, f32[] %constant.2509), dimensions={2}, to_apply=%region_36.2316 %convert.182 = bf16[4,5]{1,0} convert(f32[4,5]{1,0} %reduce.2558) %broadcast.794 = bf16[4,5,5]{2,1,0} broadcast(bf16[4,5]{1,0} %convert.182), dimensions={0,1} %divide.25 = bf16[4,5,5]{2,1,0} divide(bf16[4,5,5]{2,1,0} %exponential.21, bf16[4,5,5]{2,1,0} %broadcast.794) %transpose.481 = bf16[4,64,5]{2,1,0} parameter(2) %dot.21 = bf16[4,64,5]{2,1,0} dot(bf16[4,64,5]{2,1,0} %transpose.481, bf16[4,5,5]{2,1,0} %divide.25), lhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_batch_dims={0}, rhs_contracting_dims={2} ROOT %tuple.2668 = (bf16[4,5,5]{2,1,0}, bf16[4,64,5]{2,1,0}) tuple(bf16[4,5,5]{2,1,0} %divide.25, bf16[4,64,5]{2,1,0} %dot.21) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(), GetCudnnVersion()}; TF_ASSERT_OK(RunHloPass(&fusedMhaRewriter, m.get()).status()); HloDCE dce; TF_ASSERT_OK(RunHloPass(&dce, m.get()).status()); HloVerifier verifier(false, true); ASSERT_IS_OK(verifier.Run(m.get()).status()); EXPECT_EQ(CountFusedAttentionCall(m.get()), 0); } TEST_F(CudnnFusedMhaRewriterTestHloTest, F16InvalidTrainingBmm1ScaleBiasMaskSoftmaxBmm2ShouldNotBeLowered) { if (skip_reason_) GTEST_SKIP() << *skip_reason_; const char* module_str = R"( HloModule jit__unnamed_wrapped_function_, entry_computation_layout={(f16[2,6,128,64]{3,2,1,0},f16[2,6,64,128]{3,2,1,0},f16[2,6,128,64]{3,2,1,0},f16[2,6,128,64]{3,2,1,0})->(f16[2,6,128,64]{3,2,1,0}, f16[2,6,128,64]{3,2,1,0}, f16[2,6,64,128]{3,2,1,0}, f16[2,6,128,64]{3,2,1,0})}, allow_spmd_sharding_propagation_to_output={true,true,true,true} region_0.21 { Arg_0.22 = f16[] parameter(0) Arg_1.23 = f16[] parameter(1) ROOT maximum = f16[] maximum(Arg_0.22, Arg_1.23) } region_1.33 { Arg_0.34 = f32[] parameter(0) Arg_1.35 = f32[] parameter(1) ROOT add = f32[] add(Arg_0.34, Arg_1.35) } region_2.55 { Arg_0.56 = f16[] parameter(0) Arg_1.57 = f16[] parameter(1) ROOT add.1 = f16[] add(Arg_0.56, Arg_1.57) } ENTRY main.82 { constant.18 = pred[2,6,128,128]{3,2,1,0} constant({...}) Arg_0.1 = f16[2,6,128,64]{3,2,1,0} parameter(0), sharding={replicated} Arg_1.2 = f16[2,6,64,128]{3,2,1,0} parameter(1), sharding={replicated} dot.17 = f16[2,6,128,128]{3,2,1,0} dot(Arg_0.1, Arg_1.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} constant.22 = f16[] constant(2) broadcast.24 = f16[2,6,128,128]{3,2,1,0} broadcast(constant.22), dimensions={} multiply.2 = f16[2,6,128,128]{3,2,1,0} multiply(dot.17, broadcast.24) constant.19 = f16[] constant(1) broadcast.13 = f16[2,6,128,128]{3,2,1,0} broadcast(constant.19), dimensions={} add.3 = f16[2,6,128,128]{3,2,1,0} add(multiply.2, broadcast.13) constant.21 = f16[] constant(0) broadcast.23 = f16[2,6,128,128]{3,2,1,0} 
broadcast(constant.21), dimensions={} select.1 = f16[2,6,128,128]{3,2,1,0} select(constant.18, add.3, broadcast.23) constant.15 = f16[] constant(-inf) reduce.25 = f16[2,6,128]{2,1,0} reduce(select.1, constant.15), dimensions={3}, to_apply=region_0.21 broadcast.17 = f16[2,6,128,128]{3,2,1,0} broadcast(reduce.25), dimensions={0,1,2} subtract.1 = f16[2,6,128,128]{3,2,1,0} subtract(select.1, broadcast.17) exponential.1 = f16[2,6,128,128]{3,2,1,0} exponential(subtract.1) convert.5 = f32[2,6,128,128]{3,2,1,0} convert(exponential.1) constant.17 = f32[] constant(0) reduce.37 = f32[2,6,128]{2,1,0} reduce(convert.5, constant.17), dimensions={3}, to_apply=region_1.33 convert.9 = f16[2,6,128]{2,1,0} convert(reduce.37) broadcast.26 = f16[2,6,128,128]{3,2,1,0} broadcast(convert.9), dimensions={0,1,2} divide.5 = f16[2,6,128,128]{3,2,1,0} divide(exponential.1, broadcast.26) Arg_2.3 = f16[2,6,128,64]{3,2,1,0} parameter(2), sharding={replicated} dot.46 = f16[2,6,128,64]{3,2,1,0} dot(divide.5, Arg_2.3), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} Arg_3.4 = f16[2,6,128,64]{3,2,1,0} parameter(3), sharding={replicated} dot.49 = f16[2,6,128,128]{3,2,1,0} dot(Arg_3.4, Arg_2.3), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3} divide.4 = f16[2,6,128,128]{3,2,1,0} divide(dot.49, broadcast.26) broadcast.20 = f16[2,6,128]{2,1,0} broadcast(constant.19), dimensions={} multiply.3 = f16[2,6,128]{2,1,0} multiply(convert.9, convert.9) divide.3 = f16[2,6,128]{2,1,0} divide(broadcast.20, multiply.3) broadcast.21 = f16[2,6,128,128]{3,2,1,0} broadcast(divide.3), dimensions={0,1,2} multiply.4 = f16[2,6,128,128]{3,2,1,0} multiply(dot.49, broadcast.21) multiply.5 = f16[2,6,128,128]{3,2,1,0} multiply(multiply.4, exponential.1) reduce.59 = f16[2,6,128]{2,1,0} reduce(multiply.5, constant.21), dimensions={3}, to_apply=region_2.55 broadcast.25 = f16[2,6,128,128]{3,2,1,0} broadcast(reduce.59), dimensions={0,1,2} add.5 = f16[2,6,128,128]{3,2,1,0} add(divide.4, broadcast.25) multiply.8 = f16[2,6,128,128]{3,2,1,0} multiply(add.5, exponential.1) select.3 = f16[2,6,128,128]{3,2,1,0} select(constant.18, multiply.8, broadcast.23) multiply.9 = f16[2,6,128,128]{3,2,1,0} multiply(select.3, broadcast.24) dot.80 = f16[2,6,128,64]{3,2,1,0} dot(multiply.9, Arg_1.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3} dot = f16[2,6,64,128]{3,2,1,0} dot(Arg_0.1, multiply.9), lhs_batch_dims={0,1}, lhs_contracting_dims={2}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} dot.1 = f16[2,6,128,64]{3,2,1,0} dot(divide.5, Arg_3.4), lhs_batch_dims={0,1}, lhs_contracting_dims={2}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} ROOT tuple.81 = (f16[2,6,128,64]{3,2,1,0}, f16[2,6,128,64]{3,2,1,0}, f16[2,6,64,128]{3,2,1,0}, f16[2,6,128,64]{3,2,1,0}) tuple(dot.46, dot.80, dot, dot.1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(), GetCudnnVersion()}; TF_ASSERT_OK(RunHloPass(&fusedMhaRewriter, m.get()).status()); HloDCE dce; TF_ASSERT_OK(RunHloPass(&dce, m.get()).status()); ComputationLayout computation_layout( m->entry_computation()->ComputeProgramShape()); HloVerifier verifier(false, true); ASSERT_IS_OK(verifier.Run(m.get()).status()); EXPECT_EQ(CountFusedAttentionCall(m.get()), 0); EXPECT_EQ(CountFusedAttentionCall(m.get(), true), 0); } TEST_F(CudnnFusedMhaRewriterTestHloTest, 
F16InvalidTrainingBmm1ScaleBiasMaskSoftmaxDropoutBmm2ShouldNotLower) { if (skip_reason_) GTEST_SKIP() << *skip_reason_; const char* module_str = R"( HloModule jit__unnamed_wrapped_function_, entry_computation_layout={(f16[2,6,128,64]{3,2,1,0},f16[2,6,64,128]{3,2,1,0},f16[2,6,128,64]{3,2,1,0},f16[2,6,128,64]{3,2,1,0})->(f16[2,6,128,64]{3,2,1,0}, f16[2,6,128,64]{3,2,1,0}, f16[2,6,64,128]{3,2,1,0}, f16[2,6,128,64]{3,2,1,0})}, allow_spmd_sharding_propagation_to_output={true,true,true,true} region_0.38 { Arg_0.39 = f16[] parameter(0) Arg_1.40 = f16[] parameter(1) ROOT maximum.1 = f16[] maximum(Arg_0.39, Arg_1.40) } region_1.50 { Arg_0.51 = f32[] parameter(0) Arg_1.52 = f32[] parameter(1) ROOT add.2 = f32[] add(Arg_0.51, Arg_1.52) } region_2.99 { Arg_0.100 = f16[] parameter(0) Arg_1.101 = f16[] parameter(1) ROOT add.3 = f16[] add(Arg_0.100, Arg_1.101) } ENTRY main.126 { constant.6 = u32[1]{0} constant({2718843009}) constant.8 = u32[1]{0} constant({1272950319}) constant.10 = u32[1]{0} constant({0}) constant.12 = u32[1]{0} constant({2711844646}) custom-call.65 = (u32[1]{0}, u32[1]{0}) custom-call(constant.6, constant.8, constant.10, constant.12), custom_call_target="cu_threefry2x32", operand_layout_constraints={u32[1]{0}, u32[1]{0}, u32[1]{0}, u32[1]{0}}, api_version=API_VERSION_STATUS_RETURNING, backend_config="\001\000\000\000\000\000\000\000" get-tuple-element.66 = u32[1]{0} get-tuple-element(custom-call.65), index=0 bitcast.343 = u32[] bitcast(get-tuple-element.66) broadcast.27 = u32[98304]{0} broadcast(bitcast.343), dimensions={} get-tuple-element.67 = u32[1]{0} get-tuple-element(custom-call.65), index=1 bitcast.344 = u32[] bitcast(get-tuple-element.67) broadcast.28 = u32[98304]{0} broadcast(bitcast.344), dimensions={} iota.68 = u32[196608]{0} iota(), iota_dimension=0 slice = u32[98304]{0} slice(iota.68), slice={[0:98304]} slice.1 = u32[98304]{0} slice(iota.68), slice={[98304:196608]} custom-call.75 = (u32[98304]{0}, u32[98304]{0}) custom-call(broadcast.27, broadcast.28, slice, slice.1), custom_call_target="cu_threefry2x32", operand_layout_constraints={u32[98304]{0}, u32[98304]{0}, u32[98304]{0}, u32[98304]{0}}, api_version=API_VERSION_STATUS_RETURNING, backend_config="\000\200\001\000\000\000\000\000" get-tuple-element.76 = u32[98304]{0} get-tuple-element(custom-call.75), index=0 get-tuple-element.77 = u32[98304]{0} get-tuple-element(custom-call.75), index=1 concatenate.2 = u32[196608]{0} concatenate(get-tuple-element.76, get-tuple-element.77), dimensions={0} constant.56 = u32[] constant(9) broadcast.63 = u32[196608]{0} broadcast(constant.56), dimensions={} shift-right-logical.3 = u32[196608]{0} shift-right-logical(concatenate.2, broadcast.63) constant.57 = u32[] constant(1065353216) broadcast.64 = u32[196608]{0} broadcast(constant.57), dimensions={} or.3 = u32[196608]{0} or(shift-right-logical.3, broadcast.64) bitcast-convert.3 = f32[196608]{0} bitcast-convert(or.3) constant.58 = f32[] constant(-1) broadcast.65 = f32[196608]{0} broadcast(constant.58), dimensions={} add.10 = f32[196608]{0} add(bitcast-convert.3, broadcast.65) constant.48 = f32[] constant(0) broadcast.66 = f32[196608]{0} broadcast(constant.48), dimensions={} maximum.4 = f32[196608]{0} maximum(add.10, broadcast.66) constant.59 = f32[] constant(0.9) broadcast.67 = f32[196608]{0} broadcast(constant.59), dimensions={} compare.3 = pred[196608]{0} compare(maximum.4, broadcast.67), direction=LT bitcast.308 = pred[2,6,128,128]{3,2,1,0} bitcast(compare.3) constant.44 = pred[2,6,128,128]{3,2,1,0} constant({...}) Arg_0.1 = 
f16[2,6,128,64]{3,2,1,0} parameter(0), sharding={replicated} Arg_1.2 = f16[2,6,64,128]{3,2,1,0} parameter(1), sharding={replicated} dot.34 = f16[2,6,128,128]{3,2,1,0} dot(Arg_0.1, Arg_1.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} constant.55 = f16[] constant(2) broadcast.61 = f16[2,6,128,128]{3,2,1,0} broadcast(constant.55), dimensions={} multiply.8 = f16[2,6,128,128]{3,2,1,0} multiply(dot.34, broadcast.61) constant.52 = f16[] constant(1) broadcast.39 = f16[2,6,128,128]{3,2,1,0} broadcast(constant.52), dimensions={} add.6 = f16[2,6,128,128]{3,2,1,0} add(multiply.8, broadcast.39) constant.54 = f16[] constant(0) broadcast.52 = f16[2,6,128,128]{3,2,1,0} broadcast(constant.54), dimensions={} select.1 = f16[2,6,128,128]{3,2,1,0} select(constant.44, add.6, broadcast.52) constant.41 = f16[] constant(-inf) reduce.42 = f16[2,6,128]{2,1,0} reduce(select.1, constant.41), dimensions={3}, to_apply=region_0.38 broadcast.42 = f16[2,6,128,128]{3,2,1,0} broadcast(reduce.42), dimensions={0,1,2} subtract.1 = f16[2,6,128,128]{3,2,1,0} subtract(select.1, broadcast.42) exponential.1 = f16[2,6,128,128]{3,2,1,0} exponential(subtract.1) convert.5 = f32[2,6,128,128]{3,2,1,0} convert(exponential.1) reduce.54 = f32[2,6,128]{2,1,0} reduce(convert.5, constant.48), dimensions={3}, to_apply=region_1.50 convert.9 = f16[2,6,128]{2,1,0} convert(reduce.54) broadcast.68 = f16[2,6,128,128]{3,2,1,0} broadcast(convert.9), dimensions={0,1,2} divide.5 = f16[2,6,128,128]{3,2,1,0} divide(exponential.1, broadcast.68) constant.60 = f16[] constant(1.1113) broadcast.69 = f16[2,6,128,128]{3,2,1,0} broadcast(constant.60), dimensions={} multiply.20 = f16[2,6,128,128]{3,2,1,0} multiply(divide.5, broadcast.69) select.8 = f16[2,6,128,128]{3,2,1,0} select(bitcast.308, multiply.20, broadcast.52) Arg_2.3 = f16[2,6,128,64]{3,2,1,0} parameter(2), sharding={replicated} dot.88 = f16[2,6,128,64]{3,2,1,0} dot(select.8, Arg_2.3), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} bitcast.248 = pred[2,6,128,128]{3,2,1,0} bitcast(compare.3) Arg_3.4 = f16[2,6,128,64]{3,2,1,0} parameter(3), sharding={replicated} dot.91 = f16[2,6,128,128]{3,2,1,0} dot(Arg_3.4, Arg_2.3), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3} select.6 = f16[2,6,128,128]{3,2,1,0} select(bitcast.248, dot.91, broadcast.52) multiply.17 = f16[2,6,128,128]{3,2,1,0} multiply(select.6, broadcast.69) divide.4 = f16[2,6,128,128]{3,2,1,0} divide(multiply.17, broadcast.68) broadcast.55 = f16[2,6,128]{2,1,0} broadcast(constant.52), dimensions={} multiply.11 = f16[2,6,128]{2,1,0} multiply(convert.9, convert.9) divide.3 = f16[2,6,128]{2,1,0} divide(broadcast.55, multiply.11) broadcast.56 = f16[2,6,128]{2,1,0} broadcast(constant.60), dimensions={} multiply.12 = f16[2,6,128]{2,1,0} multiply(divide.3, broadcast.56) broadcast.58 = f16[2,6,128,128]{3,2,1,0} broadcast(multiply.12), dimensions={0,1,2} multiply.13 = f16[2,6,128,128]{3,2,1,0} multiply(select.6, broadcast.58) multiply.14 = f16[2,6,128,128]{3,2,1,0} multiply(multiply.13, exponential.1) reduce.103 = f16[2,6,128]{2,1,0} reduce(multiply.14, constant.54), dimensions={3}, to_apply=region_2.99 broadcast.62 = f16[2,6,128,128]{3,2,1,0} broadcast(reduce.103), dimensions={0,1,2} add.9 = f16[2,6,128,128]{3,2,1,0} add(divide.4, broadcast.62) multiply.18 = f16[2,6,128,128]{3,2,1,0} multiply(add.9, exponential.1) select.7 = f16[2,6,128,128]{3,2,1,0} select(constant.44, multiply.18, broadcast.52) multiply.19 = 
f16[2,6,128,128]{3,2,1,0} multiply(select.7, broadcast.61) dot.124 = f16[2,6,128,64]{3,2,1,0} dot(multiply.19, Arg_1.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3} dot = f16[2,6,64,128]{3,2,1,0} dot(Arg_0.1, multiply.19), lhs_batch_dims={0,1}, lhs_contracting_dims={2}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} dot.1 = f16[2,6,128,64]{3,2,1,0} dot(select.8, Arg_3.4), lhs_batch_dims={0,1}, lhs_contracting_dims={2}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} ROOT tuple.125 = (f16[2,6,128,64]{3,2,1,0}, f16[2,6,128,64]{3,2,1,0}, f16[2,6,64,128]{3,2,1,0}, f16[2,6,128,64]{3,2,1,0}) tuple(dot.88, dot.124, dot, dot.1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(), GetCudnnVersion()}; TF_ASSERT_OK(RunHloPass(&fusedMhaRewriter, m.get()).status()); HloDCE dce; TF_ASSERT_OK(RunHloPass(&dce, m.get()).status()); ComputationLayout computation_layout( m->entry_computation()->ComputeProgramShape()); HloVerifier verifier(false, true); ASSERT_IS_OK(verifier.Run(m.get()).status()); EXPECT_EQ(CountFusedAttentionCall(m.get()), 0); EXPECT_EQ(CountFusedAttentionCall(m.get(), true), 0); } TEST_F(CudnnFusedMhaRewriterTestHloTest, F16TrainingBmm1ScaleBiasSoftmaxBmm2QTranspose) { if (skip_reason_) GTEST_SKIP() << *skip_reason_; const char* module_str = R"( HloModule jit__unnamed_wrapped_function_, entry_computation_layout={(f16[2,6,64,128]{3,2,1,0},f16[2,6,64,128]{3,2,1,0},f16[2,6,128,64]{3,2,1,0},f16[2,6,128,64]{3,2,1,0})->(f16[2,6,128,64]{3,2,1,0}, f16[2,6,128,64]{3,2,1,0}, f16[2,6,64,128]{3,2,1,0}, f16[2,6,128,64]{3,2,1,0})}, allow_spmd_sharding_propagation_to_output={true,true,true,true} region_0.21 { Arg_0.22 = f16[] parameter(0) Arg_1.23 = f16[] parameter(1) ROOT maximum = f16[] maximum(Arg_0.22, Arg_1.23) } region_1.33 { Arg_0.34 = f32[] parameter(0) Arg_1.35 = f32[] parameter(1) ROOT add = f32[] add(Arg_0.34, Arg_1.35) } region_2.55 { Arg_0.56 = f16[] parameter(0) Arg_1.57 = f16[] parameter(1) ROOT add.1 = f16[] add(Arg_0.56, Arg_1.57) } ENTRY main.82 { Arg_0.1 = f16[2,6,64,128]{3,2,1,0} parameter(0), sharding={replicated} Arg_1.2 = f16[2,6,64,128]{3,2,1,0} parameter(1), sharding={replicated} dot.17 = f16[2,6,128,128]{3,2,1,0} dot(Arg_0.1, Arg_1.2), lhs_batch_dims={0,1}, lhs_contracting_dims={2}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} constant.22 = f16[] constant(2) broadcast.24 = f16[2,6,128,128]{3,2,1,0} broadcast(constant.22), dimensions={} multiply.2 = f16[2,6,128,128]{3,2,1,0} multiply(dot.17, broadcast.24) constant.19 = f16[] constant(1) broadcast.13 = f16[2,6,128,128]{3,2,1,0} broadcast(constant.19), dimensions={} add.3 = f16[2,6,128,128]{3,2,1,0} add(multiply.2, broadcast.13) constant.21 = f16[] constant(0) constant.15 = f16[] constant(-inf) reduce.25 = f16[2,6,128]{2,1,0} reduce(add.3, constant.15), dimensions={3}, to_apply=region_0.21 broadcast.17 = f16[2,6,128,128]{3,2,1,0} broadcast(reduce.25), dimensions={0,1,2} subtract.1 = f16[2,6,128,128]{3,2,1,0} subtract(add.3, broadcast.17) exponential.1 = f16[2,6,128,128]{3,2,1,0} exponential(subtract.1) convert.5 = f32[2,6,128,128]{3,2,1,0} convert(exponential.1) constant.17 = f32[] constant(0) reduce.37 = f32[2,6,128]{2,1,0} reduce(convert.5, constant.17), dimensions={3}, to_apply=region_1.33 convert.9 = f16[2,6,128]{2,1,0} convert(reduce.37) broadcast.26 = f16[2,6,128,128]{3,2,1,0} broadcast(convert.9), dimensions={0,1,2} divide.5 = f16[2,6,128,128]{3,2,1,0} divide(exponential.1, 
broadcast.26) Arg_2.3 = f16[2,6,128,64]{3,2,1,0} parameter(2), sharding={replicated} dot.46 = f16[2,6,128,64]{3,2,1,0} dot(divide.5, Arg_2.3), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} Arg_3.4 = f16[2,6,128,64]{3,2,1,0} parameter(3), sharding={replicated} dot.49 = f16[2,6,128,128]{3,2,1,0} dot(Arg_3.4, Arg_2.3), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3} divide.4 = f16[2,6,128,128]{3,2,1,0} divide(dot.49, broadcast.26) broadcast.20 = f16[2,6,128]{2,1,0} broadcast(constant.19), dimensions={} multiply.3 = f16[2,6,128]{2,1,0} multiply(convert.9, convert.9) divide.3 = f16[2,6,128]{2,1,0} divide(broadcast.20, multiply.3) broadcast.21 = f16[2,6,128,128]{3,2,1,0} broadcast(divide.3), dimensions={0,1,2} multiply.4 = f16[2,6,128,128]{3,2,1,0} multiply(dot.49, broadcast.21) multiply.5 = f16[2,6,128,128]{3,2,1,0} multiply(multiply.4, exponential.1) reduce.59 = f16[2,6,128]{2,1,0} reduce(multiply.5, constant.21), dimensions={3}, to_apply=region_2.55 negate.2 = f16[2,6,128]{2,1,0} negate(reduce.59) broadcast.25 = f16[2,6,128,128]{3,2,1,0} broadcast(negate.2), dimensions={0,1,2} add.5 = f16[2,6,128,128]{3,2,1,0} add(divide.4, broadcast.25) multiply.8 = f16[2,6,128,128]{3,2,1,0} multiply(add.5, exponential.1) multiply.9 = f16[2,6,128,128]{3,2,1,0} multiply(multiply.8, broadcast.24) dot.80 = f16[2,6,128,64]{3,2,1,0} dot(multiply.9, Arg_1.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3} dot = f16[2,6,64,128]{3,2,1,0} dot(Arg_0.1, multiply.9), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} dot.1 = f16[2,6,128,64]{3,2,1,0} dot(divide.5, Arg_3.4), lhs_batch_dims={0,1}, lhs_contracting_dims={2}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} ROOT tuple.81 = (f16[2,6,128,64]{3,2,1,0}, f16[2,6,128,64]{3,2,1,0}, f16[2,6,64,128]{3,2,1,0}, f16[2,6,128,64]{3,2,1,0}) tuple(dot.46, dot.80, dot, dot.1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(), GetCudnnVersion()}; TF_ASSERT_OK(RunHloPass(&fusedMhaRewriter, m.get()).status()); HloDCE dce; TF_ASSERT_OK(RunHloPass(&dce, m.get()).status()); ComputationLayout computation_layout( m->entry_computation()->ComputeProgramShape()); const HloInstruction* fmha; SCOPED_TRACE(m->ToString()); EXPECT_THAT( m->entry_computation()->root_instruction(), GmockMatch(m::Tuple( m::GetTupleElement( m::CustomCall(&fmha, {kCudnnfMHAScaleBiasSoftmaxCallTarget}), 0) .WithShape(F16, {2, 6, 128, 64}), m::GetTupleElement( m::CustomCall(&fmha, {kCudnnfMHAScaleBiasSoftmaxBackwardCallTarget}), 0) .WithShape(F16, {2, 6, 128, 64}), m::Transpose( m::GetTupleElement( m::CustomCall({kCudnnfMHAScaleBiasSoftmaxBackwardCallTarget}), 1)) .WithShape(F16, {2, 6, 64, 128}), m::GetTupleElement( m::CustomCall({kCudnnfMHAScaleBiasSoftmaxBackwardCallTarget}), 2) .WithShape(F16, {2, 6, 128, 64})))); TF_ASSERT_OK_AND_ASSIGN(auto gpu_config, fmha->backend_config<GpuBackendConfig>()); const CudnnfMHABackendConfig& config = gpu_config.cudnn_fmha_backend_config(); EXPECT_EQ(fmha->operands().size(), 7); EXPECT_NEAR(config.dropout_rate(), 0, 1e-2); } TEST_F(CudnnFusedMhaRewriterTestHloTest, F16Bmm1UnfusedSoftmaxBmm2IncorrectBmm1NumUsers) { if (skip_reason_) GTEST_SKIP() << *skip_reason_; const char* module_str = R"( HloModule jit__unnamed_wrapped_function_, 
entry_computation_layout={(f16[2,6,40,64]{3,2,1,0},f16[2,6,64,40]{3,2,1,0},f16[2,6,40,64]{3,2,1,0})->(f16[2,6,40,64]{3,2,1,0}, f16[2,6,40,40]{3,2,1,0})} region_0.7 { Arg_0.8 = f16[] parameter(0) Arg_1.9 = f16[] parameter(1) ROOT maximum = f16[] maximum(Arg_0.8, Arg_1.9) } region_1.19 { Arg_0.20 = f32[] parameter(0) Arg_1.21 = f32[] parameter(1) ROOT add = f32[] add(Arg_0.20, Arg_1.21) } ENTRY main.31 { Arg_0.1 = f16[2,6,40,64]{3,2,1,0} parameter(0), sharding={replicated} Arg_1.2 = f16[2,6,64,40]{3,2,1,0} parameter(1), sharding={replicated} dot = f16[2,6,40,40]{3,2,1,0} dot(Arg_0.1, Arg_1.2), lhs_contracting_dims={3}, rhs_contracting_dims={2}, lhs_batch_dims={0,1}, rhs_batch_dims={0,1} neg.1 = f16[2,6,40,40]{3,2,1,0} negate(dot) constant = f16[] constant(-inf) reduce.11 = f16[2,6,40]{2,1,0} reduce(dot, constant), dimensions={3}, to_apply=region_0.7 broadcast.3 = f16[2,6,40,40]{3,2,1,0} broadcast(reduce.11), dimensions={0,1,2} subtract.1 = f16[2,6,40,40]{3,2,1,0} subtract(dot, broadcast.3) exponential.1 = f16[2,6,40,40]{3,2,1,0} exponential(subtract.1) convert.1 = f32[2,6,40,40]{3,2,1,0} convert(exponential.1) constant.1 = f32[] constant(0) reduce.23 = f32[2,6,40]{2,1,0} reduce(convert.1, constant.1), dimensions={3}, to_apply=region_1.19 convert.2 = f16[2,6,40]{2,1,0} convert(reduce.23) broadcast.4 = f16[2,6,40,40]{3,2,1,0} broadcast(convert.2), dimensions={0,1,2} divide = f16[2,6,40,40]{3,2,1,0} divide(exponential.1, broadcast.4) Arg_2.3 = f16[2,6,40,64]{3,2,1,0} parameter(2), sharding={replicated} dot.1 = f16[2,6,40,64]{3,2,1,0} dot(divide, Arg_2.3), lhs_contracting_dims={3}, rhs_contracting_dims={2}, lhs_batch_dims={0,1}, rhs_batch_dims={0,1} ROOT tuple.81 = (f16[2,6,40,64]{3,2,1,0}, f16[2,6,40,40]{3,2,1,0}) tuple(dot.1, neg.1) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(), GetCudnnVersion()}; TF_ASSERT_OK(RunHloPass(&fusedMhaRewriter, m.get()).status()); SCOPED_TRACE(m->ToString()); EXPECT_THAT(m->entry_computation()->root_instruction(), GmockMatch(m::Tuple(m::Dot(), m::Negate()))); } TEST_F(CudnnFusedMhaRewriterTestHloTest, F16Bmm1UnfusedSoftmaxBmm2IncorrectSoftmaxNumUsers) { if (skip_reason_) GTEST_SKIP() << *skip_reason_; const char* module_str = R"( HloModule jit__unnamed_wrapped_function_, entry_computation_layout={(f16[2,6,40,64]{3,2,1,0},f16[2,6,64,40]{3,2,1,0},f16[2,6,40,64]{3,2,1,0})->(f16[2,6,40,64]{3,2,1,0}, f16[2,6,40,40]{3,2,1,0})} region_0.7 { Arg_0.8 = f16[] parameter(0) Arg_1.9 = f16[] parameter(1) ROOT maximum = f16[] maximum(Arg_0.8, Arg_1.9) } region_1.19 { Arg_0.20 = f32[] parameter(0) Arg_1.21 = f32[] parameter(1) ROOT add = f32[] add(Arg_0.20, Arg_1.21) } ENTRY main.31 { Arg_0.1 = f16[2,6,40,64]{3,2,1,0} parameter(0), sharding={replicated} Arg_1.2 = f16[2,6,64,40]{3,2,1,0} parameter(1), sharding={replicated} dot = f16[2,6,40,40]{3,2,1,0} dot(Arg_0.1, Arg_1.2), lhs_contracting_dims={3}, rhs_contracting_dims={2}, lhs_batch_dims={0,1}, rhs_batch_dims={0,1} constant = f16[] constant(-inf) reduce.11 = f16[2,6,40]{2,1,0} reduce(dot, constant), dimensions={3}, to_apply=region_0.7 broadcast.3 = f16[2,6,40,40]{3,2,1,0} broadcast(reduce.11), dimensions={0,1,2} subtract.1 = f16[2,6,40,40]{3,2,1,0} subtract(dot, broadcast.3) neg.1 = f16[2,6,40,40]{3,2,1,0} negate(subtract.1) exponential.1 = f16[2,6,40,40]{3,2,1,0} exponential(subtract.1) convert.1 = f32[2,6,40,40]{3,2,1,0} convert(exponential.1) constant.1 = f32[] constant(0) reduce.23 = f32[2,6,40]{2,1,0} reduce(convert.1, 
constant.1), dimensions={3}, to_apply=region_1.19 convert.2 = f16[2,6,40]{2,1,0} convert(reduce.23) broadcast.4 = f16[2,6,40,40]{3,2,1,0} broadcast(convert.2), dimensions={0,1,2} divide = f16[2,6,40,40]{3,2,1,0} divide(exponential.1, broadcast.4) Arg_2.3 = f16[2,6,40,64]{3,2,1,0} parameter(2), sharding={replicated} dot.1 = f16[2,6,40,64]{3,2,1,0} dot(divide, Arg_2.3), lhs_contracting_dims={3}, rhs_contracting_dims={2}, lhs_batch_dims={0,1}, rhs_batch_dims={0,1} ROOT tuple.81 = (f16[2,6,40,64]{3,2,1,0}, f16[2,6,40,40]{3,2,1,0}) tuple(dot.1, neg.1) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(), GetCudnnVersion()}; TF_ASSERT_OK(RunHloPass(&fusedMhaRewriter, m.get()).status()); SCOPED_TRACE(m->ToString()); EXPECT_THAT(m->entry_computation()->root_instruction(), GmockMatch(m::Tuple(m::Dot(), m::Negate()))); } TEST_F(CudnnFusedMhaRewriterTestHloTest, F16TrainingBmm1ScaleBiasSoftmaxBmm2IncorrectSoftmaxBwdNumUsers) { if (skip_reason_) GTEST_SKIP() << *skip_reason_; const char* module_str = R"( HloModule jit__unnamed_wrapped_function_, entry_computation_layout={(f16[2,6,64,128]{3,2,1,0},f16[2,6,64,128]{3,2,1,0},f16[2,6,128,64]{3,2,1,0},f16[2,6,128,64]{3,2,1,0})->(f16[2,6,128,64]{3,2,1,0}, f16[2,6,128,64]{3,2,1,0}, f16[2,6,64,128]{3,2,1,0}, f16[2,6,128,64]{3,2,1,0}, f16[2,6,128,128]{3,2,1,0})}, allow_spmd_sharding_propagation_to_output={true,true,true,true} region_0.21 { Arg_0.22 = f16[] parameter(0) Arg_1.23 = f16[] parameter(1) ROOT maximum = f16[] maximum(Arg_0.22, Arg_1.23) } region_1.33 { Arg_0.34 = f32[] parameter(0) Arg_1.35 = f32[] parameter(1) ROOT add = f32[] add(Arg_0.34, Arg_1.35) } region_2.55 { Arg_0.56 = f16[] parameter(0) Arg_1.57 = f16[] parameter(1) ROOT add.1 = f16[] add(Arg_0.56, Arg_1.57) } ENTRY main.82 { Arg_0.1 = f16[2,6,64,128]{3,2,1,0} parameter(0), sharding={replicated} Arg_1.2 = f16[2,6,64,128]{3,2,1,0} parameter(1), sharding={replicated} dot.17 = f16[2,6,128,128]{3,2,1,0} dot(Arg_0.1, Arg_1.2), lhs_batch_dims={0,1}, lhs_contracting_dims={2}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} constant.22 = f16[] constant(2) broadcast.24 = f16[2,6,128,128]{3,2,1,0} broadcast(constant.22), dimensions={} multiply.2 = f16[2,6,128,128]{3,2,1,0} multiply(dot.17, broadcast.24) constant.19 = f16[] constant(1) broadcast.13 = f16[2,6,128,128]{3,2,1,0} broadcast(constant.19), dimensions={} add.3 = f16[2,6,128,128]{3,2,1,0} add(multiply.2, broadcast.13) constant.21 = f16[] constant(0) constant.15 = f16[] constant(-inf) reduce.25 = f16[2,6,128]{2,1,0} reduce(add.3, constant.15), dimensions={3}, to_apply=region_0.21 broadcast.17 = f16[2,6,128,128]{3,2,1,0} broadcast(reduce.25), dimensions={0,1,2} subtract.1 = f16[2,6,128,128]{3,2,1,0} subtract(add.3, broadcast.17) exponential.1 = f16[2,6,128,128]{3,2,1,0} exponential(subtract.1) convert.5 = f32[2,6,128,128]{3,2,1,0} convert(exponential.1) constant.17 = f32[] constant(0) reduce.37 = f32[2,6,128]{2,1,0} reduce(convert.5, constant.17), dimensions={3}, to_apply=region_1.33 convert.9 = f16[2,6,128]{2,1,0} convert(reduce.37) broadcast.26 = f16[2,6,128,128]{3,2,1,0} broadcast(convert.9), dimensions={0,1,2} divide.5 = f16[2,6,128,128]{3,2,1,0} divide(exponential.1, broadcast.26) Arg_2.3 = f16[2,6,128,64]{3,2,1,0} parameter(2), sharding={replicated} dot.46 = f16[2,6,128,64]{3,2,1,0} dot(divide.5, Arg_2.3), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} Arg_3.4 = f16[2,6,128,64]{3,2,1,0} parameter(3), 
sharding={replicated} dot.49 = f16[2,6,128,128]{3,2,1,0} dot(Arg_3.4, Arg_2.3), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3} divide.4 = f16[2,6,128,128]{3,2,1,0} divide(dot.49, broadcast.26) neg.1 = f16[2,6,128,128]{3,2,1,0} negate(divide.4) broadcast.20 = f16[2,6,128]{2,1,0} broadcast(constant.19), dimensions={} multiply.3 = f16[2,6,128]{2,1,0} multiply(convert.9, convert.9) divide.3 = f16[2,6,128]{2,1,0} divide(broadcast.20, multiply.3) broadcast.21 = f16[2,6,128,128]{3,2,1,0} broadcast(divide.3), dimensions={0,1,2} multiply.4 = f16[2,6,128,128]{3,2,1,0} multiply(dot.49, broadcast.21) multiply.5 = f16[2,6,128,128]{3,2,1,0} multiply(multiply.4, exponential.1) reduce.59 = f16[2,6,128]{2,1,0} reduce(multiply.5, constant.21), dimensions={3}, to_apply=region_2.55 negate.2 = f16[2,6,128]{2,1,0} negate(reduce.59) broadcast.25 = f16[2,6,128,128]{3,2,1,0} broadcast(negate.2), dimensions={0,1,2} add.5 = f16[2,6,128,128]{3,2,1,0} add(divide.4, broadcast.25) multiply.8 = f16[2,6,128,128]{3,2,1,0} multiply(add.5, exponential.1) multiply.9 = f16[2,6,128,128]{3,2,1,0} multiply(multiply.8, broadcast.24) dot.80 = f16[2,6,128,64]{3,2,1,0} dot(multiply.9, Arg_1.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3} dot = f16[2,6,64,128]{3,2,1,0} dot(Arg_0.1, multiply.9), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} dot.1 = f16[2,6,128,64]{3,2,1,0} dot(divide.5, Arg_3.4), lhs_batch_dims={0,1}, lhs_contracting_dims={2}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} ROOT tuple.81 = (f16[2,6,128,64]{3,2,1,0}, f16[2,6,128,64]{3,2,1,0}, f16[2,6,64,128]{3,2,1,0}, f16[2,6,128,64]{3,2,1,0}, f16[2,6,128,128]{3,2,1,0}) tuple(dot.46, dot.80, dot, dot.1, neg.1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(), GetCudnnVersion()}; TF_ASSERT_OK(RunHloPass(&fusedMhaRewriter, m.get()).status()); HloDCE dce; TF_ASSERT_OK(RunHloPass(&dce, m.get()).status()); ComputationLayout computation_layout( m->entry_computation()->ComputeProgramShape()); SCOPED_TRACE(m->ToString()); EXPECT_THAT(m->entry_computation()->root_instruction(), GmockMatch(m::Tuple(m::Dot(), m::Dot(), m::Dot(), m::Dot(), m::Negate()))); } TEST_F(CudnnFusedMhaRewriterTestHloTest, F16Bmm1SoftmaxBmm2IncorrectRank) { if (skip_reason_) GTEST_SKIP() << *skip_reason_; const char* module_str = R"( HloModule reproducer, entry_computation_layout={(f16[1,8,16,5,128]{4,3,2,1,0}, f16[1,8,16,5,128]{4,3,2,1,0}, f16[1,8,16,5,128]{4,3,2,1,0}, f32[128,2,64]{2,1,0}, f32[2,64]{1,0}, f32[128,2,64]{2,1,0}, f32[2,64]{1,0}, f32[128,2,64]{2,1,0}, f32[2,64]{1,0})->f16[8,16,2,5,64]{4,3,2,1,0}} region_0.36 { Arg_0.37 = f16[] parameter(0) Arg_1.38 = f16[] parameter(1) ROOT maximum = f16[] maximum(Arg_0.37, Arg_1.38) } region_1.48 { Arg_0.49 = f32[] parameter(0) Arg_1.50 = f32[] parameter(1) ROOT add.1 = f32[] add(Arg_0.49, Arg_1.50) } ENTRY main { arg2.3 = f16[1,8,16,5,128]{4,3,2,1,0} parameter(2), parameter_replication={false} bitcast.31 = f16[640,128]{1,0} bitcast(arg2.3) arg5.6 = f32[128,2,64]{2,1,0} parameter(5), parameter_replication={false} convert.3 = f16[128,2,64]{2,1,0} convert(arg5.6) bitcast.36 = f16[128,128]{1,0} bitcast(convert.3) dot = f16[640,128]{1,0} dot(bitcast.31, bitcast.36), lhs_contracting_dims={1}, rhs_contracting_dims={0}, frontend_attributes={grad_x="false",grad_y="false"} bitcast.39 = f16[1,8,16,5,2,64]{5,4,3,2,1,0} 
bitcast(dot) transpose.27 = f16[1,8,16,2,5,64]{5,4,3,2,1,0} transpose(bitcast.39), dimensions={0,1,2,4,3,5}, frontend_attributes={grad_x="false",grad_y="false"} arg6.7 = f32[2,64]{1,0} parameter(6), parameter_replication={false} convert.4 = f16[2,64]{1,0} convert(arg6.7) broadcast.9 = f16[1,8,16,2,5,64]{5,4,3,2,1,0} broadcast(convert.4), dimensions={3,5} add.2 = f16[1,8,16,2,5,64]{5,4,3,2,1,0} add(transpose.27, broadcast.9) bitcast.49 = f16[8,16,2,5,64]{4,3,2,1,0} bitcast(add.2) arg0.1 = f16[1,8,16,5,128]{4,3,2,1,0} parameter(0), parameter_replication={false} bitcast.53 = f16[640,128]{1,0} bitcast(arg0.1) arg3.4 = f32[128,2,64]{2,1,0} parameter(3), parameter_replication={false} convert.5 = f16[128,2,64]{2,1,0} convert(arg3.4) bitcast.58 = f16[128,128]{1,0} bitcast(convert.5) dot.1 = f16[640,128]{1,0} dot(bitcast.53, bitcast.58), lhs_contracting_dims={1}, rhs_contracting_dims={0}, frontend_attributes={grad_x="false",grad_y="false"} bitcast.61 = f16[1,8,16,5,2,64]{5,4,3,2,1,0} bitcast(dot.1) transpose.28 = f16[1,8,16,2,64,5]{5,4,3,2,1,0} transpose(bitcast.61), dimensions={0,1,2,4,5,3}, frontend_attributes={grad_x="false",grad_y="false"} arg4.5 = f32[2,64]{1,0} parameter(4), parameter_replication={false} convert.6 = f16[2,64]{1,0} convert(arg4.5) broadcast.10 = f16[1,8,16,2,64,5]{5,4,3,2,1,0} broadcast(convert.6), dimensions={3,4} add.3 = f16[1,8,16,2,64,5]{5,4,3,2,1,0} add(transpose.28, broadcast.10) constant.29 = f16[] constant(0.125) broadcast.11 = f16[1,8,16,2,64,5]{5,4,3,2,1,0} broadcast(constant.29), dimensions={} multiply = f16[1,8,16,2,64,5]{5,4,3,2,1,0} multiply(add.3, broadcast.11) bitcast.74 = f16[8,16,2,64,5]{4,3,2,1,0} bitcast(multiply) dot.6 = f16[8,16,2,5,5]{4,3,2,1,0} dot(bitcast.49, bitcast.74), lhs_batch_dims={0,1,2}, lhs_contracting_dims={4}, rhs_batch_dims={0,1,2}, rhs_contracting_dims={3}, frontend_attributes={grad_x="false",grad_y="false"} constant.35 = f16[] constant(-inf) reduce.1 = f16[8,16,2,5]{3,2,1,0} reduce(dot.6, constant.35), dimensions={3}, to_apply=region_0.36 broadcast.12 = f16[8,16,2,5,5]{4,3,2,1,0} broadcast(reduce.1), dimensions={0,1,2,4} subtract.2 = f16[8,16,2,5,5]{4,3,2,1,0} subtract(dot.6, broadcast.12) exponential.2 = f16[8,16,2,5,5]{4,3,2,1,0} exponential(subtract.2) convert.7 = f32[8,16,2,5,5]{4,3,2,1,0} convert(exponential.2) constant.34 = f32[] constant(0) reduce.3 = f32[8,16,2,5]{3,2,1,0} reduce(convert.7, constant.34), dimensions={3}, to_apply=region_1.48 convert.8 = f16[8,16,2,5]{3,2,1,0} convert(reduce.3) broadcast.13 = f16[8,16,2,5,5]{4,3,2,1,0} broadcast(convert.8), dimensions={0,1,2,4} divide.2 = f16[8,16,2,5,5]{4,3,2,1,0} divide(exponential.2, broadcast.13) bitcast.98 = f16[8,16,2,5,5]{3,4,2,1,0} bitcast(divide.2) arg1.2 = f16[1,8,16,5,128]{4,3,2,1,0} parameter(1), parameter_replication={false} bitcast.102 = f16[640,128]{1,0} bitcast(arg1.2) arg7.8 = f32[128,2,64]{2,1,0} parameter(7), parameter_replication={false} convert.9 = f16[128,2,64]{2,1,0} convert(arg7.8) bitcast.107 = f16[128,128]{1,0} bitcast(convert.9) dot.3 = f16[640,128]{1,0} dot(bitcast.102, bitcast.107), lhs_contracting_dims={1}, rhs_contracting_dims={0}, frontend_attributes={grad_x="false",grad_y="false"} bitcast.110 = f16[1,8,16,5,2,64]{5,4,3,2,1,0} bitcast(dot.3) transpose.30 = f16[1,8,16,2,5,64]{5,4,3,2,1,0} transpose(bitcast.110), dimensions={0,1,2,4,3,5}, frontend_attributes={grad_x="false",grad_y="false"} arg8.9 = f32[2,64]{1,0} parameter(8), parameter_replication={false} convert.10 = f16[2,64]{1,0} convert(arg8.9) broadcast.14 = f16[1,8,16,2,5,64]{5,4,3,2,1,0} 
broadcast(convert.10), dimensions={3,5} add.4 = f16[1,8,16,2,5,64]{5,4,3,2,1,0} add(transpose.30, broadcast.14) bitcast.120 = f16[8,16,2,5,64]{4,3,2,1,0} bitcast(add.4) ROOT dot.7 = f16[8,16,2,5,64]{4,3,2,1,0} dot(bitcast.98, bitcast.120), lhs_batch_dims={0,1,2}, lhs_contracting_dims={4}, rhs_batch_dims={0,1,2}, rhs_contracting_dims={3}, frontend_attributes={grad_x="false",grad_y="false"} } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(), GetCudnnVersion()}; const auto status_or = RunHloPass(&fusedMhaRewriter, m.get()); TF_ASSERT_OK(status_or.status()); EXPECT_FALSE(status_or.value()); HloDCE dce; TF_ASSERT_OK(RunHloPass(&dce, m.get()).status()); ComputationLayout computation_layout( m->entry_computation()->ComputeProgramShape()); SCOPED_TRACE(m->ToString()); EXPECT_THAT(m->entry_computation()->root_instruction(), GmockMatch(m::Dot())); } TEST_F(CudnnFusedMhaRewriterTestHloTest, F16TrainingBmm2Grad1IncorrectPattern) { if (skip_reason_) GTEST_SKIP() << *skip_reason_; const char* module_str = R"( HloModule jit__unnamed_wrapped_function_, entry_computation_layout={(f16[2,6,64,128]{3,2,1,0},f16[2,6,64,128]{3,2,1,0},f16[2,6,128,64]{3,2,1,0},f16[2,6,128,64]{3,2,1,0})->(f16[2,6,128,64]{3,2,1,0}, f16[2,6,128,64]{3,2,1,0}, f16[2,6,64,128]{3,2,1,0}, f16[2,6,128,64]{3,2,1,0}, f16[2,6,128,128]{3,2,1,0})}, allow_spmd_sharding_propagation_to_output={true,true,true,true} region_0.21 { Arg_0.22 = f16[] parameter(0) Arg_1.23 = f16[] parameter(1) ROOT maximum = f16[] maximum(Arg_0.22, Arg_1.23) } region_1.33 { Arg_0.34 = f32[] parameter(0) Arg_1.35 = f32[] parameter(1) ROOT add = f32[] add(Arg_0.34, Arg_1.35) } region_2.55 { Arg_0.56 = f16[] parameter(0) Arg_1.57 = f16[] parameter(1) ROOT add.1 = f16[] add(Arg_0.56, Arg_1.57) } ENTRY main.82 { Arg_0.1 = f16[2,6,64,128]{3,2,1,0} parameter(0), sharding={replicated} Arg_1.2 = f16[2,6,64,128]{3,2,1,0} parameter(1), sharding={replicated} dot.17 = f16[2,6,128,128]{3,2,1,0} dot(Arg_0.1, Arg_1.2), lhs_batch_dims={0,1}, lhs_contracting_dims={2}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} constant.22 = f16[] constant(2) broadcast.24 = f16[2,6,128,128]{3,2,1,0} broadcast(constant.22), dimensions={} multiply.2 = f16[2,6,128,128]{3,2,1,0} multiply(dot.17, broadcast.24) constant.19 = f16[] constant(1) broadcast.13 = f16[2,6,128,128]{3,2,1,0} broadcast(constant.19), dimensions={} add.3 = f16[2,6,128,128]{3,2,1,0} add(multiply.2, broadcast.13) constant.21 = f16[] constant(0) constant.15 = f16[] constant(-inf) reduce.25 = f16[2,6,128]{2,1,0} reduce(add.3, constant.15), dimensions={3}, to_apply=region_0.21 broadcast.17 = f16[2,6,128,128]{3,2,1,0} broadcast(reduce.25), dimensions={0,1,2} subtract.1 = f16[2,6,128,128]{3,2,1,0} subtract(add.3, broadcast.17) exponential.1 = f16[2,6,128,128]{3,2,1,0} exponential(subtract.1) convert.5 = f32[2,6,128,128]{3,2,1,0} convert(exponential.1) constant.17 = f32[] constant(0) reduce.37 = f32[2,6,128]{2,1,0} reduce(convert.5, constant.17), dimensions={3}, to_apply=region_1.33 convert.9 = f16[2,6,128]{2,1,0} convert(reduce.37) broadcast.26 = f16[2,6,128,128]{3,2,1,0} broadcast(convert.9), dimensions={0,1,2} divide.5 = f16[2,6,128,128]{3,2,1,0} divide(exponential.1, broadcast.26) Arg_2.3 = f16[2,6,128,64]{3,2,1,0} parameter(2), sharding={replicated} dot.46 = f16[2,6,128,64]{3,2,1,0} dot(divide.5, Arg_2.3), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} Arg_3.4 = f16[2,6,128,64]{3,2,1,0} 
parameter(3), sharding={replicated} dot.49 = f16[2,6,128,128]{3,2,1,0} dot(Arg_3.4, Arg_2.3), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3} divide.4 = f16[2,6,128,128]{3,2,1,0} divide(dot.49, broadcast.26) broadcast.20 = f16[2,6,128]{2,1,0} broadcast(constant.19), dimensions={} multiply.3 = f16[2,6,128]{2,1,0} multiply(convert.9, convert.9) divide.3 = f16[2,6,128]{2,1,0} divide(broadcast.20, multiply.3) broadcast.21 = f16[2,6,128,128]{3,2,1,0} broadcast(divide.3), dimensions={0,1,2} multiply.4 = f16[2,6,128,128]{3,2,1,0} multiply(dot.49, broadcast.21) multiply.5 = f16[2,6,128,128]{3,2,1,0} multiply(multiply.4, exponential.1) reduce.59 = f16[2,6,128]{2,1,0} reduce(multiply.5, constant.21), dimensions={3}, to_apply=region_2.55 negate.2 = f16[2,6,128]{2,1,0} negate(reduce.59) broadcast.25 = f16[2,6,128,128]{3,2,1,0} broadcast(negate.2), dimensions={0,1,2} add.5 = f16[2,6,128,128]{3,2,1,0} add(divide.4, broadcast.25) multiply.8 = f16[2,6,128,128]{3,2,1,0} multiply(add.5, exponential.1) multiply.9 = f16[2,6,128,128]{3,2,1,0} multiply(multiply.8, broadcast.24) dot.80 = f16[2,6,128,64]{3,2,1,0} dot(multiply.9, Arg_1.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3} dot = f16[2,6,64,128]{3,2,1,0} dot(Arg_0.1, multiply.9), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} neg.1 = f16[2,6,128,128]{3,2,1,0} negate(multiply.9) dot.1 = f16[2,6,128,64]{3,2,1,0} dot(divide.5, Arg_3.4), lhs_batch_dims={0,1}, lhs_contracting_dims={2}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} ROOT tuple.81 = (f16[2,6,128,64]{3,2,1,0}, f16[2,6,128,64]{3,2,1,0}, f16[2,6,64,128]{3,2,1,0}, f16[2,6,128,64]{3,2,1,0}, f16[2,6,128,128]{3,2,1,0}) tuple(dot.46, dot.80, dot, dot.1, neg.1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(), GetCudnnVersion()}; TF_ASSERT_OK(RunHloPass(&fusedMhaRewriter, m.get()).status()); HloDCE dce; TF_ASSERT_OK(RunHloPass(&dce, m.get()).status()); ComputationLayout computation_layout( m->entry_computation()->ComputeProgramShape()); SCOPED_TRACE(m->ToString()); EXPECT_THAT(m->entry_computation()->root_instruction(), GmockMatch(m::Tuple(m::Dot(), m::Dot(), m::Dot(), m::Dot(), m::Negate()))); } TEST_F(CudnnFusedMhaRewriterTestHloTest, FlashAttentionBF16TrainingBmm1CausalMaskSoftmaxBmm2Pattern) { if (skip_reason_) GTEST_SKIP() << *skip_reason_; const char* module_str = R"( HloModule jit__unnamed_wrapped_function_, entry_computation_layout={(bf16[2,6,2048,128]{3,2,1,0},bf16[2,6,128,2048]{3,2,1,0},bf16[2,6,2048,128]{3,2,1,0},bf16[2,6,2048,128]{3,2,1,0})->(bf16[2,6,2048,128]{3,2,1,0}, bf16[2,6,2048,128]{3,2,1,0}, bf16[2,6,128,2048]{3,2,1,0}, bf16[2,6,2048,128]{3,2,1,0})}, allow_spmd_sharding_propagation_to_output={true,true,true,true} region_0.32 { Arg_0.33 = bf16[] parameter(0) Arg_1.34 = bf16[] parameter(1) ROOT maximum = bf16[] maximum(Arg_0.33, Arg_1.34) } region_1.44 { Arg_0.45 = f32[] parameter(0) Arg_1.46 = f32[] parameter(1) ROOT add = f32[] add(Arg_0.45, Arg_1.46) } region_2.66 { Arg_0.67 = bf16[] parameter(0) Arg_1.68 = bf16[] parameter(1) ROOT add.1 = bf16[] add(Arg_0.67, Arg_1.68) } ENTRY main.92 { Arg_0.1 = bf16[2,6,2048,128]{3,2,1,0} parameter(0), sharding={replicated} Arg_1.2 = bf16[2,6,128,2048]{3,2,1,0} parameter(1), sharding={replicated} dot.14 = bf16[2,6,2048,2048]{3,2,1,0} dot(Arg_0.1, Arg_1.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, 
rhs_batch_dims={0,1}, rhs_contracting_dims={2} constant.17 = bf16[] constant(2) broadcast.29 = bf16[2,6,2048,2048]{3,2,1,0} broadcast(constant.17), dimensions={} multiply.2 = bf16[2,6,2048,2048]{3,2,1,0} multiply(dot.14, broadcast.29) iota.2 = s32[2048,2048]{1,0} iota(), iota_dimension=0 iota.5 = s32[2048,2048]{1,0} iota(), iota_dimension=1 compare.1 = pred[2048,2048]{1,0} compare(iota.2, iota.5), direction=LT constant.6 = bf16[] constant(-2.366e+38) broadcast.16 = bf16[2048,2048]{1,0} broadcast(constant.6), dimensions={} constant.16 = bf16[] constant(0) broadcast.17 = bf16[2048,2048]{1,0} broadcast(constant.16), dimensions={} select.2 = bf16[2048,2048]{1,0} select(compare.1, broadcast.16, broadcast.17) broadcast.19 = bf16[2,6,2048,2048]{3,2,1,0} broadcast(select.2), dimensions={2,3} add.3 = bf16[2,6,2048,2048]{3,2,1,0} add(multiply.2, broadcast.19) constant.10 = bf16[] constant(-inf) reduce.36 = bf16[2,6,2048]{2,1,0} reduce(add.3, constant.10), dimensions={3}, to_apply=region_0.32 broadcast.21 = bf16[2,6,2048,2048]{3,2,1,0} broadcast(reduce.36), dimensions={0,1,2} subtract.1 = bf16[2,6,2048,2048]{3,2,1,0} subtract(add.3, broadcast.21) exponential.1 = bf16[2,6,2048,2048]{3,2,1,0} exponential(subtract.1) convert.5 = f32[2,6,2048,2048]{3,2,1,0} convert(exponential.1) constant.14 = f32[] constant(0) reduce.48 = f32[2,6,2048]{2,1,0} reduce(convert.5, constant.14), dimensions={3}, to_apply=region_1.44 convert.9 = bf16[2,6,2048]{2,1,0} convert(reduce.48) broadcast.32 = bf16[2,6,2048,2048]{3,2,1,0} broadcast(convert.9), dimensions={0,1,2} divide.5 = bf16[2,6,2048,2048]{3,2,1,0} divide(exponential.1, broadcast.32) Arg_2.3 = bf16[2,6,2048,128]{3,2,1,0} parameter(2), sharding={replicated} dot.57 = bf16[2,6,2048,128]{3,2,1,0} dot(divide.5, Arg_2.3), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} Arg_3.4 = bf16[2,6,2048,128]{3,2,1,0} parameter(3), sharding={replicated} dot.60 = bf16[2,6,2048,2048]{3,2,1,0} dot(Arg_3.4, Arg_2.3), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3} divide.4 = bf16[2,6,2048,2048]{3,2,1,0} divide(dot.60, broadcast.32) constant.15 = bf16[] constant(1) broadcast.25 = bf16[2,6,2048]{2,1,0} broadcast(constant.15), dimensions={} multiply.3 = bf16[2,6,2048]{2,1,0} multiply(convert.9, convert.9) divide.3 = bf16[2,6,2048]{2,1,0} divide(broadcast.25, multiply.3) broadcast.26 = bf16[2,6,2048,2048]{3,2,1,0} broadcast(divide.3), dimensions={0,1,2} multiply.4 = bf16[2,6,2048,2048]{3,2,1,0} multiply(dot.60, broadcast.26) multiply.5 = bf16[2,6,2048,2048]{3,2,1,0} multiply(multiply.4, exponential.1) reduce.70 = bf16[2,6,2048]{2,1,0} reduce(multiply.5, constant.16), dimensions={3}, to_apply=region_2.66 negate.2 = bf16[2,6,2048]{2,1,0} negate(reduce.70) broadcast.31 = bf16[2,6,2048,2048]{3,2,1,0} broadcast(negate.2), dimensions={0,1,2} add.5 = bf16[2,6,2048,2048]{3,2,1,0} add(divide.4, broadcast.31) multiply.8 = bf16[2,6,2048,2048]{3,2,1,0} multiply(add.5, exponential.1) multiply.9 = bf16[2,6,2048,2048]{3,2,1,0} multiply(multiply.8, broadcast.29) dot.90 = bf16[2,6,2048,128]{3,2,1,0} dot(multiply.9, Arg_1.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3} dot = bf16[2,6,128,2048]{3,2,1,0} dot(Arg_0.1, multiply.9), lhs_batch_dims={0,1}, lhs_contracting_dims={2}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} dot.1 = bf16[2,6,2048,128]{3,2,1,0} dot(divide.5, Arg_3.4), lhs_batch_dims={0,1}, lhs_contracting_dims={2}, rhs_batch_dims={0,1}, 
rhs_contracting_dims={2} ROOT tuple.91 = (bf16[2,6,2048,128]{3,2,1,0}, bf16[2,6,2048,128]{3,2,1,0}, bf16[2,6,128,2048]{3,2,1,0}, bf16[2,6,2048,128]{3,2,1,0}) tuple(dot.57, dot.90, dot, dot.1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(), GetCudnnVersion()}; TF_ASSERT_OK(RunHloPass(&fusedMhaRewriter, m.get()).status()); HloDCE dce; TF_ASSERT_OK(RunHloPass(&dce, m.get()).status()); ComputationLayout computation_layout( m->entry_computation()->ComputeProgramShape()); const HloInstruction* fwd_fmha; const HloInstruction* bwd_fmha; SCOPED_TRACE(m->ToString()); EXPECT_THAT( m->entry_computation()->root_instruction(), GmockMatch(m::Tuple( m::GetTupleElement( m::CustomCall(&fwd_fmha, {kCudnnfMHASoftmaxCallTarget}), 0) .WithShape(BF16, {2, 6, 2048, 128}), m::GetTupleElement( m::CustomCall(&bwd_fmha, {kCudnnfMHASoftmaxBackwardCallTarget}), 0) .WithShape(BF16, {2, 6, 2048, 128}), m::Transpose( m::GetTupleElement( m::CustomCall({kCudnnfMHASoftmaxBackwardCallTarget}), 1)) .WithShape(BF16, {2, 6, 128, 2048}), m::GetTupleElement( m::CustomCall({kCudnnfMHASoftmaxBackwardCallTarget}), 2) .WithShape(BF16, {2, 6, 2048, 128})))); TF_ASSERT_OK_AND_ASSIGN(auto gpu_config, fwd_fmha->backend_config<GpuBackendConfig>()); const CudnnfMHABackendConfig& config = gpu_config.cudnn_fmha_backend_config(); EXPECT_EQ(fwd_fmha->operands().size(), 3); EXPECT_EQ(bwd_fmha->operands().size(), 6); EXPECT_NEAR(config.dropout_rate(), 0, 1e-2); EXPECT_EQ(config.mask_type(), CudnnfMHABackendConfig::CAUSAL); } TEST_F(CudnnFusedMhaRewriterTestHloTest, FlashAttentionBF16TrainingBmm1BiasSoftmaxBmm2Pattern) { if (skip_reason_) GTEST_SKIP() << *skip_reason_; const char* module_str = R"( HloModule jit__unnamed_wrapped_function_, entry_computation_layout={(bf16[2,6,2048,128]{3,2,1,0},bf16[2,6,128,2048]{3,2,1,0},bf16[2,6,2048,128]{3,2,1,0},bf16[2,6,2048,128]{3,2,1,0},bf16[2,6,2048,2048]{3,2,1,0})->(bf16[2,6,2048,128]{3,2,1,0}, bf16[2,6,2048,128]{3,2,1,0}, bf16[2,6,128,2048]{3,2,1,0}, bf16[2,6,2048,128]{3,2,1,0})}, allow_spmd_sharding_propagation_to_output={true,true,true,true} region_0.32 { Arg_0.33 = bf16[] parameter(0) Arg_1.34 = bf16[] parameter(1) ROOT maximum = bf16[] maximum(Arg_0.33, Arg_1.34) } region_1.44 { Arg_0.45 = f32[] parameter(0) Arg_1.46 = f32[] parameter(1) ROOT add = f32[] add(Arg_0.45, Arg_1.46) } region_2.66 { Arg_0.67 = bf16[] parameter(0) Arg_1.68 = bf16[] parameter(1) ROOT add.1 = bf16[] add(Arg_0.67, Arg_1.68) } ENTRY main.92 { Arg_0.1 = bf16[2,6,2048,128]{3,2,1,0} parameter(0), sharding={replicated} Arg_1.2 = bf16[2,6,128,2048]{3,2,1,0} parameter(1), sharding={replicated} dot.14 = bf16[2,6,2048,2048]{3,2,1,0} dot(Arg_0.1, Arg_1.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} constant.17 = bf16[] constant(2) broadcast.29 = bf16[2,6,2048,2048]{3,2,1,0} broadcast(constant.17), dimensions={} multiply.2 = bf16[2,6,2048,2048]{3,2,1,0} multiply(dot.14, broadcast.29) Arg_4.5 = bf16[2,6,2048,2048]{3,2,1,0} parameter(4), sharding={replicated} add.3 = bf16[2,6,2048,2048]{3,2,1,0} add(multiply.2, Arg_4.5) constant.10 = bf16[] constant(-inf) constant.16 = bf16[] constant(0) reduce.36 = bf16[2,6,2048]{2,1,0} reduce(add.3, constant.10), dimensions={3}, to_apply=region_0.32 broadcast.21 = bf16[2,6,2048,2048]{3,2,1,0} broadcast(reduce.36), dimensions={0,1,2} subtract.1 = bf16[2,6,2048,2048]{3,2,1,0} subtract(add.3, broadcast.21) exponential.1 = bf16[2,6,2048,2048]{3,2,1,0} 
exponential(subtract.1) convert.5 = f32[2,6,2048,2048]{3,2,1,0} convert(exponential.1) constant.14 = f32[] constant(0) reduce.48 = f32[2,6,2048]{2,1,0} reduce(convert.5, constant.14), dimensions={3}, to_apply=region_1.44 convert.9 = bf16[2,6,2048]{2,1,0} convert(reduce.48) broadcast.32 = bf16[2,6,2048,2048]{3,2,1,0} broadcast(convert.9), dimensions={0,1,2} divide.5 = bf16[2,6,2048,2048]{3,2,1,0} divide(exponential.1, broadcast.32) Arg_2.3 = bf16[2,6,2048,128]{3,2,1,0} parameter(2), sharding={replicated} dot.57 = bf16[2,6,2048,128]{3,2,1,0} dot(divide.5, Arg_2.3), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} Arg_3.4 = bf16[2,6,2048,128]{3,2,1,0} parameter(3), sharding={replicated} dot.60 = bf16[2,6,2048,2048]{3,2,1,0} dot(Arg_3.4, Arg_2.3), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3} divide.4 = bf16[2,6,2048,2048]{3,2,1,0} divide(dot.60, broadcast.32) constant.15 = bf16[] constant(1) broadcast.25 = bf16[2,6,2048]{2,1,0} broadcast(constant.15), dimensions={} multiply.3 = bf16[2,6,2048]{2,1,0} multiply(convert.9, convert.9) divide.3 = bf16[2,6,2048]{2,1,0} divide(broadcast.25, multiply.3) broadcast.26 = bf16[2,6,2048,2048]{3,2,1,0} broadcast(divide.3), dimensions={0,1,2} multiply.4 = bf16[2,6,2048,2048]{3,2,1,0} multiply(dot.60, broadcast.26) multiply.5 = bf16[2,6,2048,2048]{3,2,1,0} multiply(multiply.4, exponential.1) reduce.70 = bf16[2,6,2048]{2,1,0} reduce(multiply.5, constant.16), dimensions={3}, to_apply=region_2.66 negate.2 = bf16[2,6,2048]{2,1,0} negate(reduce.70) broadcast.31 = bf16[2,6,2048,2048]{3,2,1,0} broadcast(negate.2), dimensions={0,1,2} add.5 = bf16[2,6,2048,2048]{3,2,1,0} add(divide.4, broadcast.31) multiply.8 = bf16[2,6,2048,2048]{3,2,1,0} multiply(add.5, exponential.1) multiply.9 = bf16[2,6,2048,2048]{3,2,1,0} multiply(multiply.8, broadcast.29) dot.90 = bf16[2,6,2048,128]{3,2,1,0} dot(multiply.9, Arg_1.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3} dot = bf16[2,6,128,2048]{3,2,1,0} dot(Arg_0.1, multiply.9), lhs_batch_dims={0,1}, lhs_contracting_dims={2}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} dot.1 = bf16[2,6,2048,128]{3,2,1,0} dot(divide.5, Arg_3.4), lhs_batch_dims={0,1}, lhs_contracting_dims={2}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} ROOT tuple.91 = (bf16[2,6,2048,128]{3,2,1,0}, bf16[2,6,2048,128]{3,2,1,0}, bf16[2,6,128,2048]{3,2,1,0}, bf16[2,6,2048,128]{3,2,1,0}) tuple(dot.57, dot.90, dot, dot.1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(), GetCudnnVersion()}; TF_ASSERT_OK(RunHloPass(&fusedMhaRewriter, m.get()).status()); HloDCE dce; TF_ASSERT_OK(RunHloPass(&dce, m.get()).status()); ComputationLayout computation_layout( m->entry_computation()->ComputeProgramShape()); const HloInstruction* fmha; SCOPED_TRACE(m->ToString()); EXPECT_THAT( m->entry_computation()->root_instruction(), GmockMatch(m::Tuple( m::GetTupleElement( m::CustomCall(&fmha, {kCudnnfMHAScaleBiasSoftmaxCallTarget}), 0) .WithShape(BF16, {2, 6, 2048, 128}), m::GetTupleElement( m::CustomCall(&fmha, {kCudnnfMHAScaleBiasSoftmaxBackwardCallTarget}), 0) .WithShape(BF16, {2, 6, 2048, 128}), m::Transpose( m::GetTupleElement( m::CustomCall({kCudnnfMHAScaleBiasSoftmaxBackwardCallTarget}), 1)) .WithShape(BF16, {2, 6, 128, 2048}), m::GetTupleElement( m::CustomCall({kCudnnfMHAScaleBiasSoftmaxBackwardCallTarget}), 2) .WithShape(BF16, {2, 6, 2048, 
128})))); TF_ASSERT_OK_AND_ASSIGN(auto gpu_config, fmha->backend_config<GpuBackendConfig>()); const CudnnfMHABackendConfig& config = gpu_config.cudnn_fmha_backend_config(); EXPECT_EQ(fmha->operands().size(), 7); EXPECT_NEAR(config.dropout_rate(), 0, 1e-2); EXPECT_EQ(config.mask_type(), CudnnfMHABackendConfig::NO_MASK); } TEST_F(CudnnFusedMhaRewriterTestHloTest, FlashAttentionBF16TrainingBmm1SoftmaxBmm2Pattern) { if (skip_reason_) GTEST_SKIP() << *skip_reason_; const char* module_str = R"( HloModule jit__unnamed_wrapped_function_, entry_computation_layout={(bf16[2,6,2048,128]{3,2,1,0},bf16[2,6,128,2048]{3,2,1,0},bf16[2,6,2048,128]{3,2,1,0},bf16[2,6,2048,128]{3,2,1,0})->(bf16[2,6,2048,128]{3,2,1,0}, bf16[2,6,2048,128]{3,2,1,0}, bf16[2,6,128,2048]{3,2,1,0}, bf16[2,6,2048,128]{3,2,1,0})}, allow_spmd_sharding_propagation_to_output={true,true,true,true} region_0.32 { Arg_0.33 = bf16[] parameter(0) Arg_1.34 = bf16[] parameter(1) ROOT maximum = bf16[] maximum(Arg_0.33, Arg_1.34) } region_1.44 { Arg_0.45 = f32[] parameter(0) Arg_1.46 = f32[] parameter(1) ROOT add = f32[] add(Arg_0.45, Arg_1.46) } region_2.66 { Arg_0.67 = bf16[] parameter(0) Arg_1.68 = bf16[] parameter(1) ROOT add.1 = bf16[] add(Arg_0.67, Arg_1.68) } ENTRY main.92 { Arg_0.1 = bf16[2,6,2048,128]{3,2,1,0} parameter(0), sharding={replicated} Arg_1.2 = bf16[2,6,128,2048]{3,2,1,0} parameter(1), sharding={replicated} dot.14 = bf16[2,6,2048,2048]{3,2,1,0} dot(Arg_0.1, Arg_1.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} constant.17 = bf16[] constant(2) broadcast.29 = bf16[2,6,2048,2048]{3,2,1,0} broadcast(constant.17), dimensions={} multiply.2 = bf16[2,6,2048,2048]{3,2,1,0} multiply(dot.14, broadcast.29) constant.10 = bf16[] constant(-inf) constant.16 = bf16[] constant(0) reduce.36 = bf16[2,6,2048]{2,1,0} reduce(multiply.2, constant.10), dimensions={3}, to_apply=region_0.32 broadcast.21 = bf16[2,6,2048,2048]{3,2,1,0} broadcast(reduce.36), dimensions={0,1,2} subtract.1 = bf16[2,6,2048,2048]{3,2,1,0} subtract(multiply.2, broadcast.21) exponential.1 = bf16[2,6,2048,2048]{3,2,1,0} exponential(subtract.1) convert.5 = f32[2,6,2048,2048]{3,2,1,0} convert(exponential.1) constant.14 = f32[] constant(0) reduce.48 = f32[2,6,2048]{2,1,0} reduce(convert.5, constant.14), dimensions={3}, to_apply=region_1.44 convert.9 = bf16[2,6,2048]{2,1,0} convert(reduce.48) broadcast.32 = bf16[2,6,2048,2048]{3,2,1,0} broadcast(convert.9), dimensions={0,1,2} divide.5 = bf16[2,6,2048,2048]{3,2,1,0} divide(exponential.1, broadcast.32) Arg_2.3 = bf16[2,6,2048,128]{3,2,1,0} parameter(2), sharding={replicated} dot.57 = bf16[2,6,2048,128]{3,2,1,0} dot(divide.5, Arg_2.3), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} Arg_3.4 = bf16[2,6,2048,128]{3,2,1,0} parameter(3), sharding={replicated} dot.60 = bf16[2,6,2048,2048]{3,2,1,0} dot(Arg_3.4, Arg_2.3), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3} divide.4 = bf16[2,6,2048,2048]{3,2,1,0} divide(dot.60, broadcast.32) constant.15 = bf16[] constant(1) broadcast.25 = bf16[2,6,2048]{2,1,0} broadcast(constant.15), dimensions={} multiply.3 = bf16[2,6,2048]{2,1,0} multiply(convert.9, convert.9) divide.3 = bf16[2,6,2048]{2,1,0} divide(broadcast.25, multiply.3) broadcast.26 = bf16[2,6,2048,2048]{3,2,1,0} broadcast(divide.3), dimensions={0,1,2} multiply.4 = bf16[2,6,2048,2048]{3,2,1,0} multiply(dot.60, broadcast.26) multiply.5 = bf16[2,6,2048,2048]{3,2,1,0} multiply(multiply.4, exponential.1) 
reduce.70 = bf16[2,6,2048]{2,1,0} reduce(multiply.5, constant.16), dimensions={3}, to_apply=region_2.66 negate.2 = bf16[2,6,2048]{2,1,0} negate(reduce.70) broadcast.31 = bf16[2,6,2048,2048]{3,2,1,0} broadcast(negate.2), dimensions={0,1,2} add.5 = bf16[2,6,2048,2048]{3,2,1,0} add(divide.4, broadcast.31) multiply.8 = bf16[2,6,2048,2048]{3,2,1,0} multiply(add.5, exponential.1) multiply.9 = bf16[2,6,2048,2048]{3,2,1,0} multiply(multiply.8, broadcast.29) dot.90 = bf16[2,6,2048,128]{3,2,1,0} dot(multiply.9, Arg_1.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3} dot = bf16[2,6,128,2048]{3,2,1,0} dot(Arg_0.1, multiply.9), lhs_batch_dims={0,1}, lhs_contracting_dims={2}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} dot.1 = bf16[2,6,2048,128]{3,2,1,0} dot(divide.5, Arg_3.4), lhs_batch_dims={0,1}, lhs_contracting_dims={2}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} ROOT tuple.91 = (bf16[2,6,2048,128]{3,2,1,0}, bf16[2,6,2048,128]{3,2,1,0}, bf16[2,6,128,2048]{3,2,1,0}, bf16[2,6,2048,128]{3,2,1,0}) tuple(dot.57, dot.90, dot, dot.1) } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(), GetCudnnVersion()}; TF_ASSERT_OK(RunHloPass(&fusedMhaRewriter, m.get()).status()); HloDCE dce; TF_ASSERT_OK(RunHloPass(&dce, m.get()).status()); ComputationLayout computation_layout( m->entry_computation()->ComputeProgramShape()); const HloInstruction* fmha; SCOPED_TRACE(m->ToString()); EXPECT_THAT( m->entry_computation()->root_instruction(), GmockMatch(m::Tuple( m::GetTupleElement( m::CustomCall(&fmha, {kCudnnfMHASoftmaxCallTarget}), 0) .WithShape(BF16, {2, 6, 2048, 128}), m::GetTupleElement( m::CustomCall(&fmha, {kCudnnfMHASoftmaxBackwardCallTarget}), 0) .WithShape(BF16, {2, 6, 2048, 128}), m::Transpose( m::GetTupleElement( m::CustomCall({kCudnnfMHASoftmaxBackwardCallTarget}), 1)) .WithShape(BF16, {2, 6, 128, 2048}), m::GetTupleElement( m::CustomCall({kCudnnfMHASoftmaxBackwardCallTarget}), 2) .WithShape(BF16, {2, 6, 2048, 128})))); TF_ASSERT_OK_AND_ASSIGN(auto gpu_config, fmha->backend_config<GpuBackendConfig>()); const CudnnfMHABackendConfig& config = gpu_config.cudnn_fmha_backend_config(); EXPECT_EQ(fmha->operands().size(), 6); EXPECT_NEAR(config.dropout_rate(), 0, 1e-2); EXPECT_FLOAT_EQ(config.fmha_scale(), 2); EXPECT_EQ(config.mask_type(), CudnnfMHABackendConfig::NO_MASK); } TEST_F(CudnnFusedMhaRewriterTestHloTest, FlashAttentionBF16TrainingGPT3_5B) { if (skip_reason_) GTEST_SKIP() << *skip_reason_; const char* module_str = R"( HloModule jit__unnamed_wrapped_function_, entry_computation_layout={((s32[], bf16[32,2048,2048]{1,0,2}, bf16[24,8192]{1,0}, bf16[24,1024,8192]{2,1,0}, bf16[24,1024]{0,1}, bf16[24,8192,1024]{1,2,0}, bf16[24,1024]{0,1}, bf16[24,1024]{0,1}, bf16[24,1024]{0,1}, bf16[24,1024]{0,1}, bf16[24,3,16,128]{3,2,1,0}, bf16[24,3,1024,16,128]{4,3,1,2,0}, bf16[24,1024]{1,0}, bf16[24,1024,16,128]{3,2,1,0}, bf16[24,8192]{1,0}, bf16[24,1024,8192]{2,1,0}, bf16[24,8192,1024]{1,2,0}, bf16[24,2048]{1,0}, bf16[24,2048]{1,0}, bf16[24,2048]{1,0}, bf16[24,2048]{1,0}, bf16[24,3,16,128]{3,2,1,0}, bf16[24,3,1024,16,128]{4,3,1,2,0}, bf16[24,1024]{1,0}, bf16[24,1024,16,128]{3,2,1,0}, bf16[24,32,2048,2048]{2,1,3,0}, bf16[32,1,2048,2048]{3,2,0,1}, bf16[32,2048]{1,0}))->(s32[], bf16[32,2048,2048]{1,0,2}, bf16[24,8192]{1,0}, bf16[24,1024,8192]{2,1,0}, bf16[24,1024]{0,1}, bf16[24,8192,1024]{1,2,0}, bf16[24,1024]{0,1}, bf16[24,1024]{0,1}, bf16[24,1024]{0,1}, bf16[24,1024]{0,1}, 
bf16[24,3,16,128]{3,2,1,0}, bf16[24,3,1024,16,128]{4,3,1,2,0}, bf16[24,1024]{1,0}, bf16[24,1024,16,128]{3,2,1,0}, bf16[24,8192]{1,0}, bf16[24,1024,8192]{2,1,0}, bf16[24,8192,1024]{1,2,0}, bf16[24,2048]{1,0}, bf16[24,2048]{1,0}, bf16[24,2048]{1,0}, bf16[24,2048]{1,0}, bf16[24,3,16,128]{3,2,1,0}, bf16[24,3,1024,16,128]{4,3,1,2,0}, bf16[24,1024]{1,0}, bf16[24,1024,16,128]{3,2,1,0}, bf16[24,32,2048,2048]{2,1,3,0}, bf16[32,1,2048,2048]{3,2,0,1}, bf16[32,2048]{1,0})} add { x = bf16[] parameter(0) y = bf16[] parameter(1) ROOT add.580 = bf16[] add(x, y) } region_20.962 { Arg_0.963 = f32[] parameter(0) Arg_1.964 = f32[] parameter(1) ROOT add.579 = f32[] add(Arg_0.963, Arg_1.964) } region_39.1120 { Arg_0.1121 = f32[] parameter(0) Arg_1.1122 = f32[] parameter(1) ROOT maximum.21 = f32[] maximum(Arg_0.1121, Arg_1.1122) } main { param.3 = (s32[], bf16[32,2048,2048]{1,0,2}, bf16[24,8192]{1,0}, bf16[24,1024,8192]{2,1,0}, bf16[24,1024]{0,1}, bf16[24,8192,1024]{1,2,0}, bf16[24,1024]{0,1}, bf16[24,1024]{0,1}, bf16[24,1024]{0,1}, bf16[24,1024]{0,1}, bf16[24,3,16,128]{3,2,1,0}, bf16[24,3,1024,16,128]{4,3,1,2,0}, bf16[24,1024]{1,0}, bf16[24,1024,16,128]{3,2,1,0}, bf16[24,8192]{1,0}, bf16[24,1024,8192]{2,1,0}, bf16[24,8192,1024]{1,2,0}, bf16[24,2048]{1,0}, bf16[24,2048]{1,0}, bf16[24,2048]{1,0}, bf16[24,2048]{1,0}, bf16[24,3,16,128]{3,2,1,0}, bf16[24,3,1024,16,128]{4,3,1,2,0}, bf16[24,1024]{1,0}, bf16[24,1024,16,128]{3,2,1,0}, bf16[24,32,2048,2048]{2,1,3,0}, bf16[32,1,2048,2048]{3,2,0,1}, bf16[32,2048]{1,0}) parameter(0) get-tuple-element.31 = s32[] get-tuple-element(param.3), index=0 constant.1961 = s32[] constant(1) add.581 = s32[] add(get-tuple-element.31, constant.1961) get-tuple-element.32 = bf16[24,32,2048,2048]{2,1,3,0} get-tuple-element(param.3), index=25 bitcast.187 = bf16[24,2048,32,2048]{3,2,1,0} bitcast(get-tuple-element.32) constant.1977 = s32[] constant(23) subtract.221 = s32[] subtract(constant.1977, get-tuple-element.31) constant.1980 = s32[] constant(0) compare.210 = pred[] compare(subtract.221, constant.1980), direction=LT constant.1979 = s32[] constant(47) subtract.222 = s32[] subtract(constant.1979, get-tuple-element.31) select.372 = s32[] select(compare.210, subtract.222, subtract.221) dynamic-slice.324 = bf16[1,2048,32,2048]{3,2,1,0} dynamic-slice(bitcast.187, select.372, constant.1980, constant.1980, constant.1980), dynamic_slice_sizes={1,2048,32,2048} bitcast.756 = bf16[2048,32,2048]{2,1,0} bitcast(dynamic-slice.324) convert.282 = f32[2048,32,2048]{2,1,0} convert(bitcast.756) constant.1991 = bf16[] constant(1) broadcast.1270 = bf16[32,2048]{1,0} broadcast(constant.1991), dimensions={} get-tuple-element.33 = bf16[32,2048]{1,0} get-tuple-element(param.3), index=27 subtract.229 = bf16[32,2048]{1,0} subtract(broadcast.1270, get-tuple-element.33) convert.285 = f32[32,2048]{1,0} convert(subtract.229) broadcast.1228 = f32[2048,32,2048]{2,1,0} broadcast(convert.285), dimensions={1,2} multiply.656 = f32[2048,32,2048]{2,1,0} multiply(convert.282, broadcast.1228) bitcast.367 = f32[32,2048,2048]{1,0,2} bitcast(multiply.656) constant.1968 = f32[] constant(0) reduce.84 = f32[] reduce(bitcast.367, constant.1968), dimensions={0,1,2}, to_apply=region_20.962 all-reduce.230 = f32[] all-reduce(reduce.84), channel_id=278, replica_groups={{0,1,2,3,4,5,6,7}}, use_global_device_ids=true, to_apply=region_20.962 broadcast.1221 = f32[32,2048,4096]{2,1,0} broadcast(convert.285), dimensions={0,1} reduce.85 = f32[] reduce(broadcast.1221, constant.1968), dimensions={0,1,2}, to_apply=region_20.962 all-reduce.14 = f32[] 
all-reduce(reduce.85), channel_id=49, replica_groups={{0,2,4,6},{1,3,5,7}}, use_global_device_ids=true, to_apply=region_20.962 constant.2005 = f32[] constant(1) maximum.24 = f32[] maximum(all-reduce.14, constant.2005) divide.96 = f32[] divide(all-reduce.230, maximum.24) broadcast.1223 = f32[2048,32,2048]{2,1,0} broadcast(divide.96), dimensions={} subtract.219 = f32[2048,32,2048]{2,1,0} subtract(convert.282, broadcast.1223) multiply.644 = f32[2048,32,2048]{2,1,0} multiply(subtract.219, broadcast.1228) multiply.645 = f32[2048,32,2048]{2,1,0} multiply(multiply.644, multiply.644) bitcast.271 = f32[32,2048,2048]{1,0,2} bitcast(multiply.645) reduce.86 = f32[] reduce(bitcast.271, constant.1968), dimensions={0,1,2}, to_apply=region_20.962 all-reduce.231 = f32[] all-reduce(reduce.86), channel_id=279, replica_groups={{0,1,2,3,4,5,6,7}}, use_global_device_ids=true, to_apply=region_20.962 divide.99 = f32[] divide(all-reduce.231, maximum.24) rsqrt.16 = f32[] rsqrt(divide.99) multiply.650 = f32[] multiply(rsqrt.16, constant.1968) divide.100 = f32[] divide(multiply.650, maximum.24) constant.1974 = f32[] constant(2) multiply.652 = f32[] multiply(divide.100, constant.1974) broadcast.1227 = f32[2048,32,2048]{2,1,0} broadcast(multiply.652), dimensions={} multiply.653 = f32[2048,32,2048]{2,1,0} multiply(multiply.644, broadcast.1227) multiply.654 = f32[2048,32,2048]{2,1,0} multiply(multiply.653, broadcast.1228) negate.56 = f32[2048,32,2048]{2,1,0} negate(multiply.654) bitcast.321 = f32[32,2048,2048]{1,0,2} bitcast(negate.56) reduce.87 = f32[] reduce(bitcast.321, constant.1968), dimensions={0,1,2}, to_apply=region_20.962 all-reduce.232 = f32[] all-reduce(reduce.87), channel_id=280, replica_groups={{0,1,2,3,4,5,6,7}}, use_global_device_ids=true, to_apply=region_20.962 divide.101 = f32[] divide(all-reduce.232, maximum.24) broadcast.1229 = f32[32,2048]{1,0} broadcast(divide.101), dimensions={} multiply.655 = f32[32,2048]{1,0} multiply(broadcast.1229, convert.285) broadcast.1230 = f32[2048,32,2048]{2,1,0} broadcast(multiply.655), dimensions={1,2} add.582 = f32[2048,32,2048]{2,1,0} add(multiply.654, broadcast.1230) broadcast.1236 = f32[2048,32,2048]{2,1,0} broadcast(constant.1968), dimensions={} compare.208 = pred[2048,32,2048]{2,1,0} compare(multiply.656, broadcast.1236), direction=GE abs.22 = f32[2048,32,2048]{2,1,0} abs(multiply.656) bitcast.373 = f32[32,2048,2048]{1,0,2} bitcast(abs.22) constant.1989 = f32[] constant(-inf) reduce.88 = f32[] reduce(bitcast.373, constant.1989), dimensions={0,1,2}, to_apply=region_39.1120 all-reduce.233 = f32[] all-reduce(reduce.88), channel_id=281, replica_groups={{0,1,2,3,4,5,6,7}}, use_global_device_ids=true, to_apply=region_39.1120 broadcast.1233 = f32[2048,32,2048]{2,1,0} broadcast(all-reduce.233), dimensions={} compare.207 = pred[2048,32,2048]{2,1,0} compare(abs.22, broadcast.1233), direction=EQ convert.286 = f32[2048,32,2048]{2,1,0} convert(compare.207) bitcast.393 = f32[32,2048,2048]{1,0,2} bitcast(convert.286) reduce.89 = f32[] reduce(bitcast.393, constant.1968), dimensions={0,1,2}, to_apply=region_20.962 all-reduce.234 = f32[] all-reduce(reduce.89), channel_id=282, replica_groups={{0,1,2,3,4,5,6,7}}, use_global_device_ids=true, to_apply=region_20.962 divide.103 = f32[] divide(constant.1968, all-reduce.234) broadcast.1238 = f32[2048,32,2048]{2,1,0} broadcast(divide.103), dimensions={} select.370 = f32[2048,32,2048]{2,1,0} select(compare.207, broadcast.1238, broadcast.1236) select.369 = f32[2048,32,2048]{2,1,0} select(compare.208, select.370, broadcast.1236) constant.1976 = 
pred[] constant(false) broadcast.1237 = pred[2048,32,2048]{2,1,0} broadcast(constant.1976), dimensions={} compare.209 = pred[2048,32,2048]{2,1,0} compare(compare.208, broadcast.1237), direction=EQ select.371 = f32[2048,32,2048]{2,1,0} select(compare.209, select.370, broadcast.1236) negate.57 = f32[2048,32,2048]{2,1,0} negate(select.371) add.583 = f32[2048,32,2048]{2,1,0} add(select.369, negate.57) multiply.658 = f32[2048,32,2048]{2,1,0} multiply(add.583, broadcast.1228) add.585 = f32[2048,32,2048]{2,1,0} add(add.582, multiply.658) convert.287 = bf16[2048,32,2048]{2,1,0} convert(add.585) get-tuple-element.34 = bf16[32,2048,2048]{1,0,2} get-tuple-element(param.3), index=1 bitcast.1652 = bf16[2048,32,2048]{2,1,0} bitcast(get-tuple-element.34) get-tuple-element.35 = bf16[24,3,1024,16,128]{4,3,1,2,0} get-tuple-element(param.3), index=22 bitcast.461 = bf16[24,1024,3,16,128]{4,3,2,1,0} bitcast(get-tuple-element.35) dynamic-slice.325 = bf16[1,1024,3,16,128]{4,3,2,1,0} dynamic-slice(bitcast.461, select.372, constant.1980, constant.1980, constant.1980, constant.1980), dynamic_slice_sizes={1,1024,3,16,128} bitcast.485 = bf16[3,1024,16,128]{3,2,0,1} bitcast(dynamic-slice.325) all-gather.7 = bf16[3,4096,16,128]{3,2,0,1} all-gather(bitcast.485), channel_id=60, replica_groups={{0,2,4,6},{1,3,5,7}}, dimensions={1}, use_global_device_ids=true bitcast.1420 = bf16[6144,4096]{0,1} bitcast(all-gather.7) bitcast.500 = f32[32,2048,2048]{1,0,2} bitcast(convert.282) reduce.90 = f32[32,2048]{1,0} reduce(bitcast.500, constant.1968), dimensions={2}, to_apply=region_20.962 all-reduce.23 = f32[32,2048]{1,0} all-reduce(reduce.90), channel_id=58, replica_groups={{0,1},{2,3},{4,5},{6,7}}, use_global_device_ids=true, to_apply=region_20.962 constant.1983 = f32[] constant(0.000244140625) broadcast.1243 = f32[32,2048]{1,0} broadcast(constant.1983), dimensions={} multiply.660 = f32[32,2048]{1,0} multiply(all-reduce.23, broadcast.1243) broadcast.1242 = f32[2048,32,2048]{2,1,0} broadcast(multiply.660), dimensions={1,2} subtract.224 = f32[2048,32,2048]{2,1,0} subtract(convert.282, broadcast.1242) multiply.661 = f32[2048,32,2048]{2,1,0} multiply(subtract.224, subtract.224) bitcast.527 = f32[32,2048,2048]{1,0,2} bitcast(multiply.661) reduce.91 = f32[32,2048]{1,0} reduce(bitcast.527, constant.1968), dimensions={2}, to_apply=region_20.962 all-reduce.24 = f32[32,2048]{1,0} all-reduce(reduce.91), channel_id=59, replica_groups={{0,1},{2,3},{4,5},{6,7}}, use_global_device_ids=true, to_apply=region_20.962 multiply.662 = f32[32,2048]{1,0} multiply(all-reduce.24, broadcast.1243) constant.1990 = f32[] constant(1e-05) broadcast.1264 = f32[32,2048]{1,0} broadcast(constant.1990), dimensions={} add.587 = f32[32,2048]{1,0} add(multiply.662, broadcast.1264) bitcast.1447 = f32[1,32,2048]{2,1,0} bitcast(add.587) rsqrt.20 = f32[1,32,2048]{2,1,0} rsqrt(bitcast.1447) bitcast.1892 = f32[32,2048]{1,0} bitcast(rsqrt.20) broadcast.1337 = f32[2048,32,2048]{2,1,0} broadcast(bitcast.1892), dimensions={1,2} multiply.754 = f32[2048,32,2048]{2,1,0} multiply(subtract.224, broadcast.1337) convert.314 = bf16[2048,32,2048]{2,1,0} convert(multiply.754) get-tuple-element.36 = bf16[24,2048]{1,0} get-tuple-element(param.3), index=20 dynamic-slice.326 = bf16[1,2048]{1,0} dynamic-slice(get-tuple-element.36, select.372, constant.1980), dynamic_slice_sizes={1,2048} broadcast.1266 = bf16[1,2048]{1,0} broadcast(constant.1991), dimensions={} add.588 = bf16[1,2048]{1,0} add(dynamic-slice.326, broadcast.1266) bitcast.1992 = bf16[2048]{0} bitcast(add.588) broadcast.1338 = 
bf16[2048,32,2048]{2,1,0} broadcast(bitcast.1992), dimensions={0} multiply.755 = bf16[2048,32,2048]{2,1,0} multiply(convert.314, broadcast.1338) get-tuple-element.37 = bf16[24,2048]{1,0} get-tuple-element(param.3), index=19 dynamic-slice.327 = bf16[1,2048]{1,0} dynamic-slice(get-tuple-element.37, select.372, constant.1980), dynamic_slice_sizes={1,2048} bitcast.1998 = bf16[2048]{0} bitcast(dynamic-slice.327) broadcast.1339 = bf16[2048,32,2048]{2,1,0} broadcast(bitcast.1998), dimensions={0} add.640 = bf16[2048,32,2048]{2,1,0} add(multiply.755, broadcast.1339) bitcast.2003 = bf16[32,2048,2048]{1,0,2} bitcast(add.640) all-gather.8 = bf16[32,2048,4096]{1,0,2} all-gather(bitcast.2003), channel_id=61, replica_groups={{0,1},{2,3},{4,5},{6,7}}, dimensions={2}, use_global_device_ids=true bitcast.597 = bf16[4096,65536]{1,0} bitcast(all-gather.8) dot.42 = bf16[6144,65536]{1,0} dot(bitcast.1420, bitcast.597), lhs_contracting_dims={1}, rhs_contracting_dims={0} bitcast.623 = bf16[3,16,128,32,2048]{4,3,2,1,0} bitcast(dot.42) transpose.112 = bf16[3,32,16,128,2048]{4,3,2,1,0} transpose(bitcast.623), dimensions={0,3,1,2,4} get-tuple-element.38 = bf16[24,3,16,128]{3,2,1,0} get-tuple-element(param.3), index=21 dynamic-slice.328 = bf16[1,3,16,128]{3,2,1,0} dynamic-slice(get-tuple-element.38, select.372, constant.1980, constant.1980, constant.1980), dynamic_slice_sizes={1,3,16,128} bitcast.626 = bf16[3,16,128]{2,1,0} bitcast(dynamic-slice.328) broadcast.1250 = bf16[3,32,16,128,2048]{4,3,2,1,0} broadcast(bitcast.626), dimensions={0,2,3} add.591 = bf16[3,32,16,128,2048]{4,3,2,1,0} add(transpose.112, broadcast.1250) slice.87 = bf16[1,32,16,128,2048]{4,3,2,1,0} slice(add.591), slice={[2:3], [0:32], [0:16], [0:128], [0:2048]} bitcast.1280 = bf16[32,16,128,2048]{3,2,1,0} bitcast(slice.87) slice.88 = bf16[1,32,16,128,2048]{4,3,2,1,0} slice(add.591), slice={[0:1], [0:32], [0:16], [0:128], [0:2048]} constant.2007 = bf16[] constant(0.08838) broadcast.1251 = bf16[1,32,16,128,2048]{4,3,2,1,0} broadcast(constant.2007), dimensions={} multiply.666 = bf16[1,32,16,128,2048]{4,3,2,1,0} multiply(slice.88, broadcast.1251) bitcast.1330 = bf16[32,16,128,2048]{3,2,1,0} bitcast(multiply.666) transpose.113 = bf16[32,16,2048,128]{3,2,1,0} transpose(bitcast.1330), dimensions={0,1,3,2} slice.89 = bf16[1,32,16,128,2048]{4,3,2,1,0} slice(add.591), slice={[1:2], [0:32], [0:16], [0:128], [0:2048]} bitcast.647 = bf16[32,16,128,2048]{3,2,1,0} bitcast(slice.89) dot.43 = bf16[32,16,2048,2048]{3,2,1,0} dot(transpose.113, bitcast.647), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} convert.291 = f32[32,16,2048,2048]{3,2,1,0} convert(dot.43) get-tuple-element.39 = bf16[32,1,2048,2048]{3,2,0,1} get-tuple-element(param.3), index=26 bitcast.651 = bf16[1,32,2048,2048]{3,2,1,0} bitcast(get-tuple-element.39) iota.38 = s32[2048,2048]{1,0} iota(), iota_dimension=0 iota.39 = s32[2048,2048]{1,0} iota(), iota_dimension=1 compare.211 = pred[2048,2048]{1,0} compare(iota.38, iota.39), direction=LT constant.1987 = bf16[] constant(-2.366e+38) broadcast.1252 = bf16[2048,2048]{1,0} broadcast(constant.1987), dimensions={} constant.2006 = bf16[] constant(0) broadcast.1253 = bf16[2048,2048]{1,0} broadcast(constant.2006), dimensions={} select.373 = bf16[2048,2048]{1,0} select(compare.211, broadcast.1252, broadcast.1253) broadcast.1254 = bf16[1,32,2048,2048]{3,2,1,0} broadcast(select.373), dimensions={2,3} minimum.5 = bf16[1,32,2048,2048]{3,2,1,0} minimum(bitcast.651, broadcast.1254) bitcast.673 = bf16[32,2048,2048]{2,1,0} 
bitcast(minimum.5) convert.292 = f32[32,2048,2048]{2,1,0} convert(bitcast.673) broadcast.1256 = f32[32,16,2048,2048]{3,2,1,0} broadcast(convert.292), dimensions={0,2,3} add.593 = f32[32,16,2048,2048]{3,2,1,0} add(convert.291, broadcast.1256) reduce.92 = f32[32,16,2048]{2,1,0} reduce(add.593, constant.1989), dimensions={3}, to_apply=region_39.1120 broadcast.1258 = f32[32,16,2048,2048]{3,2,1,0} broadcast(reduce.92), dimensions={0,1,2} subtract.226 = f32[32,16,2048,2048]{3,2,1,0} subtract(add.593, broadcast.1258) exponential.8 = f32[32,16,2048,2048]{3,2,1,0} exponential(subtract.226) reduce.93 = f32[32,16,2048]{2,1,0} reduce(exponential.8, constant.1968), dimensions={3}, to_apply=region_20.962 broadcast.1309 = f32[32,16,2048,2048]{3,2,1,0} broadcast(reduce.93), dimensions={0,1,2} divide.109 = f32[32,16,2048,2048]{3,2,1,0} divide(exponential.8, broadcast.1309) convert.306 = bf16[32,16,2048,2048]{3,2,1,0} convert(divide.109) dot.44 = bf16[32,16,128,2048]{3,2,1,0} dot(bitcast.1280, convert.306), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3} transpose.116 = bf16[32,2048,16,128]{3,2,1,0} transpose(dot.44), dimensions={0,3,1,2} bitcast.711 = bf16[65536,2048]{1,0} bitcast(transpose.116) get-tuple-element.40 = bf16[24,1024,16,128]{3,2,1,0} get-tuple-element(param.3), index=24 dynamic-slice.329 = bf16[1,1024,16,128]{3,2,1,0} dynamic-slice(get-tuple-element.40, select.372, constant.1980, constant.1980, constant.1980), dynamic_slice_sizes={1,1024,16,128} bitcast.724 = bf16[1024,16,128]{2,1,0} bitcast(dynamic-slice.329) all-gather.9 = bf16[4096,16,128]{2,1,0} all-gather(bitcast.724), channel_id=62, replica_groups={{0,2,4,6},{1,3,5,7}}, dimensions={0}, use_global_device_ids=true bitcast.729 = bf16[2048,4096]{0,1} bitcast(all-gather.9) dot.57 = bf16[65536,4096]{0,1} dot(bitcast.711, bitcast.729), lhs_contracting_dims={1}, rhs_contracting_dims={0} bitcast.733 = bf16[32,2048,4096]{1,0,2} bitcast(dot.57) reduce-scatter = bf16[32,2048,2048]{1,0,2} reduce-scatter(bitcast.733), channel_id=322, replica_groups={{0,1},{2,3},{4,5},{6,7}}, use_global_device_ids=true, dimensions={2}, to_apply=add bitcast.763 = bf16[2048,32,2048]{2,1,0} bitcast(reduce-scatter) get-tuple-element.41 = bf16[24,1024]{1,0} get-tuple-element(param.3), index=23 dynamic-slice.330 = bf16[1,1024]{1,0} dynamic-slice(get-tuple-element.41, select.372, constant.1980), dynamic_slice_sizes={1,1024} bitcast.748 = bf16[1024]{0} bitcast(dynamic-slice.330) collective-permute.1 = bf16[1024]{0} collective-permute(bitcast.748), channel_id=64, source_target_pairs={{0,0},{1,2},{2,4},{3,6},{4,1},{5,3},{6,5},{7,7}} all-gather.10 = bf16[2048]{0} all-gather(collective-permute.1), channel_id=65, replica_groups={{0,4},{2,6},{1,5},{3,7}}, dimensions={0}, use_global_device_ids=true broadcast.1261 = bf16[2048,32,2048]{2,1,0} broadcast(all-gather.10), dimensions={0} add.596 = bf16[2048,32,2048]{2,1,0} add(bitcast.763, broadcast.1261) add.597 = bf16[2048,32,2048]{2,1,0} add(add.596, bitcast.756) convert.295 = f32[2048,32,2048]{2,1,0} convert(add.597) bitcast.774 = f32[32,2048,2048]{1,0,2} bitcast(convert.295) reduce.94 = f32[32,2048]{1,0} reduce(bitcast.774, constant.1968), dimensions={2}, to_apply=region_20.962 all-reduce.26 = f32[32,2048]{1,0} all-reduce(reduce.94), channel_id=66, replica_groups={{0,1},{2,3},{4,5},{6,7}}, use_global_device_ids=true, to_apply=region_20.962 multiply.668 = f32[32,2048]{1,0} multiply(all-reduce.26, broadcast.1243) broadcast.1263 = f32[2048,32,2048]{2,1,0} broadcast(multiply.668), 
dimensions={1,2} subtract.228 = f32[2048,32,2048]{2,1,0} subtract(convert.295, broadcast.1263) multiply.669 = f32[2048,32,2048]{2,1,0} multiply(subtract.228, subtract.228) bitcast.809 = f32[32,2048,2048]{1,0,2} bitcast(multiply.669) reduce.95 = f32[32,2048]{1,0} reduce(bitcast.809, constant.1968), dimensions={2}, to_apply=region_20.962 all-reduce.27 = f32[32,2048]{1,0} all-reduce(reduce.95), channel_id=67, replica_groups={{0,1},{2,3},{4,5},{6,7}}, use_global_device_ids=true, to_apply=region_20.962 multiply.670 = f32[32,2048]{1,0} multiply(all-reduce.27, broadcast.1243) add.598 = f32[32,2048]{1,0} add(multiply.670, broadcast.1264) bitcast.1148 = f32[1,32,2048]{2,1,0} bitcast(add.598) rsqrt.19 = f32[1,32,2048]{2,1,0} rsqrt(bitcast.1148) bitcast.1602 = f32[32,2048]{1,0} bitcast(rsqrt.19) broadcast.1329 = f32[2048,32,2048]{2,1,0} broadcast(bitcast.1602), dimensions={1,2} multiply.750 = f32[2048,32,2048]{2,1,0} multiply(subtract.228, broadcast.1329) convert.312 = bf16[2048,32,2048]{2,1,0} convert(multiply.750) get-tuple-element.42 = bf16[24,2048]{1,0} get-tuple-element(param.3), index=18 dynamic-slice.331 = bf16[1,2048]{1,0} dynamic-slice(get-tuple-element.42, select.372, constant.1980), dynamic_slice_sizes={1,2048} add.599 = bf16[1,2048]{1,0} add(dynamic-slice.331, broadcast.1266) bitcast.1609 = bf16[2048]{0} bitcast(add.599) broadcast.1330 = bf16[2048,32,2048]{2,1,0} broadcast(bitcast.1609), dimensions={0} multiply.745 = bf16[2048,32,2048]{2,1,0} multiply(convert.312, broadcast.1330) get-tuple-element.43 = bf16[24,2048]{1,0} get-tuple-element(param.3), index=17 dynamic-slice.332 = bf16[1,2048]{1,0} dynamic-slice(get-tuple-element.43, select.372, constant.1980), dynamic_slice_sizes={1,2048} bitcast.1615 = bf16[2048]{0} bitcast(dynamic-slice.332) broadcast.1331 = bf16[2048,32,2048]{2,1,0} broadcast(bitcast.1615), dimensions={0} add.636 = bf16[2048,32,2048]{2,1,0} add(multiply.745, broadcast.1331) bitcast.1620 = bf16[32,2048,2048]{1,0,2} bitcast(add.636) all-gather.12 = bf16[32,2048,4096]{1,0,2} all-gather(bitcast.1620), channel_id=69, replica_groups={{0,1},{2,3},{4,5},{6,7}}, dimensions={2}, use_global_device_ids=true bitcast.877 = bf16[65536,4096]{0,1} bitcast(all-gather.12) get-tuple-element.44 = bf16[24,1024,8192]{2,1,0} get-tuple-element(param.3), index=15 dynamic-slice.333 = bf16[1,1024,8192]{2,1,0} dynamic-slice(get-tuple-element.44, select.372, constant.1980, constant.1980), dynamic_slice_sizes={1,1024,8192} bitcast.890 = bf16[1024,8192]{1,0} bitcast(dynamic-slice.333) all-gather.11 = bf16[4096,8192]{1,0} all-gather(bitcast.890), channel_id=68, replica_groups={{0,2,4,6},{1,3,5,7}}, dimensions={0}, use_global_device_ids=true dot.45 = bf16[65536,8192]{1,0} dot(bitcast.877, all-gather.11), lhs_contracting_dims={1}, rhs_contracting_dims={0} get-tuple-element.45 = bf16[24,8192]{1,0} get-tuple-element(param.3), index=14 dynamic-slice.334 = bf16[1,8192]{1,0} dynamic-slice(get-tuple-element.45, select.372, constant.1980), dynamic_slice_sizes={1,8192} bitcast.906 = bf16[8192]{0} bitcast(dynamic-slice.334) broadcast.1269 = bf16[65536,8192]{1,0} broadcast(bitcast.906), dimensions={1} add.601 = bf16[65536,8192]{1,0} add(dot.45, broadcast.1269) bitcast.997 = bf16[32,2048,8192]{2,1,0} bitcast(add.601) broadcast.1333 = bf16[2048,32,2048]{2,1,0} broadcast(subtract.229), dimensions={1,2} multiply.746 = bf16[2048,32,2048]{2,1,0} multiply(bitcast.1652, broadcast.1333) bitcast.1739 = bf16[32,2048,2048]{1,0,2} bitcast(multiply.746) all-gather.14 = bf16[32,2048,4096]{1,0,2} all-gather(bitcast.1739), 
channel_id=71, replica_groups={{0,1},{2,3},{4,5},{6,7}}, dimensions={2}, use_global_device_ids=true bitcast.934 = bf16[65536,4096]{0,1} bitcast(all-gather.14) get-tuple-element.46 = bf16[24,8192,1024]{1,2,0} get-tuple-element(param.3), index=16 bitcast.935 = bf16[24,1024,8192]{2,1,0} bitcast(get-tuple-element.46) dynamic-slice.335 = bf16[1,1024,8192]{2,1,0} dynamic-slice(bitcast.935, select.372, constant.1980, constant.1980), dynamic_slice_sizes={1,1024,8192} bitcast.947 = bf16[8192,1024]{0,1} bitcast(dynamic-slice.335) all-gather.13 = bf16[8192,4096]{0,1} all-gather(bitcast.947), channel_id=70, replica_groups={{0,2,4,6},{1,3,5,7}}, dimensions={1}, use_global_device_ids=true dot.46 = bf16[65536,8192]{1,0} dot(bitcast.934, all-gather.13), lhs_contracting_dims={1}, rhs_contracting_dims={1} bitcast.1092 = bf16[32,2048,8192]{2,1,0} bitcast(dot.46) broadcast.1335 = bf16[32,2048,8192]{2,1,0} broadcast(subtract.229), dimensions={0,1} multiply.703 = bf16[32,2048,8192]{2,1,0} multiply(bitcast.1092, broadcast.1335) multiply.685 = bf16[32,2048,8192]{2,1,0} multiply(bitcast.997, multiply.703) constant.2002 = bf16[] constant(0.5) broadcast.1288 = bf16[32,2048,8192]{2,1,0} broadcast(constant.2002), dimensions={} multiply.686 = bf16[32,2048,8192]{2,1,0} multiply(multiply.685, broadcast.1288) broadcast.1287 = bf16[32,2048,8192]{2,1,0} broadcast(constant.1991), dimensions={} multiply.700 = bf16[32,2048,8192]{2,1,0} multiply(bitcast.997, bitcast.997) multiply.693 = bf16[32,2048,8192]{2,1,0} multiply(bitcast.997, multiply.700) constant.1998 = bf16[] constant(0.04468) broadcast.1282 = bf16[32,2048,8192]{2,1,0} broadcast(constant.1998), dimensions={} multiply.694 = bf16[32,2048,8192]{2,1,0} multiply(multiply.693, broadcast.1282) add.605 = bf16[32,2048,8192]{2,1,0} add(bitcast.997, multiply.694) constant.2010 = bf16[] constant(0.7969) broadcast.1324 = bf16[32,2048,8192]{2,1,0} broadcast(constant.2010), dimensions={} multiply.695 = bf16[32,2048,8192]{2,1,0} multiply(add.605, broadcast.1324) tanh.7 = bf16[32,2048,8192]{2,1,0} tanh(multiply.695) subtract.231 = bf16[32,2048,8192]{2,1,0} subtract(broadcast.1287, tanh.7) multiply.691 = bf16[32,2048,8192]{2,1,0} multiply(multiply.686, subtract.231) multiply.737 = bf16[32,2048,8192]{2,1,0} multiply(multiply.691, tanh.7) add.630 = bf16[32,2048,8192]{2,1,0} add(multiply.691, multiply.737) multiply.738 = bf16[32,2048,8192]{2,1,0} multiply(add.630, broadcast.1324) constant.2011 = bf16[] constant(0.03564) broadcast.1326 = bf16[32,2048,8192]{2,1,0} broadcast(constant.2011), dimensions={} multiply.739 = bf16[32,2048,8192]{2,1,0} multiply(add.630, broadcast.1326) constant.2012 = bf16[] constant(3) broadcast.1327 = bf16[32,2048,8192]{2,1,0} broadcast(constant.2012), dimensions={} multiply.740 = bf16[32,2048,8192]{2,1,0} multiply(multiply.700, broadcast.1327) multiply.741 = bf16[32,2048,8192]{2,1,0} multiply(multiply.739, multiply.740) add.632 = bf16[32,2048,8192]{2,1,0} add(multiply.738, multiply.741) add.637 = bf16[32,2048,8192]{2,1,0} add(tanh.7, broadcast.1287) multiply.747 = bf16[32,2048,8192]{2,1,0} multiply(add.637, broadcast.1288) multiply.743 = bf16[32,2048,8192]{2,1,0} multiply(multiply.703, multiply.747) add.635 = bf16[32,2048,8192]{2,1,0} add(add.632, multiply.743) bitcast.1629 = bf16[65536,8192]{1,0} bitcast(add.635) dot.47 = bf16[65536,4096]{0,1} dot(bitcast.1629, all-gather.11), lhs_contracting_dims={1}, rhs_contracting_dims={1} bitcast.1130 = bf16[32,2048,4096]{1,0,2} bitcast(dot.47) reduce-scatter.1 = bf16[32,2048,2048]{1,0,2} reduce-scatter(bitcast.1130), 
channel_id=323, replica_groups={{0,1},{2,3},{4,5},{6,7}}, use_global_device_ids=true, dimensions={2}, to_apply=add bitcast.1766 = bf16[2048,32,2048]{2,1,0} bitcast(reduce-scatter.1) multiply.712 = bf16[2048,32,2048]{2,1,0} multiply(bitcast.1766, broadcast.1330) convert.299 = f32[2048,32,2048]{2,1,0} convert(multiply.712) multiply.707 = f32[2048,32,2048]{2,1,0} multiply(subtract.228, convert.299) bitcast.1135 = f32[32,2048,2048]{1,0,2} bitcast(multiply.707) reduce.96 = f32[32,2048]{1,0} reduce(bitcast.1135, constant.1968), dimensions={2}, to_apply=region_20.962 all-reduce.29 = f32[32,2048]{1,0} all-reduce(reduce.96), channel_id=73, replica_groups={{0,1},{2,3},{4,5},{6,7}}, use_global_device_ids=true, to_apply=region_20.962 bitcast.1140 = f32[1,32,2048]{2,1,0} bitcast(all-reduce.29) divide.105 = f32[1,32,2048]{2,1,0} divide(rsqrt.19, bitcast.1148) constant.2008 = f32[] constant(-0.5) broadcast.1313 = f32[1,32,2048]{2,1,0} broadcast(constant.2008), dimensions={} multiply.708 = f32[1,32,2048]{2,1,0} multiply(divide.105, broadcast.1313) multiply.709 = f32[1,32,2048]{2,1,0} multiply(bitcast.1140, multiply.708) constant.2009 = f32[] constant(0.00048828125) broadcast.1315 = f32[1,32,2048]{2,1,0} broadcast(constant.2009), dimensions={} multiply.710 = f32[1,32,2048]{2,1,0} multiply(multiply.709, broadcast.1315) bitcast.1235 = f32[32,2048]{1,0} bitcast(multiply.710) broadcast.1296 = f32[2048,32,2048]{2,1,0} broadcast(bitcast.1235), dimensions={1,2} multiply.717 = f32[2048,32,2048]{2,1,0} multiply(subtract.228, broadcast.1296) multiply.718 = f32[2048,32,2048]{2,1,0} multiply(convert.299, broadcast.1329) add.617 = f32[2048,32,2048]{2,1,0} add(multiply.717, multiply.718) negate.58 = f32[2048,32,2048]{2,1,0} negate(multiply.717) bitcast.1189 = f32[32,2048,2048]{1,0,2} bitcast(negate.58) reduce.97 = f32[32,2048]{1,0} reduce(bitcast.1189, constant.1968), dimensions={2}, to_apply=region_20.962 negate.59 = f32[2048,32,2048]{2,1,0} negate(multiply.718) bitcast.1203 = f32[32,2048,2048]{1,0,2} bitcast(negate.59) reduce.98 = f32[32,2048]{1,0} reduce(bitcast.1203, constant.1968), dimensions={2}, to_apply=region_20.962 add.613 = f32[32,2048]{1,0} add(reduce.97, reduce.98) all-reduce.274 = f32[32,2048]{1,0} all-reduce(add.613), channel_id=335, replica_groups={{0,1},{2,3},{4,5},{6,7}}, use_global_device_ids=true, to_apply=region_20.962 multiply.719 = f32[32,2048]{1,0} multiply(all-reduce.274, broadcast.1243) broadcast.1297 = f32[2048,32,2048]{2,1,0} broadcast(multiply.719), dimensions={1,2} add.618 = f32[2048,32,2048]{2,1,0} add(add.617, broadcast.1297) convert.301 = bf16[2048,32,2048]{2,1,0} convert(add.618) add.619 = bf16[2048,32,2048]{2,1,0} add(bitcast.1652, convert.301) add.616 = bf16[2048,32,2048]{2,1,0} add(convert.287, add.619) bitcast.2063 = bf16[32,2048,2048]{1,0,2} bitcast(add.619) all-gather.15 = bf16[32,2048,4096]{1,0,2} all-gather(bitcast.2063), channel_id=76, replica_groups={{0,1},{2,3},{4,5},{6,7}}, dimensions={2}, use_global_device_ids=true bitcast.1263 = bf16[65536,4096]{0,1} bitcast(all-gather.15) bitcast.1269 = bf16[4096,2048]{1,0} bitcast(all-gather.9) dot.48 = bf16[65536,2048]{1,0} dot(bitcast.1263, bitcast.1269), lhs_contracting_dims={1}, rhs_contracting_dims={0} bitcast.1381 = bf16[32,2048,16,128]{3,2,1,0} bitcast(dot.48) transpose.122 = bf16[32,16,2048,128]{3,2,1,0} transpose(bitcast.1381), dimensions={0,2,1,3} dot.49 = bf16[32,16,2048,2048]{3,2,1,0} dot(transpose.122, bitcast.1280), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} convert.303 = 
f32[32,16,2048,2048]{3,2,1,0} convert(dot.49) broadcast.1298 = f32[32,16,2048]{2,1,0} broadcast(constant.2005), dimensions={} multiply.720 = f32[32,16,2048]{2,1,0} multiply(reduce.93, reduce.93) divide.106 = f32[32,16,2048]{2,1,0} divide(broadcast.1298, multiply.720) broadcast.1299 = f32[32,16,2048,2048]{3,2,1,0} broadcast(divide.106), dimensions={0,1,2} multiply.721 = f32[32,16,2048,2048]{3,2,1,0} multiply(convert.303, broadcast.1299) multiply.722 = f32[32,16,2048,2048]{3,2,1,0} multiply(multiply.721, exponential.8) reduce.99 = f32[32,16,2048]{2,1,0} reduce(multiply.722, constant.1968), dimensions={3}, to_apply=region_20.962 negate.61 = f32[32,16,2048]{2,1,0} negate(reduce.99) broadcast.1305 = f32[32,16,2048,2048]{3,2,1,0} broadcast(negate.61), dimensions={0,1,2} divide.108 = f32[32,16,2048,2048]{3,2,1,0} divide(convert.303, broadcast.1309) add.622 = f32[32,16,2048,2048]{3,2,1,0} add(broadcast.1305, divide.108) multiply.724 = f32[32,16,2048,2048]{3,2,1,0} multiply(add.622, exponential.8) convert.305 = bf16[32,16,2048,2048]{3,2,1,0} convert(multiply.724) dot.50 = bf16[32,16,2048,128]{3,2,1,0} dot(convert.305, transpose.113), lhs_batch_dims={0,1}, lhs_contracting_dims={2}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} bitcast.1934 = bf16[1,32,16,2048,128]{4,3,2,1,0} bitcast(dot.50) pad.6 = bf16[3,32,16,2048,128]{4,3,2,1,0} pad(bitcast.1934, constant.2006), padding=1_1x0_0x0_0x0_0x0_0 transpose.120 = bf16[32,16,2048,128]{3,2,1,0} transpose(bitcast.647), dimensions={0,1,3,2} dot.51 = bf16[32,16,2048,128]{3,2,1,0} dot(convert.305, transpose.120), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} broadcast.1307 = bf16[32,16,2048,128]{3,2,1,0} broadcast(constant.2007), dimensions={} multiply.725 = bf16[32,16,2048,128]{3,2,1,0} multiply(dot.51, broadcast.1307) bitcast.1941 = bf16[1,32,16,2048,128]{4,3,2,1,0} bitcast(multiply.725) pad.7 = bf16[3,32,16,2048,128]{4,3,2,1,0} pad(bitcast.1941, constant.2006), padding=0_2x0_0x0_0x0_0x0_0 add.638 = bf16[3,32,16,2048,128]{4,3,2,1,0} add(pad.6, pad.7) transpose.123 = bf16[32,16,128,2048]{3,2,1,0} transpose(bitcast.1381), dimensions={0,2,3,1} dot.89 = bf16[32,16,2048,128]{3,2,1,0} dot(convert.306, transpose.123), lhs_batch_dims={0,1}, lhs_contracting_dims={2}, rhs_batch_dims={0,1}, rhs_contracting_dims={3} bitcast.1949 = bf16[1,32,16,2048,128]{4,3,2,1,0} bitcast(dot.89) pad.8 = bf16[3,32,16,2048,128]{4,3,2,1,0} pad(bitcast.1949, constant.2006), padding=2_0x0_0x0_0x0_0x0_0 add.639 = bf16[3,32,16,2048,128]{4,3,2,1,0} add(add.638, pad.8) transpose.127 = bf16[32,2048,3,16,128]{4,3,2,1,0} transpose(add.639), dimensions={1,3,0,2,4} bitcast.1416 = bf16[65536,6144]{1,0} bitcast(transpose.127) dot.52 = bf16[65536,4096]{0,1} dot(bitcast.1416, bitcast.1420), lhs_contracting_dims={1}, rhs_contracting_dims={0} bitcast.1424 = bf16[32,2048,4096]{1,0,2} bitcast(dot.52) reduce-scatter.2 = bf16[32,2048,2048]{1,0,2} reduce-scatter(bitcast.1424), channel_id=324, replica_groups={{0,1},{2,3},{4,5},{6,7}}, use_global_device_ids=true, dimensions={2}, to_apply=add bitcast.1851 = bf16[2048,32,2048]{2,1,0} bitcast(reduce-scatter.2) multiply.732 = bf16[2048,32,2048]{2,1,0} multiply(bitcast.1851, broadcast.1338) convert.308 = f32[2048,32,2048]{2,1,0} convert(multiply.732) multiply.727 = f32[2048,32,2048]{2,1,0} multiply(subtract.224, convert.308) bitcast.1434 = f32[32,2048,2048]{1,0,2} bitcast(multiply.727) reduce.100 = f32[32,2048]{1,0} reduce(bitcast.1434, constant.1968), dimensions={2}, to_apply=region_20.962 all-reduce.33 = 
f32[32,2048]{1,0} all-reduce(reduce.100), channel_id=78, replica_groups={{0,1},{2,3},{4,5},{6,7}}, use_global_device_ids=true, to_apply=region_20.962 bitcast.1439 = f32[1,32,2048]{2,1,0} bitcast(all-reduce.33) divide.110 = f32[1,32,2048]{2,1,0} divide(rsqrt.20, bitcast.1447) multiply.728 = f32[1,32,2048]{2,1,0} multiply(divide.110, broadcast.1313) multiply.729 = f32[1,32,2048]{2,1,0} multiply(bitcast.1439, multiply.728) multiply.730 = f32[1,32,2048]{2,1,0} multiply(multiply.729, broadcast.1315) bitcast.1485 = f32[32,2048]{1,0} bitcast(multiply.730) broadcast.1321 = f32[2048,32,2048]{2,1,0} broadcast(bitcast.1485), dimensions={1,2} multiply.734 = f32[2048,32,2048]{2,1,0} multiply(subtract.224, broadcast.1321) multiply.735 = f32[2048,32,2048]{2,1,0} multiply(convert.308, broadcast.1337) add.625 = f32[2048,32,2048]{2,1,0} add(multiply.734, multiply.735) negate.62 = f32[2048,32,2048]{2,1,0} negate(multiply.734) bitcast.1491 = f32[32,2048,2048]{1,0,2} bitcast(negate.62) reduce.101 = f32[32,2048]{1,0} reduce(bitcast.1491, constant.1968), dimensions={2}, to_apply=region_20.962 negate.63 = f32[2048,32,2048]{2,1,0} negate(multiply.735) bitcast.1505 = f32[32,2048,2048]{1,0,2} bitcast(negate.63) reduce.102 = f32[32,2048]{1,0} reduce(bitcast.1505, constant.1968), dimensions={2}, to_apply=region_20.962 add.626 = f32[32,2048]{1,0} add(reduce.101, reduce.102) all-reduce.275 = f32[32,2048]{1,0} all-reduce(add.626), channel_id=336, replica_groups={{0,1},{2,3},{4,5},{6,7}}, use_global_device_ids=true, to_apply=region_20.962 multiply.736 = f32[32,2048]{1,0} multiply(all-reduce.275, broadcast.1243) broadcast.1323 = f32[2048,32,2048]{2,1,0} broadcast(multiply.736), dimensions={1,2} add.628 = f32[2048,32,2048]{2,1,0} add(add.625, broadcast.1323) convert.309 = bf16[2048,32,2048]{2,1,0} convert(add.628) add.629 = bf16[2048,32,2048]{2,1,0} add(add.616, convert.309) bitcast.1525 = bf16[32,2048,2048]{1,0,2} bitcast(add.629) get-tuple-element.47 = bf16[24,8192]{1,0} get-tuple-element(param.3), index=2 reduce.103 = bf16[8192]{0} reduce(add.635, constant.2006), dimensions={0,1}, to_apply=add all-reduce.36 = bf16[8192]{0} all-reduce(reduce.103), channel_id=81, replica_groups={{0,2,4,6},{1,3,5,7}}, use_global_device_ids=true, to_apply=add bitcast.1583 = bf16[1,8192]{1,0} bitcast(all-reduce.36) dynamic-update-slice.28 = bf16[24,8192]{1,0} dynamic-update-slice(get-tuple-element.47, bitcast.1583, select.372, constant.1980) get-tuple-element.48 = bf16[24,1024,8192]{2,1,0} get-tuple-element(param.3), index=3 all-gather.16 = bf16[32,2048,4096]{1,0,2} all-gather(bitcast.1620), channel_id=82, replica_groups={{0,1},{2,3},{4,5},{6,7}}, dimensions={2}, use_global_device_ids=true bitcast.1625 = bf16[4096,65536]{1,0} bitcast(all-gather.16) dot.53 = bf16[4096,8192]{1,0} dot(bitcast.1625, bitcast.1629), lhs_contracting_dims={1}, rhs_contracting_dims={0} reduce-scatter.3 = bf16[1024,8192]{1,0} reduce-scatter(dot.53), channel_id=325, replica_groups={{0,2,4,6},{1,3,5,7}}, use_global_device_ids=true, dimensions={0}, to_apply=add bitcast.1634 = bf16[1,1024,8192]{2,1,0} bitcast(reduce-scatter.3) dynamic-update-slice.29 = bf16[24,1024,8192]{2,1,0} dynamic-update-slice(get-tuple-element.48, bitcast.1634, select.372, constant.1980, constant.1980) get-tuple-element.49 = bf16[24,1024]{0,1} get-tuple-element(param.3), index=4 collective-permute.2 = bf16[24,1024]{0,1} collective-permute(get-tuple-element.49), channel_id=85, source_target_pairs={{0,0},{1,2},{2,4},{3,6},{4,1},{5,3},{6,5},{7,7}} all-gather.17 = bf16[24,2048]{0,1} 
all-gather(collective-permute.2), channel_id=86, replica_groups={{0,4},{2,6},{1,5},{3,7}}, dimensions={1}, use_global_device_ids=true bitcast.1649 = bf16[2048,24]{1,0} bitcast(all-gather.17) reduce.104 = bf16[2048]{0} reduce(bitcast.1739, constant.2006), dimensions={0,1}, to_apply=add all-reduce.38 = bf16[2048]{0} all-reduce(reduce.104), channel_id=84, replica_groups={{0,2,4,6},{1,3,5,7}}, use_global_device_ids=true, to_apply=add bitcast.1671 = bf16[2048,1]{1,0} bitcast(all-reduce.38) dynamic-update-slice.30 = bf16[2048,24]{1,0} dynamic-update-slice(bitcast.1649, bitcast.1671, constant.1980, select.372) constant.2013 = s32[8]{0} constant({0, 2048, 0, 2048, 1024, 3072, 1024, 3072}) partition-id.3 = u32[] partition-id() dynamic-slice.336 = s32[1]{0} dynamic-slice(constant.2013, partition-id.3), dynamic_slice_sizes={1} constant.2014 = s32[8]{0} constant({0, 2048, 0, 2048, 0, 2048, 0, 2048}) dynamic-slice.337 = s32[1]{0} dynamic-slice(constant.2014, partition-id.3), dynamic_slice_sizes={1} subtract.232 = s32[1]{0} subtract(dynamic-slice.336, dynamic-slice.337) bitcast.2087 = s32[] bitcast(subtract.232) dynamic-slice.338 = bf16[1024,24]{1,0} dynamic-slice(dynamic-update-slice.30, bitcast.2087, constant.1980), dynamic_slice_sizes={1024,24} bitcast.1695 = bf16[24,1024]{0,1} bitcast(dynamic-slice.338) collective-permute.9 = bf16[24,1024]{0,1} collective-permute(bitcast.1695), channel_id=109, source_target_pairs={{0,0},{2,1},{4,2},{6,3},{1,4},{3,5},{5,6},{7,7}} get-tuple-element.50 = bf16[24,8192,1024]{1,2,0} get-tuple-element(param.3), index=5 bitcast.1698 = bf16[24,1024,8192]{2,1,0} bitcast(get-tuple-element.50) multiply.748 = bf16[32,2048,8192]{2,1,0} multiply(bitcast.997, multiply.747) multiply.749 = bf16[32,2048,8192]{2,1,0} multiply(multiply.748, broadcast.1335) bitcast.1735 = bf16[8192,65536]{0,1} bitcast(multiply.749) all-gather.18 = bf16[32,2048,4096]{1,0,2} all-gather(bitcast.1739), channel_id=87, replica_groups={{0,1},{2,3},{4,5},{6,7}}, dimensions={2}, use_global_device_ids=true bitcast.1743 = bf16[65536,4096]{0,1} bitcast(all-gather.18) dot.54 = bf16[8192,4096]{0,1} dot(bitcast.1735, bitcast.1743), lhs_contracting_dims={1}, rhs_contracting_dims={0} reduce-scatter.4 = bf16[8192,1024]{0,1} reduce-scatter(dot.54), channel_id=326, replica_groups={{0,2,4,6},{1,3,5,7}}, use_global_device_ids=true, dimensions={1}, to_apply=add bitcast.1748 = bf16[1,1024,8192]{2,1,0} bitcast(reduce-scatter.4) dynamic-update-slice.31 = bf16[24,1024,8192]{2,1,0} dynamic-update-slice(bitcast.1698, bitcast.1748, select.372, constant.1980, constant.1980) bitcast.1758 = bf16[24,8192,1024]{1,2,0} bitcast(dynamic-update-slice.31) get-tuple-element.51 = bf16[24,1024]{0,1} get-tuple-element(param.3), index=6 collective-permute.3 = bf16[24,1024]{0,1} collective-permute(get-tuple-element.51), channel_id=90, source_target_pairs={{0,0},{1,2},{2,4},{3,6},{4,1},{5,3},{6,5},{7,7}} all-gather.19 = bf16[24,2048]{0,1} all-gather(collective-permute.3), channel_id=91, replica_groups={{0,4},{2,6},{1,5},{3,7}}, dimensions={1}, use_global_device_ids=true bitcast.1763 = bf16[2048,24]{1,0} bitcast(all-gather.19) reduce.105 = bf16[2048]{0} reduce(reduce-scatter.1, constant.2006), dimensions={0,1}, to_apply=add all-reduce.40 = bf16[2048]{0} all-reduce(reduce.105), channel_id=89, replica_groups={{0,2,4,6},{1,3,5,7}}, use_global_device_ids=true, to_apply=add bitcast.1779 = bf16[2048,1]{1,0} bitcast(all-reduce.40) dynamic-update-slice.32 = bf16[2048,24]{1,0} dynamic-update-slice(bitcast.1763, bitcast.1779, constant.1980, select.372) 
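// Slice the partition-local [1024,24] shard of the accumulated gradient (dynamic-slice.339) and collective-permute it back to the owning partition.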
dynamic-slice.339 = bf16[1024,24]{1,0} dynamic-slice(dynamic-update-slice.32, bitcast.2087, constant.1980), dynamic_slice_sizes={1024,24} bitcast.1794 = bf16[24,1024]{0,1} bitcast(dynamic-slice.339) collective-permute.10 = bf16[24,1024]{0,1} collective-permute(bitcast.1794), channel_id=110, source_target_pairs={{0,0},{2,1},{4,2},{6,3},{1,4},{3,5},{5,6},{7,7}} get-tuple-element.52 = bf16[24,1024]{0,1} get-tuple-element(param.3), index=7 collective-permute.4 = bf16[24,1024]{0,1} collective-permute(get-tuple-element.52), channel_id=93, source_target_pairs={{0,0},{1,2},{2,4},{3,6},{4,1},{5,3},{6,5},{7,7}} all-gather.20 = bf16[24,2048]{0,1} all-gather(collective-permute.4), channel_id=94, replica_groups={{0,4},{2,6},{1,5},{3,7}}, dimensions={1}, use_global_device_ids=true bitcast.1801 = bf16[2048,24]{1,0} bitcast(all-gather.20) multiply.751 = bf16[2048,32,2048]{2,1,0} multiply(convert.312, bitcast.1766) bitcast.1817 = bf16[32,2048,2048]{1,0,2} bitcast(multiply.751) reduce.106 = bf16[2048]{0} reduce(bitcast.1817, constant.2006), dimensions={0,1}, to_apply=add all-reduce.41 = bf16[2048]{0} all-reduce(reduce.106), channel_id=92, replica_groups={{0,2,4,6},{1,3,5,7}}, use_global_device_ids=true, to_apply=add bitcast.1826 = bf16[2048,1]{1,0} bitcast(all-reduce.41) dynamic-update-slice.33 = bf16[2048,24]{1,0} dynamic-update-slice(bitcast.1801, bitcast.1826, constant.1980, select.372) dynamic-slice.340 = bf16[1024,24]{1,0} dynamic-slice(dynamic-update-slice.33, bitcast.2087, constant.1980), dynamic_slice_sizes={1024,24} bitcast.1841 = bf16[24,1024]{0,1} bitcast(dynamic-slice.340) collective-permute.11 = bf16[24,1024]{0,1} collective-permute(bitcast.1841), channel_id=111, source_target_pairs={{0,0},{2,1},{4,2},{6,3},{1,4},{3,5},{5,6},{7,7}} get-tuple-element.53 = bf16[24,1024]{0,1} get-tuple-element(param.3), index=8 collective-permute.5 = bf16[24,1024]{0,1} collective-permute(get-tuple-element.53), channel_id=96, source_target_pairs={{0,0},{1,2},{2,4},{3,6},{4,1},{5,3},{6,5},{7,7}} all-gather.21 = bf16[24,2048]{0,1} all-gather(collective-permute.5), channel_id=97, replica_groups={{0,4},{2,6},{1,5},{3,7}}, dimensions={1}, use_global_device_ids=true bitcast.1848 = bf16[2048,24]{1,0} bitcast(all-gather.21) reduce.107 = bf16[2048]{0} reduce(reduce-scatter.2, constant.2006), dimensions={0,1}, to_apply=add all-reduce.42 = bf16[2048]{0} all-reduce(reduce.107), channel_id=95, replica_groups={{0,2,4,6},{1,3,5,7}}, use_global_device_ids=true, to_apply=add bitcast.1864 = bf16[2048,1]{1,0} bitcast(all-reduce.42) dynamic-update-slice.34 = bf16[2048,24]{1,0} dynamic-update-slice(bitcast.1848, bitcast.1864, constant.1980, select.372) dynamic-slice.341 = bf16[1024,24]{1,0} dynamic-slice(dynamic-update-slice.34, bitcast.2087, constant.1980), dynamic_slice_sizes={1024,24} bitcast.1879 = bf16[24,1024]{0,1} bitcast(dynamic-slice.341) collective-permute.12 = bf16[24,1024]{0,1} collective-permute(bitcast.1879), channel_id=112, source_target_pairs={{0,0},{2,1},{4,2},{6,3},{1,4},{3,5},{5,6},{7,7}} get-tuple-element.54 = bf16[24,1024]{0,1} get-tuple-element(param.3), index=9 collective-permute.6 = bf16[24,1024]{0,1} collective-permute(get-tuple-element.54), channel_id=99, source_target_pairs={{0,0},{1,2},{2,4},{3,6},{4,1},{5,3},{6,5},{7,7}} all-gather.22 = bf16[24,2048]{0,1} all-gather(collective-permute.6), channel_id=100, replica_groups={{0,4},{2,6},{1,5},{3,7}}, dimensions={1}, use_global_device_ids=true bitcast.1886 = bf16[2048,24]{1,0} bitcast(all-gather.22) multiply.753 = bf16[2048,32,2048]{2,1,0} multiply(convert.314, 
bitcast.1851) bitcast.1905 = bf16[32,2048,2048]{1,0,2} bitcast(multiply.753) reduce.108 = bf16[2048]{0} reduce(bitcast.1905, constant.2006), dimensions={0,1}, to_apply=add all-reduce.43 = bf16[2048]{0} all-reduce(reduce.108), channel_id=98, replica_groups={{0,2,4,6},{1,3,5,7}}, use_global_device_ids=true, to_apply=add bitcast.1914 = bf16[2048,1]{1,0} bitcast(all-reduce.43) dynamic-update-slice.35 = bf16[2048,24]{1,0} dynamic-update-slice(bitcast.1886, bitcast.1914, constant.1980, select.372) dynamic-slice.342 = bf16[1024,24]{1,0} dynamic-slice(dynamic-update-slice.35, bitcast.2087, constant.1980), dynamic_slice_sizes={1024,24} bitcast.1929 = bf16[24,1024]{0,1} bitcast(dynamic-slice.342) collective-permute.13 = bf16[24,1024]{0,1} collective-permute(bitcast.1929), channel_id=113, source_target_pairs={{0,0},{2,1},{4,2},{6,3},{1,4},{3,5},{5,6},{7,7}} get-tuple-element.55 = bf16[24,3,16,128]{3,2,1,0} get-tuple-element(param.3), index=10 bitcast.1979 = bf16[3,32,2048,16,128]{4,2,3,1,0} bitcast(add.639) reduce.109 = bf16[3,16,128]{2,1,0} reduce(bitcast.1979, constant.2006), dimensions={1,2}, to_apply=add all-reduce.44 = bf16[3,16,128]{2,1,0} all-reduce(reduce.109), channel_id=101, replica_groups={{0,2,4,6},{1,3,5,7}}, use_global_device_ids=true, to_apply=add bitcast.1963 = bf16[1,3,16,128]{3,2,1,0} bitcast(all-reduce.44) dynamic-update-slice.36 = bf16[24,3,16,128]{3,2,1,0} dynamic-update-slice(get-tuple-element.55, bitcast.1963, select.372, constant.1980, constant.1980, constant.1980) get-tuple-element.56 = bf16[24,3,1024,16,128]{4,3,1,2,0} get-tuple-element(param.3), index=11 bitcast.1974 = bf16[24,1024,3,16,128]{4,3,2,1,0} bitcast(get-tuple-element.56) transpose.130 = bf16[3,16,128,32,2048]{4,3,2,1,0} transpose(add.639), dimensions={0,2,4,1,3} bitcast.1983 = bf16[6144,65536]{1,0} bitcast(transpose.130) all-gather.23 = bf16[32,2048,4096]{1,0,2} all-gather(bitcast.2003), channel_id=102, replica_groups={{0,1},{2,3},{4,5},{6,7}}, dimensions={2}, use_global_device_ids=true bitcast.2007 = bf16[65536,4096]{0,1} bitcast(all-gather.23) dot.55 = bf16[6144,4096]{0,1} dot(bitcast.1983, bitcast.2007), lhs_contracting_dims={1}, rhs_contracting_dims={0} bitcast.2011 = bf16[3,16,128,4096]{2,1,0,3} bitcast(dot.55) reduce-scatter.5 = bf16[3,16,128,1024]{2,1,0,3} reduce-scatter(bitcast.2011), channel_id=327, replica_groups={{0,2,4,6},{1,3,5,7}}, use_global_device_ids=true, dimensions={3}, to_apply=add bitcast.2015 = bf16[1,1024,3,16,128]{4,3,2,1,0} bitcast(reduce-scatter.5) dynamic-update-slice.37 = bf16[24,1024,3,16,128]{4,3,2,1,0} dynamic-update-slice(bitcast.1974, bitcast.2015, select.372, constant.1980, constant.1980, constant.1980, constant.1980) bitcast.2025 = bf16[24,3,1024,16,128]{4,3,1,2,0} bitcast(dynamic-update-slice.37) get-tuple-element.57 = bf16[24,1024]{1,0} get-tuple-element(param.3), index=12 reduce.110 = bf16[2048]{0} reduce(bitcast.2063, constant.2006), dimensions={0,1}, to_apply=add all-reduce.46 = bf16[2048]{0} all-reduce(reduce.110), channel_id=104, replica_groups={{0,2,4,6},{1,3,5,7}}, use_global_device_ids=true, to_apply=add dynamic-slice.343 = bf16[1024]{0} dynamic-slice(all-reduce.46, bitcast.2087), dynamic_slice_sizes={1024} bitcast.2046 = bf16[1,1024]{1,0} bitcast(dynamic-slice.343) collective-permute.7 = bf16[1,1024]{1,0} collective-permute(bitcast.2046), channel_id=105, source_target_pairs={{0,0},{2,1},{4,2},{6,3},{1,4},{3,5},{5,6},{7,7}} dynamic-update-slice.38 = bf16[24,1024]{1,0} dynamic-update-slice(get-tuple-element.57, collective-permute.7, select.372, constant.1980) 
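// The block below forms dot.56 from add.619 and the gathered dot.44 result, all-reduces it, and writes the partition-local slice into get-tuple-element.58 via dynamic-update-slice.39.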
get-tuple-element.58 = bf16[24,1024,16,128]{3,2,1,0} get-tuple-element(param.3), index=13 bitcast.2066 = bf16[2048,65536]{1,0} bitcast(add.619) transpose.133 = bf16[16,32,2048,128]{3,2,1,0} transpose(dot.44), dimensions={1,0,3,2} bitcast.2072 = bf16[32,2048,16,128]{3,1,0,2} bitcast(transpose.133) all-gather.24 = bf16[32,2048,32,128]{3,1,0,2} all-gather(bitcast.2072), channel_id=106, replica_groups={{0,1},{2,3},{4,5},{6,7}}, dimensions={2}, use_global_device_ids=true bitcast.2073 = bf16[32,32,2048,128]{3,2,1,0} bitcast(all-gather.24) transpose.134 = bf16[32,2048,32,128]{3,2,1,0} transpose(bitcast.2073), dimensions={1,2,0,3} bitcast.2077 = bf16[65536,4096]{1,0} bitcast(transpose.134) dot.56 = bf16[2048,4096]{1,0} dot(bitcast.2066, bitcast.2077), lhs_contracting_dims={1}, rhs_contracting_dims={0} bitcast.2081 = bf16[2048,32,128]{2,1,0} bitcast(dot.56) all-reduce.47 = bf16[2048,32,128]{2,1,0} all-reduce(bitcast.2081), channel_id=107, replica_groups={{0,2,4,6},{1,3,5,7}}, use_global_device_ids=true, to_apply=add constant.2015 = s32[8]{0} constant({0, 0, 16, 16, 0, 0, 16, 16}) dynamic-slice.344 = s32[1]{0} dynamic-slice(constant.2015, partition-id.3), dynamic_slice_sizes={1} bitcast.2095 = s32[] bitcast(dynamic-slice.344) dynamic-slice.345 = bf16[1024,16,128]{2,1,0} dynamic-slice(all-reduce.47, bitcast.2087, bitcast.2095, constant.1980), dynamic_slice_sizes={1024,16,128} bitcast.2102 = bf16[1,1024,16,128]{3,2,1,0} bitcast(dynamic-slice.345) collective-permute.8 = bf16[1,1024,16,128]{3,2,1,0} collective-permute(bitcast.2102), channel_id=108, source_target_pairs={{0,0},{2,1},{4,2},{6,3},{1,4},{3,5},{5,6},{7,7}} dynamic-update-slice.39 = bf16[24,1024,16,128]{3,2,1,0} dynamic-update-slice(get-tuple-element.58, collective-permute.8, select.372, constant.1980, constant.1980, constant.1980) ROOT tuple.2 = (s32[], bf16[32,2048,2048]{1,0,2}, bf16[24,8192]{1,0}, bf16[24,1024,8192]{2,1,0}, bf16[24,1024]{0,1}, bf16[24,8192,1024]{1,2,0}, bf16[24,1024]{0,1}, bf16[24,1024]{0,1}, bf16[24,1024]{0,1}, bf16[24,1024]{0,1}, bf16[24,3,16,128]{3,2,1,0}, bf16[24,3,1024,16,128]{4,3,1,2,0}, bf16[24,1024]{1,0}, bf16[24,1024,16,128]{3,2,1,0}, bf16[24,8192]{1,0}, bf16[24,1024,8192]{2,1,0}, bf16[24,8192,1024]{1,2,0}, bf16[24,2048]{1,0}, bf16[24,2048]{1,0}, bf16[24,2048]{1,0}, bf16[24,2048]{1,0}, bf16[24,3,16,128]{3,2,1,0}, bf16[24,3,1024,16,128]{4,3,1,2,0}, bf16[24,1024]{1,0}, bf16[24,1024,16,128]{3,2,1,0}, bf16[24,32,2048,2048]{2,1,3,0}, bf16[32,1,2048,2048]{3,2,0,1}, bf16[32,2048]{1,0}) tuple(add.581, bitcast.1525, dynamic-update-slice.28, dynamic-update-slice.29, collective-permute.9, bitcast.1758, collective-permute.10, collective-permute.11, collective-permute.12, collective-permute.13, dynamic-update-slice.36, bitcast.2025, dynamic-update-slice.38, dynamic-update-slice.39, get-tuple-element.45, get-tuple-element.44, get-tuple-element.46, get-tuple-element.43, get-tuple-element.42, get-tuple-element.37, get-tuple-element.36, get-tuple-element.38, get-tuple-element.35, get-tuple-element.41, get-tuple-element.40, get-tuple-element.32, get-tuple-element.39, get-tuple-element.33) } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(), GetCudnnVersion()}; TF_ASSERT_OK(RunHloPass(&fusedMhaRewriter, m.get()).status()); ComputationLayout computation_layout( m->entry_computation()->ComputeProgramShape()); HloInstruction* fwd_instruction = nullptr; HloInstruction* bwd_instruction = nullptr; SCOPED_TRACE(m->ToString()); for (HloInstruction* 
instr : m->entry_computation()->MakeInstructionPostOrder()) { if (instr->opcode() == HloOpcode::kCustomCall && instr->custom_call_target() == kCudnnfMHASoftmaxCallTarget) { fwd_instruction = instr; } if (instr->opcode() == HloOpcode::kCustomCall && instr->custom_call_target() == kCudnnfMHASoftmaxBackwardCallTarget) { bwd_instruction = instr; } } EXPECT_NE(fwd_instruction, nullptr); EXPECT_NE(bwd_instruction, nullptr); TF_ASSERT_OK_AND_ASSIGN(auto gpu_config, fwd_instruction->backend_config<GpuBackendConfig>()); const CudnnfMHABackendConfig& config = gpu_config.cudnn_fmha_backend_config(); EXPECT_EQ(config.mask_type(), CudnnfMHABackendConfig::CAUSAL); } TEST_F(CudnnFusedMhaRewriterTestHloTest, BF16TrainingBmm2CanonicalizationRestoreFwdGraph) { if (skip_reason_) GTEST_SKIP() << *skip_reason_; const char* module_str = R"( HloModule pjit__unnamed_function_, entry_computation_layout={(bf16[2,256,4,64]{3,2,1,0}, bf16[2,256,4,64]{3,2,1,0}, bf16[2,256,4,64]{3,2,1,0}, bf16[2,256,4,64]{3,2,1,0}, bf16[2,4,256,256]{3,2,1,0})->(bf16[4,256,8,64]{3,2,1,0}, bf16[2,256,4,64]{3,2,1,0}, bf16[2,256,4,64]{3,2,1,0}, bf16[2,256,4,64]{3,2,1,0})}, allow_spmd_sharding_propagation_to_output={false,false,false,false}, num_partitions=4 region_0.6 { Arg_0.7 = bf16[] parameter(0) Arg_1.8 = bf16[] parameter(1) ROOT maximum.5 = bf16[] maximum(Arg_0.7, Arg_1.8) } region_1.10 { Arg_0.11 = f32[] parameter(0) Arg_1.12 = f32[] parameter(1) ROOT add.14 = f32[] add(Arg_0.11, Arg_1.12) } add.clone { x.1 = u32[] parameter(0) y.1 = u32[] parameter(1) ROOT add.15 = u32[] add(x.1, y.1) } region_2.65 { Arg_0.66 = bf16[] parameter(0) Arg_1.67 = bf16[] parameter(1) ROOT add.16 = bf16[] add(Arg_0.66, Arg_1.67) } ENTRY main.164_spmd { param = bf16[2,256,4,64]{3,2,1,0} parameter(2), sharding={devices=[2,1,2,1]<=[4]} transpose.26 = bf16[2,4,64,256]{3,2,1,0} transpose(param), dimensions={0,2,3,1} param.1 = bf16[2,256,4,64]{3,2,1,0} parameter(0), sharding={devices=[2,1,2,1]<=[4]} transpose.27 = bf16[2,4,256,64]{3,2,1,0} transpose(param.1), dimensions={0,2,1,3} constant.46 = bf16[] constant(0.5) broadcast.126 = bf16[2,4,256,64]{3,2,1,0} broadcast(constant.46), dimensions={} multiply.34 = bf16[2,4,256,64]{3,2,1,0} multiply(transpose.27, broadcast.126) param.2 = bf16[2,256,4,64]{3,2,1,0} parameter(1), sharding={devices=[2,1,2,1]<=[4]} transpose.29 = bf16[2,4,64,256]{3,2,1,0} transpose(param.2), dimensions={0,2,3,1} dot.12 = bf16[2,4,256,256]{3,2,1,0} dot(multiply.34, transpose.29), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} param.3 = bf16[2,4,256,256]{3,2,1,0} parameter(4), sharding={devices=[2,2,1,1]<=[4]} add.17 = bf16[2,4,256,256]{3,2,1,0} add(dot.12, param.3) constant.47 = bf16[] constant(-inf) reduce.4 = bf16[2,4,256]{2,1,0} reduce(add.17, constant.47), dimensions={3}, to_apply=region_0.6 broadcast.127 = bf16[2,4,256,256]{3,2,1,0} broadcast(reduce.4), dimensions={0,1,2} subtract.14 = bf16[2,4,256,256]{3,2,1,0} subtract(add.17, broadcast.127) exponential.2 = bf16[2,4,256,256]{3,2,1,0} exponential(subtract.14) convert.46 = f32[2,4,256,256]{3,2,1,0} convert(exponential.2) constant.48 = f32[] constant(0) reduce.5 = f32[2,4,256]{2,1,0} reduce(convert.46, constant.48), dimensions={3}, to_apply=region_1.10 convert.47 = bf16[2,4,256]{2,1,0} convert(reduce.5) broadcast.128 = bf16[2,4,256,256]{3,2,1,0} broadcast(convert.47), dimensions={0,1,2} divide.7 = bf16[2,4,256,256]{3,2,1,0} divide(exponential.2, broadcast.128) broadcast.129 = f32[4096]{0} broadcast(constant.48), dimensions={} constant.50 = 
u32[] constant(0) broadcast.131 = u32[8192]{0} broadcast(constant.50), dimensions={} broadcast.133 = u32[4096]{0} broadcast(constant.50), dimensions={} iota.3 = u32[8192]{0} iota(), iota_dimension=0 slice.14 = u32[4096]{0} slice(iota.3), slice={[0:4096]} slice.15 = u32[4096]{0} slice(iota.3), slice={[4096:8192]} custom-call.3 = (u32[4096]{0}, u32[4096]{0}) custom-call(broadcast.133, broadcast.133, slice.14, slice.15), custom_call_target="cu_threefry2x32", operand_layout_constraints={u32[4096]{0}, u32[4096]{0}, u32[4096]{0}, u32[4096]{0}}, api_version=API_VERSION_STATUS_RETURNING, backend_config="\000\020\000\000\000\000\000\000" get-tuple-element.6 = u32[4096]{0} get-tuple-element(custom-call.3), index=0 constant.115 = u32[1]{0} constant({0}) constant.52 = u32[4]{0} constant({0, 0, 1, 1}) partition-id = u32[] partition-id() dynamic-slice.21 = u32[1]{0} dynamic-slice(constant.52, partition-id), dynamic_slice_sizes={1} constant.116 = u32[1]{0} constant({1}) clamp.3 = u32[1]{0} clamp(constant.115, dynamic-slice.21, constant.116) convert.48 = s32[1]{0} convert(clamp.3) constant.117 = s32[1]{0} constant({2048}) multiply.35 = s32[1]{0} multiply(convert.48, constant.117) bitcast.105 = s32[] bitcast(multiply.35) dynamic-slice.22 = u32[2048]{0} dynamic-slice(get-tuple-element.6, bitcast.105), dynamic_slice_sizes={2048} constant.58 = s32[4]{0} constant({0, 0, 1, 1}) dynamic-slice.23 = s32[1]{0} dynamic-slice(constant.58, partition-id), dynamic_slice_sizes={1} multiply.36 = s32[1]{0} multiply(dynamic-slice.23, constant.117) bitcast.108 = s32[] bitcast(multiply.36) dynamic-update-slice.2 = u32[8192]{0} dynamic-update-slice(broadcast.131, dynamic-slice.22, bitcast.108) get-tuple-element.7 = u32[4096]{0} get-tuple-element(custom-call.3), index=1 dynamic-slice.24 = u32[2048]{0} dynamic-slice(get-tuple-element.7, bitcast.105), dynamic_slice_sizes={2048} constant.65 = s32[] constant(4096) add.18 = s32[] add(bitcast.108, constant.65) dynamic-update-slice.3 = u32[8192]{0} dynamic-update-slice(dynamic-update-slice.2, dynamic-slice.24, add.18) all-reduce = u32[8192]{0} all-reduce(dynamic-update-slice.3), channel_id=1, replica_groups={{0,1,2,3}}, use_global_device_ids=true, to_apply=add.clone constant.118 = s32[1]{0} constant({4096}) multiply.37 = s32[1]{0} multiply(dynamic-slice.23, constant.118) bitcast.119 = s32[] bitcast(multiply.37) dynamic-slice.25 = u32[4096]{0} dynamic-slice(all-reduce, bitcast.119), dynamic_slice_sizes={4096} constant.69 = u32[] constant(9) broadcast.134 = u32[4096]{0} broadcast(constant.69), dimensions={} shift-right-logical.6 = u32[4096]{0} shift-right-logical(dynamic-slice.25, broadcast.134) constant.70 = u32[] constant(1065353216) broadcast.135 = u32[4096]{0} broadcast(constant.70), dimensions={} or.5 = u32[4096]{0} or(shift-right-logical.6, broadcast.135) bitcast-convert.5 = f32[4096]{0} bitcast-convert(or.5) constant.71 = f32[] constant(-1) broadcast.136 = f32[4096]{0} broadcast(constant.71), dimensions={} add.19 = f32[4096]{0} add(bitcast-convert.5, broadcast.136) maximum.6 = f32[4096]{0} maximum(broadcast.129, add.19) constant.72 = f32[] constant(0.5) broadcast.137 = f32[4096]{0} broadcast(constant.72), dimensions={} compare.4 = pred[4096]{0} compare(maximum.6, broadcast.137), direction=LT bitcast.135 = pred[2,8,256]{2,1,0} bitcast(compare.4) convert.49 = bf16[2,8,256]{2,1,0} convert(bitcast.135) constant.80 = s32[] constant(0) constant.78 = s32[4]{0} constant({0, 4, 0, 4}) dynamic-slice.26 = s32[1]{0} dynamic-slice(constant.78, partition-id), dynamic_slice_sizes={1} 
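// bitcast.181 and dynamic-slice.27 select the partition-local dropout keep mask; multiply.38 applies it to the softmax output and multiply.39 rescales by constant.93 = 2, i.e. 1/keep_prob for the 0.5 dropout.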
bitcast.181 = s32[] bitcast(dynamic-slice.26) dynamic-slice.27 = bf16[2,4,256]{2,1,0} dynamic-slice(convert.49, constant.80, bitcast.181, constant.80), dynamic_slice_sizes={2,4,256} broadcast.139 = bf16[2,4,256,256]{3,2,1,0} broadcast(dynamic-slice.27), dimensions={0,1,3} multiply.38 = bf16[2,4,256,256]{3,2,1,0} multiply(divide.7, broadcast.139) constant.93 = bf16[] constant(2) broadcast.141 = bf16[2,4,256,256]{3,2,1,0} broadcast(constant.93), dimensions={} multiply.39 = bf16[2,4,256,256]{3,2,1,0} multiply(multiply.38, broadcast.141) dot.13 = bf16[2,4,64,256]{3,2,1,0} dot(transpose.26, multiply.39), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3} transpose.31 = bf16[4,2,64,256]{3,2,1,0} transpose(dot.13), dimensions={1,0,2,3} bitcast.154 = bf16[2,256,4,64]{1,3,0,2} bitcast(transpose.31) all-gather = bf16[2,256,8,64]{1,3,0,2} all-gather(bitcast.154), channel_id=2, replica_groups={{0,1},{2,3}}, dimensions={2}, use_global_device_ids=true bitcast.155 = bf16[8,2,64,256]{3,2,1,0} bitcast(all-gather) transpose.32 = bf16[2,8,64,256]{3,2,1,0} transpose(bitcast.155), dimensions={1,0,2,3} bitcast.157 = bf16[2,256,8,64]{1,3,2,0} bitcast(transpose.32) all-gather.1 = bf16[4,256,8,64]{1,3,2,0} all-gather(bitcast.157), channel_id=3, replica_groups={{0,2},{1,3}}, dimensions={0}, use_global_device_ids=true bitcast.236 = bf16[4,8,64,256]{3,2,1,0} bitcast(all-gather.1) transpose.38 = bf16[4,256,8,64]{3,2,1,0} transpose(bitcast.236), dimensions={0,3,1,2} param.4 = bf16[2,256,4,64]{3,2,1,0} parameter(3), sharding={devices=[2,1,2,1]<=[4]} transpose.33 = bf16[2,4,256,64]{3,2,1,0} transpose(param.4), dimensions={0,2,1,3} dot.14 = bf16[2,4,256,256]{3,2,1,0} dot(transpose.33, transpose.26), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} broadcast.142 = bf16[4096]{0} broadcast(constant.93), dimensions={} constant.95 = bf16[] constant(0) broadcast.143 = bf16[4096]{0} broadcast(constant.95), dimensions={} select.4 = bf16[4096]{0} select(compare.4, broadcast.142, broadcast.143) bitcast.176 = bf16[2,8,256]{2,1,0} bitcast(select.4) dynamic-slice.28 = bf16[2,4,256]{2,1,0} dynamic-slice(bitcast.176, constant.80, bitcast.181, constant.80), dynamic_slice_sizes={2,4,256} broadcast.145 = bf16[2,4,256,256]{3,2,1,0} broadcast(dynamic-slice.28), dimensions={0,1,3} multiply.40 = bf16[2,4,256,256]{3,2,1,0} multiply(dot.14, broadcast.145) divide.8 = bf16[2,4,256,256]{3,2,1,0} divide(multiply.40, broadcast.128) constant.106 = bf16[] constant(1) broadcast.146 = bf16[2,4,256]{2,1,0} broadcast(constant.106), dimensions={} multiply.41 = bf16[2,4,256]{2,1,0} multiply(convert.47, convert.47) divide.9 = bf16[2,4,256]{2,1,0} divide(broadcast.146, multiply.41) broadcast.147 = bf16[2,4,256,256]{3,2,1,0} broadcast(divide.9), dimensions={0,1,2} multiply.42 = bf16[2,4,256,256]{3,2,1,0} multiply(multiply.40, broadcast.147) multiply.43 = bf16[2,4,256,256]{3,2,1,0} multiply(multiply.42, exponential.2) reduce.6 = bf16[2,4,256]{2,1,0} reduce(multiply.43, constant.95), dimensions={3}, to_apply=region_2.65 negate.4 = bf16[2,4,256]{2,1,0} negate(reduce.6) broadcast.148 = bf16[2,4,256,256]{3,2,1,0} broadcast(negate.4), dimensions={0,1,2} add.20 = bf16[2,4,256,256]{3,2,1,0} add(divide.8, broadcast.148) multiply.44 = bf16[2,4,256,256]{3,2,1,0} multiply(add.20, exponential.2) transpose.34 = bf16[2,4,256,64]{3,2,1,0} transpose(param.2), dimensions={0,2,1,3} dot.15 = bf16[2,4,256,64]{3,2,1,0} dot(multiply.44, transpose.34), lhs_batch_dims={0,1}, 
lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} multiply.45 = bf16[2,4,256,64]{3,2,1,0} multiply(dot.15, broadcast.126) transpose.39 = bf16[2,256,4,64]{3,2,1,0} transpose(multiply.45), dimensions={0,2,1,3} dot.16 = bf16[2,4,256,64]{3,2,1,0} dot(multiply.44, multiply.34), lhs_batch_dims={0,1}, lhs_contracting_dims={2}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} transpose.40 = bf16[2,256,4,64]{3,2,1,0} transpose(dot.16), dimensions={0,2,1,3} transpose.36 = bf16[2,4,64,256]{3,2,1,0} transpose(param.4), dimensions={0,2,3,1} dot.11 = bf16[2,4,64,256]{3,2,1,0} dot(transpose.36, multiply.39), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} transpose.41 = bf16[2,256,4,64]{3,2,1,0} transpose(dot.11), dimensions={0,3,1,2} ROOT tuple.2 = (bf16[4,256,8,64]{3,2,1,0}, bf16[2,256,4,64]{3,2,1,0}, bf16[2,256,4,64]{3,2,1,0}, bf16[2,256,4,64]{3,2,1,0}) tuple(transpose.38, transpose.39, transpose.40, transpose.41) } )"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); CudnnFusedMHARewriter fusedMhaRewriter{GetCudaComputeCapability(), GetCudnnVersion()}; TF_ASSERT_OK(RunHloPass(&fusedMhaRewriter, m.get()).status()); SCOPED_TRACE(m->ToString()); EXPECT_THAT( m->entry_computation()->root_instruction(), GmockMatch(m::Tuple( m::Transpose(), m::Transpose(), m::Transpose(), m::Transpose(m::Dot( m::Op(), m::Op().WithPredicate([](const HloInstruction* instr) { return instr->name() == "multiply.39.fmha_no_match_clone"; })))))); } constexpr absl::string_view hlo_BF16Bmm1BiasSoftmaxBmm2Pattern_dbias = R"( HloModule jit__unnamed_wrapped_function_, entry_computation_layout={(bf16[2,1024,4,64]{3,2,1,0}, bf16[2,1024,4,64]{3,2,1,0}, bf16[2,1024,4,64]{3,2,1,0}, bf16[2,1024,4,64]{3,2,1,0}, bf16[4,1024,1024]{2,1,0})->(bf16[2,1024,4,64]{3,2,1,0}, bf16[2,1024,4,64]{3,2,1,0}, bf16[2,1024,4,64]{3,2,1,0}, bf16[2,1024,4,64]{3,2,1,0}, bf16[4,1024,1024]{2,1,0})}, allow_spmd_sharding_propagation_to_parameters={true,true,true,true,true}, allow_spmd_sharding_propagation_to_output={true,true,true,true,true} region_0.14 { Arg_0.15 = bf16[] parameter(0) Arg_1.16 = bf16[] parameter(1) ROOT maximum = bf16[] maximum(Arg_0.15, Arg_1.16) } region_1.27 { Arg_0.28 = f32[] parameter(0) Arg_1.29 = f32[] parameter(1) ROOT add = f32[] add(Arg_0.28, Arg_1.29) } region_2.56 { Arg_0.57 = bf16[] parameter(0) Arg_1.58 = bf16[] parameter(1) ROOT add.1 = bf16[] add(Arg_0.57, Arg_1.58) } ENTRY main.87 { Arg_2.3 = bf16[2,1024,4,64]{3,2,1,0} parameter(2) transpose.12 = bf16[2,4,64,1024]{3,2,1,0} transpose(Arg_2.3), dimensions={0,2,3,1} Arg_0.1 = bf16[2,1024,4,64]{3,2,1,0} parameter(0) transpose.13 = bf16[2,4,1024,64]{3,2,1,0} transpose(Arg_0.1), dimensions={0,2,1,3} Arg_1.2 = bf16[2,1024,4,64]{3,2,1,0} parameter(1) transpose.15 = bf16[2,4,64,1024]{3,2,1,0} transpose(Arg_1.2), dimensions={0,2,3,1} dot = bf16[2,4,1024,1024]{3,2,1,0} dot(transpose.13, transpose.15), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} Arg_4.5 = bf16[4,1024,1024]{2,1,0} parameter(4) broadcast.9 = bf16[2,4,1024,1024]{3,2,1,0} broadcast(Arg_4.5), dimensions={1,2,3} add.2 = bf16[2,4,1024,1024]{3,2,1,0} add(dot, broadcast.9) constant.10 = bf16[] constant(-inf) reduce.18 = bf16[2,4,1024]{2,1,0} reduce(add.2, constant.10), dimensions={3}, to_apply=region_0.14 broadcast.10 = bf16[2,4,1024,1024]{3,2,1,0} broadcast(reduce.18), dimensions={0,1,2} subtract = bf16[2,4,1024,1024]{3,2,1,0} subtract(add.2, broadcast.10) exponential = 
bf16[2,4,1024,1024]{3,2,1,0} exponential(subtract) convert.5 = f32[2,4,1024,1024]{3,2,1,0} convert(exponential) constant.9 = f32[] constant(0) reduce.31 = f32[2,4,1024]{2,1,0} reduce(convert.5, constant.9), dimensions={3}, to_apply=region_1.27 convert.6 = bf16[2,4,1024]{2,1,0} convert(reduce.31) broadcast.11 = bf16[2,4,1024,1024]{3,2,1,0} broadcast(convert.6), dimensions={0,1,2} divide.2 = bf16[2,4,1024,1024]{3,2,1,0} divide(exponential, broadcast.11) dot.1 = bf16[2,4,64,1024]{3,2,1,0} dot(transpose.12, divide.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={3} transpose.22 = bf16[2,1024,4,64]{3,2,1,0} transpose(dot.1), dimensions={0,3,1,2} Arg_3.4 = bf16[2,1024,4,64]{3,2,1,0} parameter(3) transpose.17 = bf16[2,4,1024,64]{3,2,1,0} transpose(Arg_3.4), dimensions={0,2,1,3} dot.2 = bf16[2,4,1024,1024]{3,2,1,0} dot(transpose.17, transpose.12), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} divide.3 = bf16[2,4,1024,1024]{3,2,1,0} divide(dot.2, broadcast.11) constant.0 = bf16[] constant(1) broadcast.13 = bf16[2,4,1024]{2,1,0} broadcast(constant.0), dimensions={} multiply.2 = bf16[2,4,1024]{2,1,0} multiply(convert.6, convert.6) divide.4 = bf16[2,4,1024]{2,1,0} divide(broadcast.13, multiply.2) broadcast.14 = bf16[2,4,1024,1024]{3,2,1,0} broadcast(divide.4), dimensions={0,1,2} multiply.3 = bf16[2,4,1024,1024]{3,2,1,0} multiply(dot.2, broadcast.14) multiply.4 = bf16[2,4,1024,1024]{3,2,1,0} multiply(multiply.3, exponential) constant.8 = bf16[] constant(0) reduce.60 = bf16[2,4,1024]{2,1,0} reduce(multiply.4, constant.8), dimensions={3}, to_apply=region_2.56 negate.1 = bf16[2,4,1024]{2,1,0} negate(reduce.60) broadcast.15 = bf16[2,4,1024,1024]{3,2,1,0} broadcast(negate.1), dimensions={0,1,2} add.3 = bf16[2,4,1024,1024]{3,2,1,0} add(divide.3, broadcast.15) multiply.5 = bf16[2,4,1024,1024]{3,2,1,0} multiply(add.3, exponential) transpose.18 = bf16[2,4,1024,64]{3,2,1,0} transpose(Arg_1.2), dimensions={0,2,1,3} dot.4 = bf16[2,4,1024,64]{3,2,1,0} dot(multiply.5, transpose.18), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} transpose.23 = bf16[2,1024,4,64]{3,2,1,0} transpose(dot.4), dimensions={0,2,1,3} dot.3 = bf16[2,4,1024,64]{3,2,1,0} dot(multiply.5, transpose.13), lhs_batch_dims={0,1}, lhs_contracting_dims={2}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} transpose.24 = bf16[2,1024,4,64]{3,2,1,0} transpose(dot.3), dimensions={0,2,1,3} transpose.20 = bf16[2,4,64,1024]{3,2,1,0} transpose(Arg_3.4), dimensions={0,2,3,1} dot.49 = bf16[2,4,64,1024]{3,2,1,0} dot(transpose.20, divide.2), lhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_batch_dims={0,1}, rhs_contracting_dims={2} transpose.25 = bf16[2,1024,4,64]{3,2,1,0} transpose(dot.49), dimensions={0,3,1,2} reduce.81 = bf16[4,1024,1024]{2,1,0} reduce(multiply.5, constant.8), dimensions={0}, to_apply=region_2.56 ROOT tuple = (bf16[2,1024,4,64]{3,2,1,0}, bf16[2,1024,4,64]{3,2,1,0}, bf16[2,1024,4,64]{3,2,1,0}, bf16[2,1024,4,64]{3,2,1,0}, bf16[4,1024,1024]{2,1,0}) tuple(transpose.22, transpose.23, transpose.24, transpose.25, reduce.81) } )"; TEST_F(CudnnFusedMhaRewriterTestHloTest, BF16Bmm1BiasSoftmaxBmm2PatternDbias) { if (skip_reason_) GTEST_SKIP() << *skip_reason_; TF_ASSERT_OK_AND_ASSIGN( auto m, ParseAndReturnVerifiedModule(hlo_BF16Bmm1BiasSoftmaxBmm2Pattern_dbias)); CudnnFusedMHARewriter fusedMhaRewriter{se::CudaComputeCapability(9, 0), se::dnn::VersionInfo(9, 0, 0)}; TF_ASSERT_OK(RunHloPass(&fusedMhaRewriter, 
m.get()).status()); ComputationLayout computation_layout( m->entry_computation()->ComputeProgramShape()); const HloInstruction* fmha; SCOPED_TRACE(m->ToString()); EXPECT_THAT( m->entry_computation()->root_instruction(), GmockMatch(m::Tuple( m::Transpose( m::Transpose(m::GetTupleElement( m::CustomCall(&fmha, {kCudnnfMHAScaleBiasSoftmaxCallTarget}), 0))) .WithShape(BF16, {2, 1024, 4, 64}), m::Transpose( m::GetTupleElement( m::CustomCall({kCudnnfMHAScaleBiasSoftmaxBackwardCallTarget}), 0)) .WithShape(BF16, {2, 1024, 4, 64}), m::Transpose( m::GetTupleElement( m::CustomCall({kCudnnfMHAScaleBiasSoftmaxBackwardCallTarget}), 1)) .WithShape(BF16, {2, 1024, 4, 64}), m::Transpose( m::Transpose(m::GetTupleElement( m::CustomCall({kCudnnfMHAScaleBiasSoftmaxBackwardCallTarget}), 2))) .WithShape(BF16, {2, 1024, 4, 64}), m::Reshape( m::GetTupleElement( m::CustomCall({kCudnnfMHAScaleBiasSoftmaxBackwardCallTarget}), 3)) .WithShape(BF16, {4, 1024, 1024})))); TF_ASSERT_OK_AND_ASSIGN(auto gpu_config, fmha->backend_config<GpuBackendConfig>()); const CudnnfMHABackendConfig& config = gpu_config.cudnn_fmha_backend_config(); EXPECT_EQ(fmha->operands().size(), 4); EXPECT_EQ(fmha->operand(3)->shape(), ShapeUtil::MakeShape(BF16, {1, 4, 1024, 1024})); EXPECT_EQ(config.fmha_scale(), 1.0); EXPECT_EQ(config.dropout_rate(), 0.0); EXPECT_EQ(config.mask_type(), CudnnfMHABackendConfig::NO_MASK); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/cudnn_fused_mha_rewriter.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/cudnn_fused_mha_rewriter_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
da101305-6d44-451a-9a8c-94414305ae64
cpp
tensorflow/tensorflow
variadic_op_splitter
third_party/xla/xla/service/gpu/transforms/variadic_op_splitter.cc
third_party/xla/xla/service/gpu/transforms/variadic_op_splitter_test.cc
#include "xla/service/gpu/transforms/variadic_op_splitter.h" #include <cstdint> #include <vector> #include "absl/container/flat_hash_set.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/shape.h" #include "xla/util.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { constexpr int32_t kMaxParameters = 128; absl::StatusOr<bool> SplitConcatenate(HloInstruction* concat, HloComputation* comp) { auto operands = concat->operands(); std::vector<HloInstruction*> operands_to_split(operands.begin(), operands.end()); while (operands_to_split.size() > 1) { std::vector<HloInstruction*> new_operands; absl::Span<HloInstruction*> operands_span(operands_to_split); for (int64_t offset = 0; offset < operands_to_split.size(); offset += kMaxParameters) { if (offset > 0 && offset + kMaxParameters > operands_to_split.size()) { new_operands.insert(new_operands.end(), operands_to_split.begin() + offset, operands_to_split.end()); } else { Shape new_shape = concat->shape(); int64_t concat_dimension_size = 0; for (int64_t i = 0; i < kMaxParameters && offset + i < operands_to_split.size(); ++i) { concat_dimension_size += operands_to_split[i + offset]->shape().dimensions( concat->concatenate_dimension()); } new_shape.set_dimensions(concat->concatenate_dimension(), concat_dimension_size); auto new_concat = comp->AddInstruction(concat->CloneWithNewOperands( new_shape, operands_span.subspan(offset, kMaxParameters))); new_operands.push_back(new_concat); } } operands_to_split = new_operands; } TF_RETURN_IF_ERROR(comp->ReplaceInstruction(concat, operands_to_split[0])); return true; } std::vector<HloInstruction*> GetRelevantVariadicOps(HloComputation* comp) { std::vector<HloInstruction*> ops; for (HloInstruction* instr : comp->instructions()) { if (instr->opcode() == HloOpcode::kConcatenate && instr->operand_count() > kMaxParameters) { ops.push_back(instr); } } return ops; } } absl::StatusOr<bool> VariadicOpSplitter::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { bool changed = false; for (HloComputation* comp : module->MakeNonfusionComputations(execution_threads)) { for (HloInstruction* op : GetRelevantVariadicOps(comp)) { TF_ASSIGN_OR_RETURN(bool result, SplitConcatenate(op, comp)); changed |= result; } } return changed; } } }
#include "xla/service/gpu/transforms/variadic_op_splitter.h" #include <cstdint> #include <vector> #include <gtest/gtest.h> #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/literal_util.h" #include "xla/service/hlo_parser.h" #include "xla/service/pattern_matcher.h" #include "xla/shape_util.h" #include "xla/tests/hlo_test_base.h" #include "xla/util.h" #include "xla/xla_data.pb.h" namespace xla { namespace gpu { namespace { using match::Concatenate; class VariadicOpSplitterTest : public HloTestBase {}; TEST_F(VariadicOpSplitterTest, DontSplit) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { p0 = f16[30,41] parameter(0) p1 = f16[30,41] parameter(1) ROOT result = f16[60, 41] concatenate(p0, p1), dimensions={0} })") .value(); EXPECT_FALSE(VariadicOpSplitter().Run(module.get()).value()); } TEST_F(VariadicOpSplitterTest, SplitInto2) { auto builder = HloComputation::Builder(TestName()); auto operand = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR1<int32_t>({42}))); std::vector<HloInstruction*> concat_operands(255, operand); builder.AddInstruction(HloInstruction::CreateConcatenate( ShapeUtil::MakeShape(S32, {255}), concat_operands, 0)); auto module = CreateNewVerifiedModule(); auto entry_computation = module->AddEntryComputation(builder.Build()); EXPECT_TRUE(VariadicOpSplitter().Run(module.get()).value()); EXPECT_TRUE(Match(entry_computation->root_instruction(), Concatenate().WithNumOperands(128).WithOperand( 0, Concatenate().WithNumOperands(128)))); } TEST_F(VariadicOpSplitterTest, SplitInto3) { auto builder = HloComputation::Builder(TestName()); auto operand = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR1<int32_t>({42}))); std::vector<HloInstruction*> concat_operands(256, operand); builder.AddInstruction(HloInstruction::CreateConcatenate( ShapeUtil::MakeShape(S32, {256}), concat_operands, 0)); auto module = CreateNewVerifiedModule(); auto entry_computation = module->AddEntryComputation(builder.Build()); EXPECT_TRUE(VariadicOpSplitter().Run(module.get()).value()); EXPECT_TRUE(Match(entry_computation->root_instruction(), Concatenate(Concatenate().WithNumOperands(128), Concatenate().WithNumOperands(128)))); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/variadic_op_splitter.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/variadic_op_splitter_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
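The grouping arithmetic in SplitConcatenate is easy to check by hand. Below is a minimal standalone C++ sketch (illustration only, not XLA code; the helper names are invented) that models just the group sizes produced with kMaxParameters = 128: 255 operands collapse to a 128-operand concatenate whose first operand is itself a 128-operand concatenate, and 256 operands collapse to a concatenate of two 128-operand concatenates, matching the SplitInto2 and SplitInto3 tests above.

// Standalone sketch (not part of XLA): mimics how SplitConcatenate groups a
// long operand list into chunks of at most kMaxParameters per concatenate,
// repeating until a single concatenate remains. Only group sizes are modeled;
// no HLO instructions are created.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

constexpr int64_t kMaxParameters = 128;  // mirrors the constant in the pass

std::vector<int64_t> OneSplittingRound(const std::vector<int64_t>& operands) {
  std::vector<int64_t> next;
  for (int64_t offset = 0; offset < static_cast<int64_t>(operands.size());
       offset += kMaxParameters) {
    int64_t remaining = static_cast<int64_t>(operands.size()) - offset;
    if (offset > 0 && remaining < kMaxParameters) {
      // A tail that does not fill a whole chunk is carried over unchanged.
      next.insert(next.end(), operands.begin() + offset, operands.end());
    } else {
      // A new concatenate absorbs up to kMaxParameters operands; its total
      // size becomes a single operand for the next round.
      int64_t chunk = std::min<int64_t>(kMaxParameters, remaining);
      int64_t sum = 0;
      for (int64_t i = 0; i < chunk; ++i) sum += operands[offset + i];
      next.push_back(sum);
    }
  }
  return next;
}

int main() {
  for (int n : {255, 256}) {
    std::vector<int64_t> ops(n, 1);  // n unit-sized operands
    int rounds = 0;
    while (ops.size() > 1) {
      ops = OneSplittingRound(ops);
      ++rounds;
      std::cout << "n=" << n << " after round " << rounds << ": " << ops.size()
                << " operand(s)\n";
    }
  }
}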
ec5c2527-df06-412e-ae7d-996b8c3e762d
cpp
tensorflow/tensorflow
reduction_degenerate_dim_remover
third_party/xla/xla/service/gpu/transforms/reduction_degenerate_dim_remover.cc
third_party/xla/xla/service/gpu/transforms/reduction_degenerate_dim_remover_test.cc
#include "xla/service/gpu/transforms/reduction_degenerate_dim_remover.h" #include <cstdint> #include <memory> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_set.h" #include "absl/container/inlined_vector.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/dfs_hlo_visitor_with_default.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { class ReductionDegenerateDimRemoverVisitor : public DfsHloRewriteVisitor { public: absl::Status HandleReduce(HloInstruction *hlo) override { auto instr = Cast<HloReduceInstruction>(hlo); absl::InlinedVector<HloInstruction *, 2> input_reshapes; absl::InlinedVector<Shape, 2> canonical_reduce_shapes; int idx = -1; std::vector<int64_t> updated_reduced_dimensions; for (HloInstruction *reduced_op : instr->inputs()) { idx++; const Shape &input_shape = reduced_op->shape(); const Shape &reduce_shape = instr->shape().IsTuple() ? instr->shape().tuple_shapes(idx) : instr->shape(); if (!ShapeUtil::HasDegenerateDimensions(reduced_op->shape())) { return absl::OkStatus(); } Shape canonical_input_shape = ShapeUtil::DropDegenerateDimensions(input_shape); Shape canonical_reduce_shape = ShapeUtil::DropDegenerateDimensions(reduce_shape); auto reduced_dimensions = instr->dimensions(); int64_t shift = 0; for (int dim = 0; dim < input_shape.rank(); dim++) { if (input_shape.dimensions(dim) == 1) { shift++; } else { if (absl::c_linear_search(reduced_dimensions, dim) && idx == 0) { updated_reduced_dimensions.push_back(dim - shift); } } } if (updated_reduced_dimensions.empty()) { std::unique_ptr<HloInstruction> reshape = HloInstruction::CreateBitcast(reduce_shape, reduced_op); return ReplaceWithNewInstruction(instr, std::move(reshape)); } input_reshapes.push_back(instr->parent()->AddInstruction( HloInstruction::CreateBitcast(canonical_input_shape, reduced_op))); canonical_reduce_shapes.push_back(canonical_reduce_shape); } Shape canonical_reduce_shape = ShapeUtil::MakeMaybeTupleShape(canonical_reduce_shapes); const Shape &orig_reduce_shape = instr->shape(); std::unique_ptr<HloInstruction> new_reduce = HloInstruction::CreateReduce( canonical_reduce_shape, input_reshapes, instr->init_values(), updated_reduced_dimensions, instr->to_apply()); instr->SetupDerivedInstruction(new_reduce.get()); if (canonical_reduce_shape != instr->shape()) { HloInstruction *wrapped_reduce = instr->parent()->AddInstruction(std::move(new_reduce)); absl::InlinedVector<HloInstruction *, 2> out; if (!canonical_reduce_shape.IsTuple()) { new_reduce = HloInstruction::CreateBitcast(orig_reduce_shape, wrapped_reduce); } else { for (int oidx = 0; oidx < instr->input_count(); oidx++) { HloInstruction *gte = instr->parent()->AddInstruction( HloInstruction::CreateGetTupleElement(wrapped_reduce, oidx)); out.push_back( instr->parent()->AddInstruction(HloInstruction::CreateBitcast( orig_reduce_shape.tuple_shapes(oidx), gte))); } new_reduce = HloInstruction::CreateTuple(out); } } return ReplaceWithNewInstruction(instr, std::move(new_reduce)); } }; absl::StatusOr<bool> ReductionDegenerateDimRemover::Run( HloModule *module, const absl::flat_hash_set<absl::string_view> &execution_threads) { TF_ASSIGN_OR_RETURN(bool changed, ReductionDegenerateDimRemoverVisitor().RunOnModule( module, execution_threads)); 
return changed; } } }
#include "xla/service/gpu/transforms/reduction_degenerate_dim_remover.h" #include <optional> #include "absl/strings/string_view.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/test.h" namespace xla { namespace { class ReductionDegenerateDimRemoverTest : public HloTestBase { public: void CheckDegenerateDimRemover(absl::string_view hlo, std::optional<absl::string_view> expected) { RunAndFilecheckHloRewrite(hlo, gpu::ReductionDegenerateDimRemover{}, expected); } }; TEST_F(ReductionDegenerateDimRemoverTest, ReductionWithDegenerateDimensions) { const char* hlo = R"( HloModule ReduceWithDegenerateDimensions add { accum = f32[] parameter(0) op = f32[] parameter(1) ROOT out = f32[] add(accum, op) } ENTRY main { input = f32[1,3,1,4,1,5,1] parameter(0) zero = f32[] constant(0) ROOT out = f32[1,1,1,1] reduce(input, zero), dimensions={1,3,5}, to_apply=add } )"; CheckDegenerateDimRemover(hlo, R"( )"); } TEST_F(ReductionDegenerateDimRemoverTest, ReductionWithDegenerateDimensionsVariadic) { const char* hlo = R"( HloModule ReduceWithDegenerateDimensions argmax { running_max = f32[] parameter(0) running_max_idx = u32[] parameter(1) current_value = f32[] parameter(2) current_value_idx = u32[] parameter(3) current = (f32[], u32[]) tuple(running_max, running_max_idx) potential = (f32[], u32[]) tuple(current_value, current_value_idx) cmp_code = pred[] compare(current_value, running_max), direction=GT new_max = f32[] select(cmp_code, current_value, running_max) new_idx = u32[] select(cmp_code, current_value_idx, running_max_idx) ROOT out = (f32[], u32[]) tuple(new_max, new_idx) } ENTRY main { input = f32[1,3,1,4,1,5,1] parameter(0) idxs = u32[1,3,1,4,1,5,1] parameter(1) zero = f32[] constant(0) zero_idx = u32[] constant(0) ROOT out = (f32[1,1,1,1], u32[1,1,1,1]) reduce(input, idxs, zero, zero_idx), dimensions={1,3,5}, to_apply=argmax } )"; CheckDegenerateDimRemover(hlo, R"( )"); } TEST_F(ReductionDegenerateDimRemoverTest, DegenerateWithEmptyDimension) { const char* hlo = R"( HloModule ReduceWithDegenerateDimensions add { accum = f32[] parameter(0) op = f32[] parameter(1) ROOT out = f32[] add(accum, op) } ENTRY main { input = f32[1,3,1,4,1,5,1] parameter(0) zero = f32[] constant(0) ROOT out = f32[3,4,5,1] reduce(input, zero), dimensions={0,2,4}, to_apply=add } )"; CheckDegenerateDimRemover(hlo, R"( )"); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/reduction_degenerate_dim_remover.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/reduction_degenerate_dim_remover_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
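As a note on the index arithmetic in HandleReduce above: when size-1 dimensions are dropped, each surviving reduced dimension is shifted left by the number of degenerate dimensions that precede it. The following standalone C++ sketch (illustration only, not XLA code; the helper names are invented) reproduces that bookkeeping for the shape used in the tests, f32[1,3,1,4,1,5,1] reduced over {1,3,5}, which canonicalizes to [3,4,5] reduced over {0,1,2}.

// Standalone sketch (not part of XLA): the re-indexing applied when degenerate
// (size-1) dimensions are dropped from a reduce. Size-1 dims are removed from
// the input shape and each surviving reduced dimension index is shifted left
// by the number of dropped dims before it.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

struct Canonicalized {
  std::vector<int64_t> input_dims;   // input shape without size-1 dims
  std::vector<int64_t> reduce_dims;  // re-indexed reduction dimensions
};

Canonicalized DropDegenerateDims(const std::vector<int64_t>& input_dims,
                                 const std::vector<int64_t>& reduce_dims) {
  Canonicalized out;
  int64_t shift = 0;
  for (int64_t dim = 0; dim < static_cast<int64_t>(input_dims.size()); ++dim) {
    if (input_dims[dim] == 1) {
      ++shift;  // this dim disappears; later dims move one slot to the left
    } else {
      out.input_dims.push_back(input_dims[dim]);
      if (std::count(reduce_dims.begin(), reduce_dims.end(), dim) > 0) {
        out.reduce_dims.push_back(dim - shift);
      }
    }
  }
  return out;
}

int main() {
  // Mirrors the first test case: f32[1,3,1,4,1,5,1] reduced over {1,3,5}.
  Canonicalized c = DropDegenerateDims({1, 3, 1, 4, 1, 5, 1}, {1, 3, 5});
  for (int64_t d : c.input_dims) std::cout << d << ' ';   // 3 4 5
  std::cout << "| ";
  for (int64_t d : c.reduce_dims) std::cout << d << ' ';  // 0 1 2
  std::cout << '\n';
}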
6019610a-5519-4d8e-9c8b-b44273146c5f
cpp
tensorflow/tensorflow
transpose_dimension_grouper
third_party/xla/xla/service/gpu/transforms/transpose_dimension_grouper.cc
third_party/xla/xla/service/gpu/transforms/transpose_dimension_grouper_test.cc
#include "xla/service/gpu/transforms/transpose_dimension_grouper.h" #include <cstddef> #include <cstdint> #include <functional> #include <numeric> #include <vector> #include "absl/container/flat_hash_set.h" #include "absl/container/inlined_vector.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/hlo/ir/dfs_hlo_visitor_with_default.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/layout_util.h" #include "xla/permutation_util.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/util.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { absl::InlinedVector<size_t, 3> ConsecutiveSegments( absl::Span<const int64_t> xs) { absl::InlinedVector<size_t, 3> is = {0}; for (size_t i = 1; i < xs.size(); ++i) { if (1 != xs[i] - xs[i - 1]) { is.push_back(i); } } return is; } Shape MergeDimensions(absl::Span<const size_t> segs, const Shape &shape) { std::vector<int64_t> dimensions; const auto size = segs.size(); dimensions.reserve(size); for (size_t i = 1; i <= size; ++i) { dimensions.push_back(std::accumulate( shape.dimensions().begin() + segs[i - 1], shape.dimensions().begin() + (segs.size() == i ? shape.dimensions().size() : segs[i]), int64_t{1}, std::multiplies<int64_t>())); } return ShapeUtil::MakeShapeWithDescendingLayout(shape.element_type(), dimensions); } absl::InlinedVector<int64_t, 3> GetNormalizedTransposeShapeHelper( const Shape &output_shape, absl::Span<int64_t const> output_to_input, absl::InlinedVector<int64_t, 3> &permutation) { absl::InlinedVector<size_t, 3> segments = ConsecutiveSegments(output_to_input); Shape normalized_shape = MergeDimensions(segments, output_shape); absl::InlinedVector<int64_t, 3> normalized_dims( normalized_shape.dimensions().begin(), normalized_shape.dimensions().end()); if (segments.size() == 1) { return normalized_dims; } std::vector<int64_t> segment_to_normalized_dim(output_shape.rank(), -1); for (size_t segment : segments) { segment_to_normalized_dim[output_to_input[segment]] = 0; } int64_t normalized_dim = 0; for (int64_t i = 0; i < segment_to_normalized_dim.size(); ++i) { if (segment_to_normalized_dim[i] >= 0) { segment_to_normalized_dim[i] = normalized_dim++; } } permutation.reserve(segments.size()); for (int64_t i = 0; i < segments.size(); ++i) { permutation.push_back( segment_to_normalized_dim[output_to_input[segments[i]]]); } return normalized_dims; } absl::InlinedVector<int64_t, 3> GetNormalizedLogicalTransposeShape( const Shape &output_shape, absl::Span<int64_t const> dimensions, absl::InlinedVector<int64_t, 3> &permutation) { permutation.clear(); absl::InlinedVector<int64_t, 3> delta(output_shape.rank() + 1, 0); auto input_dimensions = ComposePermutations(output_shape.dimensions(), InversePermutation(dimensions)); for (int i = 0; i < output_shape.rank(); ++i) { delta[i + 1] = delta[i]; if (input_dimensions[i] == static_cast<int64_t>(1)) { ++delta[i + 1]; } } absl::InlinedVector<int64_t, 3> new_dimensions; for (int i = 0; i < dimensions.size(); i++) { if (output_shape.dimensions(i) != 1) { new_dimensions.push_back(dimensions[i] - delta[dimensions[i]]); } } return GetNormalizedTransposeShapeHelper( ShapeUtil::DropDegenerateDimensions(output_shape), new_dimensions, permutation); } class TransposeDimensionGroupVisitor : public DfsHloRewriteVisitor { public: absl::Status HandleTranspose(HloInstruction *transpose) override { VLOG(4) << "Input: " << 
transpose->ToString(); if (!LayoutUtil::IsMonotonicWithDim0Major(transpose->shape().layout()) || !LayoutUtil::IsMonotonicWithDim0Major( transpose->operand(0)->shape().layout())) { return FailedPrecondition( "Layout normalization should have assigned the default layout to " "transpose and its operand"); } absl::InlinedVector<int64_t, 3> permutation; auto normalized_dims = GetNormalizedLogicalTransposeShape( transpose->shape(), transpose->dimensions(), permutation); if (normalized_dims.size() == 1 || normalized_dims == transpose->shape().dimensions()) { return absl::OkStatus(); } auto normalized_operand_dims = ComposePermutations(normalized_dims, InversePermutation(permutation)); Shape grouped_operand_shape = ShapeUtil::MakeShapeWithDescendingLayout( transpose->shape().element_type(), normalized_operand_dims); auto new_operand = transpose->AddInstruction(HloInstruction::CreateBitcast( grouped_operand_shape, transpose->mutable_operand(0))); Shape grouped_shape = ShapeUtil::MakeShapeWithDescendingLayout( transpose->shape().element_type(), normalized_dims); auto new_transpose = transpose->AddInstruction(HloInstruction::CreateTranspose( grouped_shape, new_operand, permutation)); VLOG(5) << "Generated new transpose: " << new_transpose->ToString(); return ReplaceWithNewInstruction( transpose, HloInstruction::CreateBitcast(transpose->shape(), new_transpose)); } }; } absl::StatusOr<bool> TransposeDimensionGrouper::Run( HloModule *module, const absl::flat_hash_set<absl::string_view> &execution_threads) { TF_ASSIGN_OR_RETURN( bool changed, TransposeDimensionGroupVisitor().RunOnModule(module, execution_threads)); return changed; } } }
#include "xla/service/gpu/transforms/transpose_dimension_grouper.h" #include <optional> #include "absl/strings/string_view.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/errors.h" #include "tsl/platform/status_matchers.h" #include "tsl/platform/statusor.h" #include "tsl/platform/test.h" namespace xla { namespace gpu { namespace { using ::testing::HasSubstr; using ::tsl::testing::StatusIs; class TransposeDimensionGrouperTest : public HloTestBase { public: void CheckDimensionGrouper(absl::string_view hlo, std::optional<absl::string_view> expected) { RunAndFilecheckHloRewrite(hlo, TransposeDimensionGrouper{}, expected); } void CheckDimensionGrouperUnchanged(absl::string_view hlo) { CheckDimensionGrouper(hlo, std::nullopt); } }; TEST_F(TransposeDimensionGrouperTest, NoTranspose) { const char* hlo = R"( HloModule NoTranspose ENTRY main { input = f32[64,128,1]{2,1,0} parameter(0) ROOT out = f32[64,1,128]{2,1,0} transpose(input), dimensions={0,2,1} } )"; CheckDimensionGrouperUnchanged(hlo); } TEST_F(TransposeDimensionGrouperTest, NoTranspose2) { const char* hlo = R"( HloModule NoTranspose2 ENTRY main { input = f32[32,128,64]{2,1,0} parameter(0) ROOT out = f32[32,64,128]{0,1,2} transpose(input), dimensions={0,2,1} } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo)); TransposeDimensionGrouper dimension_grouper; EXPECT_THAT(dimension_grouper.Run(module.get()), StatusIs(tsl::error::FAILED_PRECONDITION, HasSubstr("Layout normalization"))); } TEST_F(TransposeDimensionGrouperTest, NoTranspose3) { const char* hlo = R"( HloModule NoTranspose3 ENTRY main { input = f32[32,128,64]{0,1,2} parameter(0) ROOT out = f32[32,64,128]{2,1,0} transpose(input), dimensions={0,2,1} } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo)); TransposeDimensionGrouper dimension_grouper; EXPECT_THAT(dimension_grouper.Run(module.get()), StatusIs(tsl::error::FAILED_PRECONDITION, HasSubstr("Layout normalization"))); } TEST_F(TransposeDimensionGrouperTest, Simple2D) { const char* hlo = R"( HloModule Simple2D ENTRY main { input = f32[128,64]{1,0} parameter(0) ROOT out = f32[64,128]{1,0} transpose(input), dimensions={1,0} } )"; CheckDimensionGrouperUnchanged(hlo); } TEST_F(TransposeDimensionGrouperTest, Simple3D_021) { const char* hlo = R"( HloModule Simple3D_021 ENTRY main { input = f32[8,32768,16]{2,1,0} parameter(0) ROOT out = f32[8,16,32768]{2,1,0} transpose(input), dimensions={0,2,1} } )"; CheckDimensionGrouperUnchanged(hlo); } TEST_F(TransposeDimensionGrouperTest, Simple3D_210) { const char* hlo = R"( HloModule Simple3D_210 ENTRY main { input = f32[8,32768,16]{2,1,0} parameter(0) ROOT out = f32[16,32768,8]{2,1,0} transpose(input), dimensions={2,1,0} } )"; CheckDimensionGrouperUnchanged(hlo); } TEST_F(TransposeDimensionGrouperTest, Simple4D) { const char* hlo = R"( HloModule Simple4D ENTRY main { input = f32[32768,4,16,8]{3,2,1,0} parameter(0) ROOT out = f32[16,32768,8,4]{3,2,1,0} transpose(input), dimensions={2,0,3,1} } )"; CheckDimensionGrouperUnchanged(hlo); } TEST_F(TransposeDimensionGrouperTest, NormalizeTo3D) { const char* hlo = R"( HloModule NormalizeTo3D ENTRY main { input = f32[8,32,32,32,16]{4,3,2,1,0} parameter(0) ROOT out = f32[8,16,32,32,32]{4,3,2,1,0} transpose(input), dimensions={0,4,1,2,3} } )"; CheckDimensionGrouper(hlo, R"( )"); } TEST_F(TransposeDimensionGrouperTest, LargeShapeSizeOverflow) { const char* hlo = R"( HloModule LargeShapeSizeOverflow ENTRY main { input = f32[4096,4096,128,16]{3,2,1,0} parameter(0) ROOT out = 
f32[16,4096,4096,128]{3,2,1,0} transpose(input), dimensions={3,0,1,2} } )"; CheckDimensionGrouper(hlo, R"( )"); } TEST_F(TransposeDimensionGrouperTest, DegenerateDims) { const char* hlo = R"( HloModule DegenerateDims ENTRY main { input = f32[1,32,1,3,1,64,1]{6,5,4,3,2,1,0} parameter(0) ROOT out = f32[1,32,1,64,1,3,1]{6,5,4,3,2,1,0} transpose(input), dimensions={6,1,4,5,2,3,0} } )"; CheckDimensionGrouper(hlo, R"( )"); } TEST_F(TransposeDimensionGrouperTest, TransposeWithGrouping) { const char* hlo = R"( HloModule TransposeWithGrouping ENTRY main { input = f32[100,1,10,32,2]{4,3,2,1,0} parameter(0) ROOT out = f32[10,1,32,100,2]{4,3,2,1,0} transpose(input), dimensions={2,1,3,0,4} } )"; CheckDimensionGrouper(hlo, R"( )"); } TEST_F(TransposeDimensionGrouperTest, NormalizeTo2D) { const char* hlo = R"( HloModule Normalize2DTo3D ENTRY main { input = f32[50,20,30]{2,1,0} parameter(0) ROOT out = f32[20,30,50]{2,1,0} transpose(input), dimensions={1,2,0} } )"; CheckDimensionGrouper(hlo, R"( )"); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/transpose_dimension_grouper.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/transpose_dimension_grouper_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
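The grouping step in TransposeDimensionGrouper boils down to finding runs of consecutive values in the output-to-input dimension map and multiplying the corresponding output dimensions together. The standalone sketch below (illustration only, not XLA code; helper names are invented) walks through that arithmetic for the NormalizeTo3D test case: dimensions={0,4,1,2,3} has three runs, so the rank-5 transpose collapses to the rank-3 shape [8,16,32768] and the grouped transpose becomes the familiar 0-2-1 permutation.

// Standalone sketch (not part of XLA): collapsing runs of dimensions that stay
// adjacent across a transpose into single logical dimensions.
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iostream>
#include <numeric>
#include <vector>

// Start indices of maximal runs of consecutive values in `perm`.
std::vector<size_t> ConsecutiveSegments(const std::vector<int64_t>& perm) {
  std::vector<size_t> starts = {0};
  for (size_t i = 1; i < perm.size(); ++i) {
    if (perm[i] != perm[i - 1] + 1) starts.push_back(i);
  }
  return starts;
}

int main() {
  // Output shape and output->input dimension map of the NormalizeTo3D test:
  // f32[8,32,32,32,16] -> f32[8,16,32,32,32], dimensions={0,4,1,2,3}.
  std::vector<int64_t> output_dims = {8, 16, 32, 32, 32};
  std::vector<int64_t> perm = {0, 4, 1, 2, 3};

  std::vector<size_t> segs = ConsecutiveSegments(perm);  // {0, 1, 2}
  std::vector<int64_t> grouped;
  for (size_t s = 0; s < segs.size(); ++s) {
    size_t end = (s + 1 < segs.size()) ? segs[s + 1] : output_dims.size();
    grouped.push_back(std::accumulate(output_dims.begin() + segs[s],
                                      output_dims.begin() + end, int64_t{1},
                                      std::multiplies<int64_t>()));
  }
  for (int64_t d : grouped) std::cout << d << ' ';  // 8 16 32768
  std::cout << '\n';
}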
6c55f973-6e2e-407f-b8de-4c527b9f2072
cpp
tensorflow/tensorflow
pipelined_p2p_rewriter
third_party/xla/xla/service/gpu/transforms/pipelined_p2p_rewriter.cc
third_party/xla/xla/service/gpu/transforms/pipelined_p2p_rewriter_test.cc
#include "xla/service/gpu/transforms/pipelined_p2p_rewriter.h" #include <cstdint> #include <optional> #include <utility> #include <vector> #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/hlo/ir/dfs_hlo_visitor.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/ir/hlo_schedule.h" #include "xla/hlo/utils/hlo_query.h" #include "xla/service/collective_ops_utils.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/util.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { using CollectiveInComputation = absl::flat_hash_map<const HloComputation*, bool>; using InstructionVector = HloInstruction::InstructionVector; struct PipelinedP2PInfo { int64_t opnd_start; int64_t opnd_end; }; bool IsCollectiveOp(const HloInstruction* op) { HloOpcode opcode = op->opcode(); if (opcode == HloOpcode::kCustomCall) { return true; } return hlo_query::IsCollectiveCommunicationOp(opcode) || opcode == HloOpcode::kSend || opcode == HloOpcode::kRecv; } bool MayInvokeCollectiveOp( const HloInstruction* hlo, const CollectiveInComputation& collective_in_computation) { if (IsCollectiveOp(hlo)) { return true; } for (HloComputation* callee : hlo->called_computations()) { auto collective_in_comp = collective_in_computation.find(callee); CHECK(collective_in_comp != collective_in_computation.end()); if (collective_in_comp->second) { return true; } } return false; } HloInstruction* FindUniqueGTEUserWithIndex(const HloInstruction* op, int64_t idx) { CHECK(op->shape().IsTuple()); HloInstruction* gte = nullptr; for (auto user : op->users()) { if (user->opcode() != HloOpcode::kGetTupleElement) { continue; } if (user->tuple_index() == idx) { if (gte == nullptr) { gte = user; } else { return nullptr; } } } return gte; } bool HasGTEUserWithIndex(const HloInstruction* op, int64_t idx) { CHECK(op->shape().IsTuple()); for (auto user : op->users()) { if (user->opcode() != HloOpcode::kGetTupleElement) { continue; } if (user->tuple_index() == idx) { return true; } } return false; } HloInstruction* MaySkipTrivialTuple(HloInstruction* op) { if (op->opcode() != HloOpcode::kTuple) { return op; } HloInstruction* hidden_op = nullptr; for (auto opnd : op->mutable_operands()) { if (opnd->opcode() != HloOpcode::kGetTupleElement) { return op; } if (hidden_op == nullptr) { hidden_op = opnd->mutable_operand(0); } else if (opnd->mutable_operand(0) != hidden_op) { return op; } } return hidden_op; } const HloInstruction* MaySkipTrivialTuple(const HloInstruction* op) { return MaySkipTrivialTuple(const_cast<HloInstruction*>(op)); } std::optional<PipelinedP2PInfo> FindConsecutiveAndBalanceBlockOfSendDoneRecvDone( const HloInstruction* while_init) { PipelinedP2PInfo pipelined_p2p_info{0, 0}; auto has_started = [&]() { return pipelined_p2p_info.opnd_start != pipelined_p2p_info.opnd_end; }; int difference = 0; for (int64_t i = 0; i < while_init->operand_count(); ++i) { const HloInstruction* op = while_init->operand(i); if ((op->opcode() == HloOpcode::kRecvDone || op->opcode() == HloOpcode::kSendDone) && op->frontend_attributes().map().count(kSendRecvPipelineAttr) > 0) { if (op->opcode() == HloOpcode::kRecvDone) { difference++; } else { difference--; } if (!has_started()) { 
pipelined_p2p_info.opnd_start = i; } pipelined_p2p_info.opnd_end = i + 1; } else { if (has_started()) { VLOG(10) << "End a consecutive block"; break; } } } if (difference != 0) { VLOG(10) << "Mismatch number of SendDone and RecvDone: " << difference; return std::nullopt; } if (has_started()) { for (int64_t i = pipelined_p2p_info.opnd_end; i < while_init->operand_count(); ++i) { const HloInstruction* op = while_init->operand(i); if (op->opcode() == HloOpcode::kRecvDone || op->opcode() == HloOpcode::kSendDone) { VLOG(10) << "SendDone/RecvDone outside the consecutive block"; return std::nullopt; break; } } } if (!has_started()) { VLOG(10) << "No SendDone/RecvDone in while-init "; return std::nullopt; } return pipelined_p2p_info; } std::optional<PipelinedP2PInfo> FindPipelinedP2P( const HloInstruction* while_op) { VLOG(10) << "while_op: " << while_op->ToString(); const HloInstruction* while_init = while_op->while_init(); if (while_init->opcode() != HloOpcode::kTuple || while_init->user_count() != 1) { return std::nullopt; } const HloComputation* while_body = while_op->while_body(); const HloComputation* while_condition = while_op->while_condition(); if (while_body->num_parameters() != 1 || while_condition->num_parameters() != 1) { return std::nullopt; } std::optional<PipelinedP2PInfo> pipelined_p2p_info = FindConsecutiveAndBalanceBlockOfSendDoneRecvDone(while_init); if (!pipelined_p2p_info.has_value()) { return std::nullopt; } VLOG(10) << "opnd_start " << pipelined_p2p_info->opnd_start << " opnd_end " << pipelined_p2p_info->opnd_end; for (int64_t i = pipelined_p2p_info->opnd_start; i < pipelined_p2p_info->opnd_end; ++i) { const HloInstruction* op = while_init->operand(i); if (op->opcode() == HloOpcode::kRecvDone) { if (!FindUniqueGTEUserWithIndex(while_op, i)) { VLOG(10) << "While result get-tuple-element user with index " << i << " not unique"; return std::nullopt; } if (!FindUniqueGTEUserWithIndex(while_body->parameter_instruction(0), i)) { VLOG(10) << "While-body parameter get-tuple-element user with index " << i << " not unique"; return std::nullopt; } } else { CHECK(op->opcode() == HloOpcode::kSendDone); if (HasGTEUserWithIndex(while_op, i) || HasGTEUserWithIndex(while_body->parameter_instruction(0), i)) { VLOG(10) << "SendDone with index " << i << " has unexpected users"; return std::nullopt; } } } const HloInstruction* root = while_body->root_instruction(); for (int64_t i = pipelined_p2p_info->opnd_start; i < pipelined_p2p_info->opnd_end; ++i) { const HloInstruction* op_init = while_init->operand(i); const HloInstruction* op_root = root->operand(i); op_root = MaySkipTrivialTuple(op_root); if (op_init->opcode() != op_root->opcode()) { VLOG(10) << "Mismatching opcode, op_init: " << op_init->ToString() << " op_root: " << op_root->ToString(); return std::nullopt; } } return pipelined_p2p_info.value(); } absl::Status RemoveOpFromParent(HloInstruction* op) { TF_RETURN_IF_ERROR(op->DropAllControlDeps()); TF_RETURN_IF_ERROR(op->parent()->RemoveInstruction(op)); return absl::OkStatus(); } absl::Status ReplaceOpInSequence(HloInstruction* old_op, HloInstruction* new_op, HloInstructionSequence& instruction_sequence) { VLOG(10) << "old_op: " << old_op->ToString(); VLOG(10) << "new_op: " << new_op->ToString(); instruction_sequence.replace_instruction(old_op, new_op); return RemoveOpFromParent(old_op); } absl::Status ReplaceUsesAndUpdateSequence( HloInstruction* old_op, HloInstruction* new_op, HloInstructionSequence& instruction_sequence, bool diff_shape = false) { VLOG(10) << "old_op: " << 
old_op->ToString(); VLOG(10) << "new_op: " << new_op->ToString(); if (diff_shape) { TF_RETURN_IF_ERROR(old_op->ReplaceAllUsesWithDifferentShape(new_op)); } else { TF_RETURN_IF_ERROR(old_op->ReplaceAllUsesWith(new_op)); } return ReplaceOpInSequence(old_op, new_op, instruction_sequence); } absl::Status ReplaceUsesAndUpdateSequence( const InstructionVector& old_ops, const InstructionVector& new_ops, HloInstructionSequence& instruction_sequence) { CHECK(old_ops.size() == new_ops.size()); for (int64_t i = 0; i < old_ops.size(); ++i) { TF_RETURN_IF_ERROR(ReplaceUsesAndUpdateSequence(old_ops[i], new_ops[i], instruction_sequence)); } return absl::OkStatus(); } absl::Status RemoveDoneOpsAndUpdateSequence( const InstructionVector& ops, HloInstructionSequence& instruction_sequence) { auto remove_op = [&](HloInstruction* op) { VLOG(10) << "op: " << op->ToString(); TF_RETURN_IF_ERROR(RemoveOpFromParent(op)); instruction_sequence.remove_instruction(op); return absl::OkStatus(); }; for (auto op : ops) { if (op->opcode() == HloOpcode::kTuple) { InstructionVector to_remove; HloInstruction* tuple_op = op; op = MaySkipTrivialTuple(tuple_op); to_remove.push_back(tuple_op); for (auto opnd : tuple_op->mutable_operands()) { to_remove.push_back(opnd); } for (auto opnd : to_remove) { TF_RETURN_IF_ERROR(remove_op(opnd)); } } TF_RETURN_IF_ERROR(remove_op(op)); } return absl::OkStatus(); } bool InsertBeforeFirstCollectiveOp( const InstructionVector& ops, const CollectiveInComputation& collective_in_computation, HloInstructionSequence& instruction_sequence, int64_t& idx, int64_t& idx_tot) { bool inserted = false; while (idx < idx_tot) { HloInstruction* hlo = instruction_sequence.instructions()[idx]; if (MayInvokeCollectiveOp(hlo, collective_in_computation)) { for (auto op : ops) { instruction_sequence.insert_instruction(op, idx); idx++; idx_tot++; } inserted = true; break; } idx++; } return inserted; } void CopyInstructionInfo(const HloInstruction* old_op, HloInstruction* new_op) { new_op->SetAndSanitizeName(absl::StrCat(old_op->name(), ".clone")); new_op->set_metadata(old_op->metadata()); new_op->add_frontend_attributes(old_op->frontend_attributes()); new_op->CopyBackendConfigFrom(old_op); } HloInstruction* CreateRecvDoneFrom(const HloInstruction* old_recv_done, HloInstruction* recv, HloComputation* computation) { HloInstruction* recv_done = computation->AddInstruction(HloInstruction::CreateRecvDone( recv, old_recv_done->channel_id().value())); CopyInstructionInfo(old_recv_done, recv_done); return recv_done; } HloInstruction* CreateSendDoneFrom(const HloInstruction* old_send_done, HloInstruction* send, HloComputation* computation) { HloInstruction* send_done = computation->AddInstruction(HloInstruction::CreateSendDone( send, old_send_done->channel_id().value())); CopyInstructionInfo(old_send_done, send_done); return send_done; } absl::Status RewritePipelinedP2PWhileBody( const CollectiveInComputation& collective_in_computation, const std::vector<Shape>& new_parameter_shapes, HloInstruction* while_op, int64_t opnd_start, int64_t opnd_end) { HloComputation* computation = while_op->while_body(); HloInstruction* while_init = while_op->while_init(); HloInstruction* root = computation->root_instruction(); HloInstructionSequence& instruction_sequence = computation->parent()->schedule().GetOrCreateSequence(computation); HloInstruction* param = computation->parameter_instruction(0); *param->mutable_shape() = ShapeUtil::MakeTupleShape(new_parameter_shapes); InstructionVector recv_dones; InstructionVector new_recv_dones; 
InstructionVector new_send_dones; for (int64_t i = opnd_start; i < opnd_end; ++i) { const HloInstruction* op = root->operand(i); op = MaySkipTrivialTuple(op); if (op->opcode() == HloOpcode::kRecvDone) { HloInstruction* gte = FindUniqueGTEUserWithIndex(param, i); CHECK(gte != nullptr); recv_dones.push_back(gte); HloInstruction* recv = computation->AddInstruction( HloInstruction::CreateGetTupleElement(param, i)); HloInstruction* recv_done = CreateRecvDoneFrom(op, recv, computation); new_recv_dones.push_back(recv_done); continue; } CHECK(op->opcode() == HloOpcode::kSendDone); HloInstruction* send = computation->AddInstruction( HloInstruction::CreateGetTupleElement(param, i)); HloInstruction* send_done = CreateSendDoneFrom(op, send, computation); new_send_dones.push_back(send_done); } TF_RETURN_IF_ERROR(ReplaceUsesAndUpdateSequence(recv_dones, new_recv_dones, instruction_sequence)); InstructionVector done_ops; InstructionVector new_opnds; for (int64_t i = 0; i < while_init->operand_count(); ++i) { HloInstruction* op = root->mutable_operand(i); if (i >= opnd_start && i < opnd_end) { new_opnds.push_back(MaySkipTrivialTuple(op)->mutable_operand(0)); done_ops.push_back(op); } else { new_opnds.push_back(op); } } HloInstruction* new_root = computation->AddInstruction(HloInstruction::CreateTuple(new_opnds)); computation->set_root_instruction(new_root, true); TF_RETURN_IF_ERROR(computation->RemoveInstruction(root)); instruction_sequence.replace_instruction(root, new_root); TF_RETURN_IF_ERROR( RemoveDoneOpsAndUpdateSequence(done_ops, instruction_sequence)); int64_t idx = 0; int64_t idx_end = instruction_sequence.size(); bool inserted = InsertBeforeFirstCollectiveOp(new_send_dones, collective_in_computation, instruction_sequence, idx, idx_end); CHECK(inserted); CHECK(idx_end == instruction_sequence.size()); return absl::OkStatus(); } void RewritePipelinedP2PWhileCond( const std::vector<Shape>& new_parameter_shapes, HloInstruction* while_op) { HloComputation* computation = while_op->while_condition(); HloInstruction* param = computation->parameter_instruction(0); *param->mutable_shape() = ShapeUtil::MakeTupleShape(new_parameter_shapes); VLOG(10) << computation->ToString(); } absl::Status TransformLoop( const PipelinedP2PInfo& pipelined_info, const CollectiveInComputation& collective_in_computation, int64_t& idx, int64_t& idx_end, HloInstructionSequence& instruction_sequence, HloInstruction* while_op) { HloComputation* computation = while_op->parent(); int64_t opnd_start = pipelined_info.opnd_start; int64_t opnd_end = pipelined_info.opnd_end; VLOG(10) << "Transform pipelined while-op " << while_op->ToString(); HloInstruction* while_init = while_op->while_init(); InstructionVector new_while_init_opnds; std::vector<Shape> new_parameter_shapes; for (int64_t i = 0; i < while_init->operand_count(); ++i) { HloInstruction* op = while_init->mutable_operand(i); if (i >= opnd_start && i < opnd_end) { new_while_init_opnds.push_back(op->mutable_operand(0)); } else { new_while_init_opnds.push_back(op); } new_parameter_shapes.push_back(new_while_init_opnds.back()->shape()); } RewritePipelinedP2PWhileCond(new_parameter_shapes, while_op); TF_RETURN_IF_ERROR(RewritePipelinedP2PWhileBody( collective_in_computation, new_parameter_shapes, while_op, opnd_start, opnd_end)); HloInstruction* new_while_init = computation->AddInstruction( HloInstruction::CreateTuple(new_while_init_opnds), "while-init"); VLOG(10) << "new_while_init: " << new_while_init->ToString(); HloInstruction* new_while_op = computation->AddInstruction( 
HloInstruction::CreateWhile( while_op->while_body()->root_instruction()->shape(), while_op->while_condition(), while_op->while_body(), new_while_init), "while-result"); CopyInstructionInfo(while_op, new_while_op); VLOG(10) << "new_while_op: " << new_while_op->ToString(); InstructionVector recv_dones; InstructionVector new_recv_dones; InstructionVector new_send_dones; InstructionVector done_ops; for (int64_t i = opnd_start; i < opnd_end; ++i) { HloInstruction* op = while_init->mutable_operand(i); done_ops.push_back(op); if (op->opcode() == HloOpcode::kRecvDone) { HloInstruction* gte = FindUniqueGTEUserWithIndex(while_op, i); CHECK(gte != nullptr); recv_dones.push_back(gte); HloInstruction* recv = computation->AddInstruction( HloInstruction::CreateGetTupleElement(new_while_op, i)); HloInstruction* recv_done = computation->AddInstruction( HloInstruction::CreateRecvDone(recv, op->channel_id().value())); new_recv_dones.push_back(recv_done); CopyInstructionInfo(op, recv_done); continue; } CHECK(op->opcode() == HloOpcode::kSendDone); HloInstruction* send = computation->AddInstruction( HloInstruction::CreateGetTupleElement(new_while_op, i)); HloInstruction* send_done = computation->AddInstruction( HloInstruction::CreateSendDone(send, op->channel_id().value())); new_send_dones.push_back(send_done); CopyInstructionInfo(op, send_done); } TF_RETURN_IF_ERROR(ReplaceUsesAndUpdateSequence( while_op, new_while_op, instruction_sequence, true)); TF_RETURN_IF_ERROR( ReplaceOpInSequence(while_init, new_while_init, instruction_sequence)); TF_RETURN_IF_ERROR(ReplaceUsesAndUpdateSequence(recv_dones, new_recv_dones, instruction_sequence)); TF_RETURN_IF_ERROR( RemoveDoneOpsAndUpdateSequence(done_ops, instruction_sequence)); int64_t opnd_tot = opnd_end - opnd_start; CHECK(idx_end == instruction_sequence.size() + opnd_tot); CHECK(instruction_sequence.instructions()[idx - opnd_tot] == new_while_op); idx_end -= opnd_tot; idx = idx - opnd_tot + 1; bool inserted = InsertBeforeFirstCollectiveOp(new_send_dones, collective_in_computation, instruction_sequence, idx, idx_end); CHECK(idx_end == instruction_sequence.size()); if (!inserted) { CHECK(idx_end == idx); idx--; for (auto send_done : new_send_dones) { instruction_sequence.insert_instruction(send_done, idx++); } } return absl::OkStatus(); } absl::StatusOr<bool> ProcessComputation( HloModule* module, HloComputation* computation, CollectiveInComputation& collective_in_computation) { VLOG(10) << "Process compuation " << computation->name(); bool changed = false; HloInstructionSequence& instruction_sequence = module->schedule().GetOrCreateSequence(computation); int64_t idx = 0; int64_t idx_end = instruction_sequence.size(); while (idx < idx_end) { HloInstruction* hlo = instruction_sequence.instructions()[idx]; if (MayInvokeCollectiveOp(hlo, collective_in_computation)) { collective_in_computation[computation] = true; } if (hlo->opcode() != HloOpcode::kWhile) { idx++; continue; } std::optional<PipelinedP2PInfo> pipelined_info = FindPipelinedP2P(hlo); if (!pipelined_info.has_value()) { idx++; continue; } TF_RETURN_IF_ERROR(TransformLoop(pipelined_info.value(), collective_in_computation, idx, idx_end, instruction_sequence, hlo)); changed = true; } return changed; } } absl::StatusOr<bool> PipelinedP2PRewriter::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { bool changed = false; if (!module->has_schedule()) return changed; CollectiveInComputation collective_in_computation; for (auto* computation : 
module->MakeComputationPostOrder(execution_threads)) { if (computation->IsFusionComputation()) { collective_in_computation[computation] = false; continue; } TF_ASSIGN_OR_RETURN( bool cur_changed, ProcessComputation(module, computation, collective_in_computation)); changed |= cur_changed; } if (changed) { TF_RETURN_IF_ERROR(module->schedule().Update()); } return changed; } } }
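The eligibility check in FindConsecutiveAndBalanceBlockOfSendDoneRecvDone reduces to a single scan over the while-init operands. The sketch below (standalone illustration, not XLA code; names are invented and the _xla_send_recv_pipeline attribute check is omitted) shows that scan: the loop is accepted only if its RecvDone/SendDone operands form one consecutive run with equal counts of each opcode, and any stray done operand outside the run causes rejection.

// Standalone sketch (not part of XLA): finding one balanced, consecutive run
// of recv-done/send-done operands in a while-init operand list.
#include <cstdint>
#include <iostream>
#include <optional>
#include <string>
#include <vector>

struct Block { int64_t start = 0, end = 0; };  // half-open [start, end)

std::optional<Block> FindBalancedBlock(const std::vector<std::string>& opnds) {
  Block block;
  int balance = 0;  // +1 per recv-done, -1 per send-done
  auto started = [&] { return block.start != block.end; };
  for (int64_t i = 0; i < static_cast<int64_t>(opnds.size()); ++i) {
    bool is_recv_done = opnds[i] == "recv-done";
    bool is_send_done = opnds[i] == "send-done";
    if (is_recv_done || is_send_done) {
      if (started() && i != block.end) return std::nullopt;  // second run
      balance += is_recv_done ? 1 : -1;
      if (!started()) block.start = i;
      block.end = i + 1;
    }
  }
  if (!started() || balance != 0) return std::nullopt;
  return block;
}

int main() {
  // e.g. while-init = (count, recv-done, send-done): block is [1, 3).
  auto block = FindBalancedBlock({"count", "recv-done", "send-done"});
  if (block) std::cout << block->start << ' ' << block->end << '\n';  // 1 3
}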
#include "xla/service/gpu/transforms/pipelined_p2p_rewriter.h" #include <gtest/gtest.h> #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/tests/filecheck.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { class PipelinedP2pRewriterTest : public HloTestBase { protected: void DoFileCheck(const HloModule* module, absl::string_view expected) { HloPrintOptions options; options.set_print_operand_shape(false); options.set_print_result_shape(false); TF_ASSERT_OK_AND_ASSIGN(bool filecheck_matched, RunFileCheck(module->ToString(options), expected)); EXPECT_TRUE(filecheck_matched); } }; TEST_F(PipelinedP2pRewriterTest, SendRecUnpipelinedNotTransform) { const char* kModuleStr = R"( HloModule test cond { param = (u32[], u32[2]) parameter(0) count = get-tuple-element(%param), index=0 ub = u32[] constant(11) ROOT result = pred[] compare(count, ub), direction=LT } body { param = (u32[], u32[2]) parameter(0) count = get-tuple-element(param), index=0 send-data = u32[2] get-tuple-element(param), index=1 after-all.0.n = token[] after-all() recv.0 = (u32[2], u32[], token[]) recv(after-all.0.n), channel_id=1, frontend_attributes={ _xla_send_recv_source_target_pairs="{{3,0}}", _xla_send_recv_pipeline="0" } send.0 = (u32[2], u32[], token[]) send(send-data, after-all.0.n), channel_id=1, frontend_attributes={ _xla_send_recv_source_target_pairs="{{3,0}}", _xla_send_recv_pipeline="0" } recv-done.0 = (u32[2], token[]) recv-done(recv.0), channel_id=1, frontend_attributes={ _xla_send_recv_pipeline="0" } send-done.0 = token[] send-done(send.0), channel_id=1, frontend_attributes={ _xla_send_recv_pipeline="0" } recv-data = u32[2] get-tuple-element(recv-done.0), index=0 c1 = u32[] constant(1) new_count = u32[] add(count, c1) r = u32[2] broadcast(c1), dimensions={} s = u32[2] add(r, recv-data) ROOT result = (u32[], u32[2]) tuple(new_count, s) } ENTRY test_computation { c0 = u32[] constant(0) c1 = u32[] constant(1) r = u32[] replica-id() a = u32[] add(c1, r) init = u32[2] broadcast(a), dimensions={} while_init = (u32[], u32[2]) tuple(c0, init) while_result = (u32[], u32[2]) while(while_init), body=body, condition=cond, backend_config={"known_trip_count":{"n":"11"}} ROOT recv-data = u32[2] get-tuple-element(while_result), index=1 } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr)); PipelinedP2PRewriter rewriter; TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get())); EXPECT_FALSE(changed); } TEST_F(PipelinedP2pRewriterTest, SendRecvPipelined1) { const char* kModuleStr = R"( HloModule test, is_scheduled=true while-cond { param = (u32[], (f32[1,1024,1024], token[]), token[]) parameter(0) count = get-tuple-element(param), index=0 ub = u32[] constant(25) ROOT cond-result = pred[] compare(count, ub), direction=LT } while-body { param = (u32[], (f32[1,1024,1024], token[]), token[]) parameter(0) count = get-tuple-element(param), index=0 recv-done.q = (f32[1,1024,1024], token[]) get-tuple-element(param), index=1 recv-data = f32[1, 1024, 1024] get-tuple-element(recv-done.q), index=0 c1 = u32[] constant(1) new-count = u32[] add(count, c1) replica = u32[] replica-id() c10 = u32[] constant(10) sum = u32[] add(replica, c10) sum2 = u32[] add(sum, count) conv = f32[] convert(sum2) p = f32[1, 1024, 1024] broadcast(conv), dimensions={} b = f32[1, 1024, 1024] add(p, recv-data) c = f32[1, 1024, 1024] multiply(b, b) d = f32[1, 1024, 1024] tan(c) s = f32[1, 
1024, 1024] dot(c, d), lhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1} send-data = f32[1, 1024, 1024] add(c, s) after-all = token[] after-all() recv = (f32[1, 1024, 1024], u32[], token[]) recv(after-all), channel_id=1, frontend_attributes={ _xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}", _xla_send_recv_pipeline="0" } send = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all), channel_id=1, frontend_attributes={ _xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}", _xla_send_recv_pipeline="0" } recv-done.p = (f32[1,1024,1024], token[]) recv-done(recv), channel_id=1, frontend_attributes={ _xla_send_recv_pipeline="0" } send-done.p = token[] send-done(send), channel_id=1, frontend_attributes={ _xla_send_recv_pipeline="0" } gte.0 = f32[1,1024,1024] get-tuple-element(recv-done.p), index=0 gte.1 = token[] get-tuple-element(recv-done.p), index=1 recv-done-tuple = (f32[1,1024,1024], token[]) tuple(gte.0, gte.1) ROOT body-result = (u32[], (f32[1,1024,1024], token[]), token[]) tuple(new-count, recv-done-tuple, send-done.p) } ENTRY main { c0 = u32[] constant(0) f0 = f32[] constant(0.0) init = f32[1, 1024, 1024] broadcast(f0), dimensions={} after-all.1 = token[] after-all() recv.1 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all.1), channel_id=1, frontend_attributes={ _xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}", _xla_send_recv_pipeline="0" } send.1 = (f32[1, 1024, 1024], u32[], token[]) send(init, after-all.1), channel_id=1, frontend_attributes={ _xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}", _xla_send_recv_pipeline="0" } recv-done.1.p = (f32[1,1024,1024], token[]) recv-done(recv.1), channel_id=1, frontend_attributes={ _xla_send_recv_pipeline="0" } send-done.1.p = token[] send-done(send.1), channel_id=1, frontend_attributes={ _xla_send_recv_pipeline="0" } while-init.p = (u32[], (f32[1,1024,1024], token[]), token[]) tuple(c0, recv-done.1.p, send-done.1.p) while-result.p = (u32[], (f32[1,1024,1024], token[]), token[]) while(while-init.p), body=while-body, condition=while-cond, backend_config={"known_trip_count":{"n":"25"}} recv-done.1.q = (f32[1,1024,1024], token[]) get-tuple-element(while-result.p), index=1 ROOT entry-result = f32[1, 1024, 1024] get-tuple-element(recv-done.1.q), index=0 } )"; const char* kExpected = R"( CHECK: %while-body (param.1: (u32[], (f32[1,1024,1024], u32[], token[]), (f32[1,1024,1024], u32[], token[]))) -> (u32[], (f32[1,1024,1024], u32[], token[]), (f32[1,1024,1024], u32[], token[])) { CHECK: %param.1 = parameter(0) CHECK: %get-tuple-element = get-tuple-element(%param.1), index=1 CHECK: %get-tuple-element.1 = get-tuple-element(%param.1), index=2 CHECK: %count.1 = get-tuple-element(%param.1), index=0 CHECK: %recv-done.p.clone = recv-done(%get-tuple-element), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0"} CHECK: %recv-data = get-tuple-element(%recv-done.p.clone), index=0 CHECK: %c1 = constant(1) CHECK: %new-count = add(%count.1, %c1) CHECK: %replica = replica-id() CHECK: %c10 = constant(10) CHECK: %sum = add(%replica, %c10) CHECK: %sum2 = add(%sum, %count.1) CHECK: %conv = convert(%sum2) CHECK: %p = broadcast(%conv), dimensions={} CHECK: %b = add(%p, %recv-data) CHECK: %c = multiply(%b, %b) CHECK: %d = tan(%c) CHECK: %s = dot(%c, %d), lhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1} CHECK: %send-data = add(%c, %s) CHECK: %after-all = after-all() CHECK: %send-done.p.clone = 
send-done(%get-tuple-element.1), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0"} CHECK{LITERAL}: %recv = recv(%after-all), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0",_xla_send_recv_source_target_pairs={{0,1}, {1,2}, {2,3}, {3,4}}} CHECK{LITERAL}: %send = send(%send-data, %after-all), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0",_xla_send_recv_source_target_pairs={{0,1}, {1,2}, {2,3}, {3,4}}} CHECK: ROOT %tuple = tuple(%new-count, %recv, %send) CHECK: } CHECK: %while-cond (param: (u32[], (f32[1,1024,1024], u32[], token[]), (f32[1,1024,1024], u32[], token[]))) -> pred[] { CHECK: %param = parameter(0) CHECK: %count = get-tuple-element(%param), index=0 CHECK: %ub = constant(25) CHECK: ROOT %cond-result = compare(%count, %ub), direction=LT CHECK: } CHECK: ENTRY %main () -> f32[1,1024,1024] { CHECK: %c0 = constant(0) CHECK: %f0 = constant(0) CHECK: %init = broadcast(%f0), dimensions={} CHECK: %after-all.1 = after-all() CHECK{LITERAL}: %recv.1 = recv(%after-all.1), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0",_xla_send_recv_source_target_pairs={{0,1}, {1,2}, {2,3}, {3,4}}} CHECK{LITERAL}: %send.1 = send(%init, %after-all.1), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0",_xla_send_recv_source_target_pairs={{0,1}, {1,2}, {2,3}, {3,4}}} CHECK: %while-init = tuple(%c0, %recv.1, %send.1) CHECK: %while-result.p.clone = while(%while-init), condition=%while-cond, body=%while-body, CHECK-SAME{LITERAL}: backend_config={"known_trip_count":{"n":"25"}} CHECK: %get-tuple-element.2 = get-tuple-element(%while-result.p.clone), index=1 CHECK: %get-tuple-element.3 = get-tuple-element(%while-result.p.clone), index=2 CHECK: %recv-done.1.p.clone = recv-done(%get-tuple-element.2), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0"} CHECK: %send-done.1.p.clone = send-done(%get-tuple-element.3), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0"} CHECK: ROOT %entry-result = get-tuple-element(%recv-done.1.p.clone), index=0 CHECK: })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr)); PipelinedP2PRewriter rewriter; TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get())); EXPECT_TRUE(changed); DoFileCheck(module.get(), kExpected); } TEST_F(PipelinedP2pRewriterTest, SendRecvTwoPipelinedWhileLoops) { const char* kModuleStr = R"( HloModule test, is_scheduled=true while-cond { param = (u32[], (f32[1,1024,1024], token[]), token[]) parameter(0) count = get-tuple-element(param), index=0 ub = u32[] constant(25) ROOT cond-result = pred[] compare(count, ub), direction=LT } while-body { param = (u32[], (f32[1,1024,1024], token[]), token[]) parameter(0) count = get-tuple-element(param), index=0 recv-done.q = (f32[1,1024,1024], token[]) get-tuple-element(param), index=1 send-data = f32[1, 1024, 1024] get-tuple-element(recv-done.q), index=0 c1 = u32[] constant(1) new-count = u32[] add(count, c1) after-all = token[] after-all() recv = (f32[1, 1024, 1024], u32[], token[]) recv(after-all), channel_id=1, frontend_attributes={ _xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}", _xla_send_recv_pipeline="0" } send = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all), channel_id=1, frontend_attributes={ _xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}", _xla_send_recv_pipeline="0" } recv-done.p = (f32[1,1024,1024], token[]) recv-done(recv), channel_id=1, frontend_attributes={ _xla_send_recv_pipeline="0" } send-done.p = token[] send-done(send), 
channel_id=1, frontend_attributes={ _xla_send_recv_pipeline="0" } gte.0 = f32[1,1024,1024] get-tuple-element(recv-done.p), index=0 gte.1 = token[] get-tuple-element(recv-done.p), index=1 recv-done-tuple = (f32[1,1024,1024], token[]) tuple(gte.0, gte.1) ROOT body-result = (u32[], (f32[1,1024,1024], token[]), token[]) tuple(new-count, recv-done-tuple, send-done.p) } while-cond-2 { param = (u32[], (f32[1,1024,1024], token[]), token[]) parameter(0) count = get-tuple-element(param), index=0 ub = u32[] constant(25) ROOT cond-result = pred[] compare(count, ub), direction=LT } while-body-2 { param = (u32[], (f32[1,1024,1024], token[]), token[]) parameter(0) count = get-tuple-element(param), index=0 recv-done.q = (f32[1,1024,1024], token[]) get-tuple-element(param), index=1 send-data = f32[1, 1024, 1024] get-tuple-element(recv-done.q), index=0 c1 = u32[] constant(1) new-count = u32[] add(count, c1) after-all = token[] after-all() recv = (f32[1, 1024, 1024], u32[], token[]) recv(after-all), channel_id=1, frontend_attributes={ _xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}", _xla_send_recv_pipeline="0" } send = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all), channel_id=1, frontend_attributes={ _xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}", _xla_send_recv_pipeline="0" } recv-done.p = (f32[1,1024,1024], token[]) recv-done(recv), channel_id=1, frontend_attributes={ _xla_send_recv_pipeline="0" } send-done.p = token[] send-done(send), channel_id=1, frontend_attributes={ _xla_send_recv_pipeline="0" } gte.0 = f32[1,1024,1024] get-tuple-element(recv-done.p), index=0 gte.1 = token[] get-tuple-element(recv-done.p), index=1 recv-done-tuple = (f32[1,1024,1024], token[]) tuple(gte.0, gte.1) ROOT body-result = (u32[], (f32[1,1024,1024], token[]), token[]) tuple(new-count, recv-done-tuple, send-done.p) } ENTRY main { c0 = u32[] constant(0) f0 = f32[] constant(0.0) init = f32[1, 1024, 1024] broadcast(f0), dimensions={} after-all.1 = token[] after-all() recv.1 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all.1), channel_id=1, frontend_attributes={ _xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}", _xla_send_recv_pipeline="0" } send.1 = (f32[1, 1024, 1024], u32[], token[]) send(init, after-all.1), channel_id=1, frontend_attributes={ _xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}", _xla_send_recv_pipeline="0" } recv-done.1.p = (f32[1,1024,1024], token[]) recv-done(recv.1), channel_id=1, frontend_attributes={ _xla_send_recv_pipeline="0" } send-done.1.p = token[] send-done(send.1), channel_id=1, frontend_attributes={ _xla_send_recv_pipeline="0" } while-init.p = (u32[], (f32[1,1024,1024], token[]), token[]) tuple(c0, recv-done.1.p, send-done.1.p) while-result.p = (u32[], (f32[1,1024,1024], token[]), token[]) while(while-init.p), body=while-body, condition=while-cond, backend_config={"known_trip_count":{"n":"25"}} recv-done.1.q = (f32[1,1024,1024], token[]) get-tuple-element(while-result.p), index=1 after-all-2.1 = token[] after-all() recv-2.1 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all-2.1), channel_id=2, frontend_attributes={ _xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}", _xla_send_recv_pipeline="0" } send-2.1 = (f32[1, 1024, 1024], u32[], token[]) send(recv-done.1.q, after-all-2.1), channel_id=2, frontend_attributes={ _xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}, {3,4}}", _xla_send_recv_pipeline="0" } recv-done-2.1.p = (f32[1,1024,1024], token[]) recv-done(recv-2.1), channel_id=2, 
frontend_attributes={ _xla_send_recv_pipeline="0" } send-done-2.1.p = token[] send-done(send-2.1), channel_id=2, frontend_attributes={ _xla_send_recv_pipeline="0" } while-init-2.p = (u32[], (f32[1,1024,1024], token[]), token[]) tuple(c0, recv-done-2.1.p, send-done-2.1.p) while-result-2.p = (u32[], (f32[1,1024,1024], token[]), token[]) while(while-init-2.p), body=while-body-2, condition=while-cond-2, backend_config={"known_trip_count":{"n":"25"}} recv-done-2.1.q = (f32[1,1024,1024], token[]) get-tuple-element(while-result-2.p), index=1 ROOT entry-result = f32[1, 1024, 1024] get-tuple-element(recv-done-2.1.q), index=0 } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr)); PipelinedP2PRewriter rewriter; TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get())); EXPECT_TRUE(changed); } TEST_F(PipelinedP2pRewriterTest, SendRecvPipelined2) { const char* kModuleStr = R"( HloModule test, is_scheduled=true while-cond { param = (u32[], (f32[1,1024,1024], token[]), token[], (f32[1,1024,1024], token[]), token[]) parameter(0) count = get-tuple-element(param), index=0 ub = u32[] constant(25) ROOT cond-result = pred[] compare(count, ub), direction=LT } while-body { param = (u32[], (f32[1,1024,1024], token[]), token[], (f32[1,1024,1024], token[]), token[]) parameter(0) count = get-tuple-element(param), index=0 recv-done.0.q = (f32[1,1024,1024], token[]) get-tuple-element(param), index=1 recv-data.0 = f32[1, 1024, 1024] get-tuple-element(recv-done.0.q), index=0 recv-done.1.q = (f32[1,1024,1024], token[]) get-tuple-element(param), index=3 recv-data.1 = f32[1, 1024, 1024] get-tuple-element(recv-done.1.q), index=0 replica = u32[] replica-id() constant0 = u32[] constant(0) compare0 = pred[] compare(replica, constant0), direction=EQ compare = pred[1, 1024, 1024] broadcast(compare0), dimensions={} recv-data = f32[1, 1024, 1024] select(compare, recv-data.0, recv-data.1) c1 = u32[] constant(1) new-count = u32[] add(count, c1) c10 = u32[] constant(10) sum = u32[] add(replica, c10) sum2 = u32[] add(sum, count) conv = f32[] convert(sum2) p = f32[1, 1024, 1024] broadcast(conv), dimensions={} b = f32[1, 1024, 1024] add(p, recv-data) c = f32[1, 1024, 1024] multiply(b, b) d = f32[1, 1024, 1024] tan(c) s = f32[1, 1024, 1024] dot(c, d), lhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1} send-data = f32[1, 1024, 1024] add(c, s) after-all = token[] after-all() recv = (f32[1, 1024, 1024], u32[], token[]) recv(after-all), channel_id=1, frontend_attributes={ _xla_send_recv_source_target_pairs="{{3,0}}", _xla_send_recv_pipeline="0" } send = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all), channel_id=1, frontend_attributes={ _xla_send_recv_source_target_pairs="{{3,0}}", _xla_send_recv_pipeline="0" } recv-done.p = (f32[1,1024,1024], token[]) recv-done(recv), channel_id=1, frontend_attributes={ _xla_send_recv_pipeline="0" } send-done.p = token[] send-done(send), channel_id=1, frontend_attributes={ _xla_send_recv_pipeline="0" } after-all.1 = token[] after-all() recv.1 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all.1), channel_id=2, frontend_attributes={ _xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}}", _xla_send_recv_pipeline="1" } send.1 = (f32[1, 1024, 1024], u32[], token[]) send(send-data, after-all.1), channel_id=2, frontend_attributes={ _xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}}", _xla_send_recv_pipeline="1" } recv-done.1.p = (f32[1,1024,1024], token[]) recv-done(recv.1), channel_id=2, 
frontend_attributes={ _xla_send_recv_pipeline="1" } send-done.1.p = token[] send-done(send.1), channel_id=2, frontend_attributes={ _xla_send_recv_pipeline="1" } ROOT body-result = (u32[], (f32[1,1024,1024], token[]), token[], (f32[1,1024,1024], token[]), token[]) tuple(new-count, recv-done.p, send-done.p, recv-done.1.p, send-done.1.p) } ENTRY main { c0 = u32[] constant(0) f0 = f32[] constant(0.0) init = f32[1, 1024, 1024] broadcast(f0), dimensions={} after-all.2 = token[] after-all() recv.2 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all.2), channel_id=1, frontend_attributes={ _xla_send_recv_source_target_pairs="{{3,0}}", _xla_send_recv_pipeline="0" } send.2 = (f32[1, 1024, 1024], u32[], token[]) send(init, after-all.2), channel_id=1, frontend_attributes={ _xla_send_recv_source_target_pairs="{{3,0}}", _xla_send_recv_pipeline="0" } recv-done.2.p = (f32[1,1024,1024], token[]) recv-done(recv.2), channel_id=1, frontend_attributes={ _xla_send_recv_pipeline="0" } send-done.2.p = token[] send-done(send.2), channel_id=1, frontend_attributes={ _xla_send_recv_pipeline="0" } after-all.3 = token[] after-all() recv.3 = (f32[1, 1024, 1024], u32[], token[]) recv(after-all.3), channel_id=2, frontend_attributes={ _xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}}", _xla_send_recv_pipeline="1" } send.3 = (f32[1, 1024, 1024], u32[], token[]) send(init, after-all.3), channel_id=2, frontend_attributes={ _xla_send_recv_source_target_pairs="{{0,1}, {1,2}, {2,3}}", _xla_send_recv_pipeline="1" } recv-done.3.p = (f32[1,1024,1024], token[]) recv-done(recv.3), channel_id=2, frontend_attributes={ _xla_send_recv_pipeline="1" } send-done.3.p = token[] send-done(send.3), channel_id=2, frontend_attributes={ _xla_send_recv_pipeline="1" } while-init.p = (u32[], (f32[1,1024,1024], token[]), token[], (f32[1,1024,1024], token[]), token[]) tuple(c0, recv-done.2.p, send-done.2.p, recv-done.3.p, send-done.3.p) while-result.p = (u32[], (f32[1,1024,1024], token[]), token[], (f32[1,1024,1024], token[]), token[]) while(while-init.p), body=while-body, condition=while-cond, backend_config={"known_trip_count":{"n":"25"}} recv-done.2.q = (f32[1,1024,1024], token[]) get-tuple-element(while-result.p), index=1 recv-data.2 = f32[1, 1024, 1024] get-tuple-element(recv-done.2.q), index=0 recv-done.3.q = (f32[1,1024,1024], token[]) get-tuple-element(while-result.p), index=3 recv-data.3 = f32[1, 1024, 1024] get-tuple-element(recv-done.3.q), index=0 replica = u32[] replica-id() constant0 = u32[] constant(0) compare0 = pred[] compare(replica, constant0), direction=EQ compare = pred[1, 1024, 1024] broadcast(compare0), dimensions={} ROOT entry-result = f32[1, 1024, 1024] select(compare, recv-data.2, recv-data.3) } )"; const char* kExpected = R"( CHECK: %while-body (param.1: (u32[], (f32[1,1024,1024], u32[], token[]), (f32[1,1024,1024], u32[], token[]), (f32[1,1024,1024], u32[], token[]), (f32[1,1024,1024], u32[], token[]))) -> (u32[], (f32[1,1024,1024], u32[], token[]), (f32[1,1024,1024], u32[], token[]), (f32[1,1024,1024], u32[], token[]), (f32[1,1024,1024], u32[], token[])) { CHECK: %param.1 = parameter(0) CHECK: %get-tuple-element = get-tuple-element(%param.1), index=1 CHECK: %get-tuple-element.1 = get-tuple-element(%param.1), index=2 CHECK: %get-tuple-element.2 = get-tuple-element(%param.1), index=3 CHECK: %get-tuple-element.3 = get-tuple-element(%param.1), index=4 CHECK: %count.1 = get-tuple-element(%param.1), index=0 CHECK: %recv-done.p.clone = recv-done(%get-tuple-element), channel_id=1, 
frontend_attributes={_xla_send_recv_pipeline="0"} CHECK: %recv-data.0 = get-tuple-element(%recv-done.p.clone), index=0 CHECK: %recv-done.1.p.clone = recv-done(%get-tuple-element.2), channel_id=2, frontend_attributes={_xla_send_recv_pipeline="1"} CHECK: %recv-data.1 = get-tuple-element(%recv-done.1.p.clone), index=0 CHECK: %replica = replica-id() CHECK: %constant0 = constant(0) CHECK: %compare0 = compare(%replica, %constant0), direction=EQ CHECK: %compare = broadcast(%compare0), dimensions={} CHECK: %recv-data.2 = select(%compare, %recv-data.0, %recv-data.1) CHECK: %c1 = constant(1) CHECK: %new-count = add(%count.1, %c1) CHECK: %c10 = constant(10) CHECK: %sum = add(%replica, %c10) CHECK: %sum2 = add(%sum, %count.1) CHECK: %conv = convert(%sum2) CHECK: %p = broadcast(%conv), dimensions={} CHECK: %b = add(%p, %recv-data.2) CHECK: %c = multiply(%b, %b) CHECK: %d = tan(%c) CHECK: %s = dot(%c, %d), lhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1} CHECK: %send-data = add(%c, %s) CHECK: %after-all = after-all() CHECK: %send-done.p.clone = send-done(%get-tuple-element.1), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0"} CHECK: %send-done.1.p.clone = send-done(%get-tuple-element.3), channel_id=2, frontend_attributes={_xla_send_recv_pipeline="1"} CHECK{LITERAL}: %recv = recv(%after-all), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0",_xla_send_recv_source_target_pairs={{3,0}}} CHECK{LITERAL}: %send = send(%send-data, %after-all), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0",_xla_send_recv_source_target_pairs={{3,0}}} CHECK: %after-all.1 = after-all() CHECK{LITERAL}: %recv.1 = recv(%after-all.1), channel_id=2, frontend_attributes={_xla_send_recv_pipeline="1",_xla_send_recv_source_target_pairs={{0,1}, {1,2}, {2,3}}} CHECK{LITERAL}: %send.1 = send(%send-data, %after-all.1), channel_id=2, frontend_attributes={_xla_send_recv_pipeline="1",_xla_send_recv_source_target_pairs={{0,1}, {1,2}, {2,3}}} CHECK: ROOT %tuple = tuple(%new-count, %recv, %send, %recv.1, %send.1) CHECK: } CHECK: %while-cond (param: (u32[], (f32[1,1024,1024], u32[], token[]), (f32[1,1024,1024], u32[], token[]), (f32[1,1024,1024], u32[], token[]), (f32[1,1024,1024], u32[], token[]))) -> pred[] { CHECK: %param = parameter(0) CHECK: %count = get-tuple-element(%param), index=0 CHECK: %ub = constant(25) CHECK: ROOT %cond-result = compare(%count, %ub), direction=LT CHECK: } CHECK: ENTRY %main () -> f32[1,1024,1024] { CHECK: %c0 = constant(0) CHECK: %f0 = constant(0) CHECK: %init = broadcast(%f0), dimensions={} CHECK: %after-all.2 = after-all() CHECK{LITERAL}: %recv.2 = recv(%after-all.2), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0",_xla_send_recv_source_target_pairs={{3,0}}} CHECK{LITERAL}: %send.2 = send(%init, %after-all.2), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0",_xla_send_recv_source_target_pairs={{3,0}}} CHECK: %after-all.3 = after-all() CHECK{LITERAL}: %recv.3 = recv(%after-all.3), channel_id=2, frontend_attributes={_xla_send_recv_pipeline="1",_xla_send_recv_source_target_pairs={{0,1}, {1,2}, {2,3}}} CHECK{LITERAL}: %send.3 = send(%init, %after-all.3), channel_id=2, frontend_attributes={_xla_send_recv_pipeline="1",_xla_send_recv_source_target_pairs={{0,1}, {1,2}, {2,3}}} CHECK: %while-init = tuple(%c0, %recv.2, %send.2, %recv.3, %send.3) CHECK{LITERAL}: %while-result.p.clone = while(%while-init), condition=%while-cond, body=%while-body, backend_config={"known_trip_count":{"n":"25"}} CHECK: 
%get-tuple-element.4 = get-tuple-element(%while-result.p.clone), index=1 CHECK: %get-tuple-element.5 = get-tuple-element(%while-result.p.clone), index=2 CHECK: %get-tuple-element.6 = get-tuple-element(%while-result.p.clone), index=3 CHECK: %get-tuple-element.7 = get-tuple-element(%while-result.p.clone), index=4 CHECK: %recv-done.2.p.clone = recv-done(%get-tuple-element.4), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0"} CHECK: %recv-data.3 = get-tuple-element(%recv-done.2.p.clone), index=0 CHECK: %recv-done.3.p.clone = recv-done(%get-tuple-element.6), channel_id=2, frontend_attributes={_xla_send_recv_pipeline="1"} CHECK: %recv-data.4 = get-tuple-element(%recv-done.3.p.clone), index=0 CHECK: %replica.1 = replica-id() CHECK: %constant0.1 = constant(0) CHECK: %compare0.1 = compare(%replica.1, %constant0.1), direction=EQ CHECK: %compare.1 = broadcast(%compare0.1), dimensions={} CHECK: %send-done.2.p.clone = send-done(%get-tuple-element.5), channel_id=1, frontend_attributes={_xla_send_recv_pipeline="0"} CHECK: %send-done.3.p.clone = send-done(%get-tuple-element.7), channel_id=2, frontend_attributes={_xla_send_recv_pipeline="1"} CHECK: ROOT %entry-result = select(%compare.1, %recv-data.3, %recv-data.4) CHECK: })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kModuleStr)); PipelinedP2PRewriter rewriter; TF_ASSERT_OK_AND_ASSIGN(bool changed, rewriter.Run(module.get())); EXPECT_TRUE(changed); DoFileCheck(module.get(), kExpected); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/pipelined_p2p_rewriter.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/pipelined_p2p_rewriter_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
1537adbd-f895-48a3-a962-980f34a94b42
cpp
tensorflow/tensorflow
cudnn_norm_rewriter
third_party/xla/xla/service/gpu/transforms/cudnn_norm_rewriter.cc
third_party/xla/xla/service/gpu/transforms/cudnn_norm_rewriter_test.cc
#include "xla/service/gpu/transforms/cudnn_norm_rewriter.h" #include <algorithm> #include <cstdint> #include <cstdlib> #include <functional> #include <iterator> #include <limits> #include <optional> #include <utility> #include <vector> #include "google/protobuf/wrappers.pb.h" #include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/status/status.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/hlo/ir/dfs_hlo_visitor_with_default.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/layout_util.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/cublas_cudnn.h" #include "xla/service/hlo_creation_utils.h" #include "xla/service/pattern_matcher.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/stream_executor/device_description.h" #include "xla/tsl/protobuf/dnn.pb.h" #include "xla/types.h" #include "xla/util.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" #include "tsl/platform/statusor.h" #if GOOGLE_CUDA #include "third_party/gpus/cuda/include/cuda.h" #include "third_party/gpus/cudnn/cudnn.h" #include "third_party/gpus/cudnn/cudnn_version.h" #endif namespace xla { namespace gpu { namespace { namespace m = match; const HloInstruction* SkipUnaryOps(const HloInstruction* instr) { while (instr->opcode() == HloOpcode::kConvert || instr->opcode() == HloOpcode::kBitcast || instr->opcode() == HloOpcode::kReshape) { instr = instr->operand(0); } return instr; } void SkipUnaryOpsTopDownRecursive(HloInstruction* instr, std::vector<HloInstruction*>& instrs) { if (instr->opcode() == HloOpcode::kConvert || instr->opcode() == HloOpcode::kBitcast || instr->opcode() == HloOpcode::kReshape) { for (HloInstruction* user : instr->users()) { SkipUnaryOpsTopDownRecursive(user, instrs); } } else { instrs.emplace_back(instr); } } struct NormMetadata { HloInstruction *x_transpose, *y_transpose; std::vector<int64_t> norm_dims_adjusted, non_norm_dims_adjusted; }; using NormMetadataMap = absl::flat_hash_map<HloInstruction*, NormMetadata>; class UniqueHloInstruction { public: UniqueHloInstruction() : is_set_(false), instr_(nullptr), capture_or_verify_() {} HloInstruction* Instr() const { return instr_; } void SetInstr(HloInstruction* instr) { is_set_ = true; instr_ = instr; } bool CaptureOrVerify(HloInstruction* instr) { if (is_set_ && instr != instr_) { instr_ = nullptr; } if (!is_set_) { is_set_ = true; instr_ = instr; } return instr_; } std::function<bool(const HloInstruction*)> GetCaptureOrVerifyFn() { if (!capture_or_verify_) { capture_or_verify_ = [this](const HloInstruction* instr) -> bool { return CaptureOrVerify(const_cast<HloInstruction*>(instr)); }; } return capture_or_verify_; } private: bool is_set_; HloInstruction* instr_; std::function<bool(const HloInstruction*)> capture_or_verify_; }; absl::StatusOr<int64_t> CConstant( se::CudaComputeCapability cuda_compute_capability) { if (cuda_compute_capability.major == se::CudaComputeCapability::AMPERE) { return 32 * 128; } else if (cuda_compute_capability.major == se::CudaComputeCapability::HOPPER) { return 32 * 144; } return xla::Internal("Norm kernels require Ampere or Hopper architecture."); } bool CompatibleElementType(const HloInstruction* instr) { PrimitiveType element_type = instr->shape().element_type(); return element_type == BF16 || element_type == F16 || element_type == F32; } std::vector<int64_t> AdjustedDimensions(const Shape& shape, 
absl::Span<const int64_t> dimensions) { absl::flat_hash_map<int64_t, int64_t> dimension_map; for (int64_t dimension = 0, non_degen_dimension = 0; dimension < shape.rank(); ++dimension) { if (shape.dimensions(dimension) > 1) { dimension_map.insert({dimension, non_degen_dimension}); non_degen_dimension++; } } std::vector<int64_t> adjusted_dimensions; for (int64_t dimension : dimensions) { auto non_degenerate_dimension = dimension_map.find(dimension); if (non_degenerate_dimension != dimension_map.end()) { adjusted_dimensions.emplace_back(non_degenerate_dimension->second); } } return adjusted_dimensions; } std::vector<int64_t> AdjustedDimensions(const HloInstruction* instr) { Shape shape; if (instr->opcode() == HloOpcode::kBroadcast) { shape = instr->shape(); } else if (instr->opcode() == HloOpcode::kReduce) { shape = instr->operand(0)->shape(); } else { return {}; } return AdjustedDimensions(shape, instr->dimensions()); } bool AppliesAddReduce(const HloInstruction* instr, absl::Span<const int64_t> reduce_dims = {}) { if (instr->opcode() != HloOpcode::kReduce) { return false; } if (!reduce_dims.empty() && AdjustedDimensions(instr) != reduce_dims) { return false; } HloComputation* reduce_comp = instr->to_apply(); HloInstruction* reduce_comp_root = reduce_comp->root_instruction(); return instr->operand_count() == 2 && instr->operand(1)->opcode() == HloOpcode::kConstant && ShapeUtil::IsScalar(instr->operand(1)->shape()) && instr->operand(1)->literal().GetAsDouble({}) == 0. && reduce_comp_root->opcode() == HloOpcode::kAdd && reduce_comp_root->operand(0)->opcode() == HloOpcode::kParameter && reduce_comp_root->operand(1)->opcode() == HloOpcode::kParameter; } bool CalculatesExpectation(const HloInstruction* instr) { instr = SkipUnaryOps(instr); if (instr->opcode() != HloOpcode::kMultiply) { return false; } bool bcast_operand = instr->operand(0)->opcode() != HloOpcode::kBroadcast; const HloInstruction *broadcast = instr->operand(bcast_operand), *reduce = SkipUnaryOps(instr->operand(!bcast_operand)); if (reduce->opcode() != HloOpcode::kReduce || broadcast->opcode() != HloOpcode::kBroadcast || broadcast->operand(0)->opcode() != HloOpcode::kConstant) { return false; } float actual_r_nelems = broadcast->operand(0)->literal().GetAsDouble({}).value(); int64_t nelems = 1; for (int64_t norm_dim : reduce->dimensions()) { nelems *= reduce->operand(0)->shape().dimensions()[norm_dim]; } float r_nelems = 1. 
/ static_cast<float>(nelems); float numerical_epsilon = std::numeric_limits<bfloat16>::epsilon(); return abs(actual_r_nelems - r_nelems) < ((actual_r_nelems + r_nelems) * numerical_epsilon); } bool FindTargetRecursive( const HloInstruction* instr, const HloInstruction* target, absl::flat_hash_set<const HloInstruction*>& visited_instrs, const HloInstruction* transpose) { visited_instrs.emplace(instr); const absl::flat_hash_set<HloOpcode> supported_ops = { HloOpcode::kConvert, HloOpcode::kBitcast, HloOpcode::kReshape}; if (instr == target) { return true; } for (HloInstruction* user : instr->users()) { if ((supported_ops.contains(user->opcode()) || user == transpose) && !visited_instrs.contains(user)) { return FindTargetRecursive(user, target, visited_instrs, transpose); } } if (supported_ops.contains(instr->opcode())) { return FindTargetRecursive(instr->operand(0), target, visited_instrs, transpose); } return false; } bool FindTarget(const HloInstruction* custom_call, const HloInstruction* instr, const HloInstruction* target, const NormMetadataMap& norm_metadata) { absl::flat_hash_set<const HloInstruction*> visited_instrs; auto custom_call_metadata = norm_metadata.find(custom_call); if (custom_call_metadata == norm_metadata.end()) { return false; } return FindTargetRecursive(instr, target, visited_instrs, custom_call_metadata->second.x_transpose); } std::vector<int64_t> MapDimensions(const Shape& original_shape, const Shape& reshaped_shape, const absl::Span<const int64_t> dimensions) { auto dimension_product = [](const Shape& shape, absl::Span<const int64_t> product_dimensions) -> int64_t { int64_t product = 1; for (int64_t product_dimension : product_dimensions) { product *= shape.dimensions(product_dimension); } return product; }; absl::flat_hash_map<int64_t, std::vector<int64_t>> dimensions_map; std::vector<int64_t> original_dimensions, reshaped_dimensions; for (int64_t original_dimension = 0, reshaped_dimension = 0; original_dimension < original_shape.rank(); ++original_dimension) { original_dimensions.emplace_back(original_dimension); while ((reshaped_dimensions.empty() || dimension_product(reshaped_shape, reshaped_dimensions) < dimension_product(original_shape, original_dimensions)) && reshaped_dimension < reshaped_shape.rank()) { reshaped_dimensions.emplace_back(reshaped_dimension++); } if (original_dimensions.size() > 1 && reshaped_dimensions.size() > 1) { return {}; } if (dimension_product(original_shape, original_dimensions) == dimension_product(reshaped_shape, reshaped_dimensions)) { std::vector<int64_t> original_dimensions_in_dimensions; std::set_intersection( original_dimensions.begin(), original_dimensions.end(), dimensions.begin(), dimensions.end(), std::back_inserter(original_dimensions_in_dimensions)); if (!original_dimensions_in_dimensions.empty() && original_dimensions_in_dimensions.size() != original_dimensions.size()) { return {}; } for (int64_t dimension : original_dimensions) { dimensions_map.insert({dimension, reshaped_dimensions}); } original_dimensions.clear(); reshaped_dimensions.clear(); } } std::vector<int64_t> mapped_dimensions; for (int64_t dimension : dimensions) { auto mapped_dimension = dimensions_map.find(dimension); if (mapped_dimension == dimensions_map.end()) { return {}; } mapped_dimensions.insert(mapped_dimensions.end(), mapped_dimension->second.begin(), mapped_dimension->second.end()); } mapped_dimensions.erase( std::unique(mapped_dimensions.begin(), mapped_dimensions.end()), mapped_dimensions.end()); return mapped_dimensions; } HloInstruction* 
FindAddReduceRecursive( HloInstruction* instr, const Shape& orig_instr_shape, const absl::Span<const int64_t> reduce_dims, absl::flat_hash_set<HloInstruction*>& visited_instrs) { visited_instrs.emplace(instr); const absl::flat_hash_set<HloOpcode> supported_ops = { HloOpcode::kConvert, HloOpcode::kBitcast, HloOpcode::kReshape}; for (HloInstruction* user : instr->users()) { if (user->opcode() == HloOpcode::kReduce) { std::vector<int64_t> mapped_reduce_dims = MapDimensions(orig_instr_shape, instr->shape(), reduce_dims); if (!mapped_reduce_dims.empty() && AppliesAddReduce(user, mapped_reduce_dims)) { return user; } } if (supported_ops.contains(user->opcode()) && !visited_instrs.contains(user)) { return FindAddReduceRecursive(user, orig_instr_shape, reduce_dims, visited_instrs); } } if (supported_ops.contains(instr->opcode())) { return FindAddReduceRecursive(instr->mutable_operand(0), orig_instr_shape, reduce_dims, visited_instrs); } return nullptr; } HloInstruction* FindAddReduce(HloInstruction* instr, const absl::Span<const int64_t> reduce_dims) { absl::flat_hash_set<HloInstruction*> visited_instrs; return FindAddReduceRecursive(instr, instr->shape(), reduce_dims, visited_instrs); } template <typename Pattern> auto SupportedConvert(Pattern pattern) { auto supported_convert = [](const HloInstruction* instr) -> bool { return CompatibleElementType(instr) && CompatibleElementType(instr->operand(0)); }; return m::Convert(pattern).WithPredicate(supported_convert); } template <typename Pattern> auto SupportedBitcastOrReshape(Pattern pattern) { auto supported_bitcast_or_reshape = [](const HloInstruction* instr) -> bool { return ShapeUtil::Equal( ShapeUtil::DropDegenerateDimensions(instr->shape()), ShapeUtil::DropDegenerateDimensions(instr->operand(0)->shape())); }; return m::AnyOf<HloInstruction>( m::Bitcast(pattern).WithPredicate(supported_bitcast_or_reshape), m::Reshape(pattern).WithPredicate(supported_bitcast_or_reshape)); } template <typename Pattern> auto OptionalSupportedTransform(Pattern pattern) { auto shared_subpattern = m::SharedSubpattern(pattern); return m::AnyOf<HloInstruction>( SupportedConvert(SupportedBitcastOrReshape(shared_subpattern)), SupportedBitcastOrReshape(SupportedConvert(shared_subpattern)), SupportedConvert(shared_subpattern), SupportedBitcastOrReshape(shared_subpattern), shared_subpattern); } template <typename Pattern> auto BitcastOrReshape(Pattern pattern) { return OptionalSupportedTransform( m::AnyOf<HloInstruction>(m::Bitcast(pattern), m::Reshape(pattern))); } template <typename Pattern> auto Transpose(Pattern pattern) { return OptionalSupportedTransform(m::Transpose(pattern)); } template <typename Pattern> auto Rsqrt(HloInstruction** rsqrt, Pattern pattern) { return OptionalSupportedTransform(m::Rsqrt(rsqrt, pattern)); } template <typename Pattern0, typename Pattern1> auto AddAnyOrder(Pattern0 pattern0, Pattern1 pattern1) { return OptionalSupportedTransform(m::AddAnyOrder(pattern0, pattern1)); } template <typename Pattern0, typename Pattern1> auto Subtract(Pattern0 pattern0, Pattern1 pattern1) { return OptionalSupportedTransform(m::Subtract(pattern0, pattern1)); } template <typename Pattern0, typename Pattern1> auto Subtract(HloInstruction** subtract, Pattern0 pattern0, Pattern1 pattern1) { return OptionalSupportedTransform(m::Subtract(subtract, pattern0, pattern1)); } template <typename Pattern0, typename Pattern1> auto MultiplyAnyOrder(Pattern0 pattern0, Pattern1 pattern1) { return OptionalSupportedTransform(m::MultiplyAnyOrder(pattern0, pattern1)); } template 
<typename Pattern0, typename Pattern1> auto MultiplyAnyOrder(HloInstruction** multiply, Pattern0 pattern0, Pattern1 pattern1) { return OptionalSupportedTransform( m::MultiplyAnyOrder(multiply, pattern0, pattern1)); } template <typename Pattern> auto Square(Pattern pattern) { return MultiplyAnyOrder(pattern, pattern) .WithPredicate([](const HloInstruction* instr) { return instr->unique_operands().size() == 1; }); } template <typename Pattern> auto Cube(Pattern pattern) { auto unique_cube = [](const HloInstruction* instr) -> bool { bool square_operand = instr->operand(0)->opcode() != HloOpcode::kMultiply; return instr->operand(!square_operand)->opcode() != HloOpcode::kMultiply && instr->operand(square_operand)->operand(0) == instr->operand(!square_operand); }; return MultiplyAnyOrder(Square(pattern), pattern).WithPredicate(unique_cube); } template <typename Pattern> auto AddReduce(Pattern pattern) { return OptionalSupportedTransform( m::Reduce(pattern, m::Op()) .WithPredicate([](const HloInstruction* instr) { return AppliesAddReduce(instr); })); } template <typename Pattern> auto AddReduce(HloInstruction** reduction, Pattern pattern) { return OptionalSupportedTransform( m::Reduce(reduction, pattern, m::Op()) .WithPredicate([](const HloInstruction* instr) { return AppliesAddReduce(instr); })); } template <typename Pattern> auto NegateAddReduce(HloInstruction** reduction, Pattern pattern) { return m::AnyOf<HloInstruction>(AddReduce(reduction, m::Negate(pattern)), m::Negate(AddReduce(reduction, pattern))); } template <typename Pattern> auto Expectation(Pattern pattern) { auto shared_subpattern = MultiplyAnyOrder(m::Broadcast(m::ConstantScalar()), AddReduce(pattern)) .WithPredicate([](const HloInstruction* instr) { return CalculatesExpectation(instr); }); return m::AnyOf<HloInstruction>(m::Broadcast(shared_subpattern), shared_subpattern); } template <typename Pattern> auto Expectation(UniqueHloInstruction* expectation, Pattern pattern) { auto shared_subpattern = OptionalSupportedTransform( m::MultiplyAnyOrder(m::Broadcast(m::ConstantScalar()), AddReduce(pattern)) .WithPredicate([](const HloInstruction* instr) { return CalculatesExpectation(instr); }) .WithPredicate(expectation->GetCaptureOrVerifyFn())); return m::AnyOf<HloInstruction>(m::Broadcast(shared_subpattern), shared_subpattern); } template <typename Pattern> auto Expectation(UniqueHloInstruction* expectation, HloInstruction** reduce, Pattern pattern) { auto shared_subpattern = OptionalSupportedTransform( m::MultiplyAnyOrder(m::Broadcast(m::ConstantScalar()), AddReduce(reduce, pattern)) .WithPredicate([](const HloInstruction* instr) { return CalculatesExpectation(instr); }) .WithPredicate(expectation->GetCaptureOrVerifyFn())); return m::AnyOf<HloInstruction>(m::Broadcast(shared_subpattern), shared_subpattern); } auto Variance(UniqueHloInstruction* variance, UniqueHloInstruction* expectation, UniqueHloInstruction* x) { return m::AnyOf<HloInstruction>( Subtract( Expectation(Square(OptionalSupportedTransform( m::Op().WithPredicate(x->GetCaptureOrVerifyFn())))), Square(Expectation(expectation, OptionalSupportedTransform(m::Op().WithPredicate( x->GetCaptureOrVerifyFn()))))) .WithPredicate(variance->GetCaptureOrVerifyFn()), Expectation( Square(Subtract( OptionalSupportedTransform( m::Op().WithPredicate(x->GetCaptureOrVerifyFn())), Expectation(expectation, OptionalSupportedTransform(m::Op().WithPredicate( x->GetCaptureOrVerifyFn())))))) .WithPredicate(variance->GetCaptureOrVerifyFn())); } auto NormFactor(HloInstruction** norm_factor, 
UniqueHloInstruction* x, UniqueHloInstruction* variance, UniqueHloInstruction* expectation, UniqueHloInstruction* epsilon) { auto shared_subpattern = m::SharedSubpattern(Rsqrt( norm_factor, AddAnyOrder(Variance(variance, expectation, x), m::Broadcast(m::ConstantScalar().WithPredicate( epsilon->GetCaptureOrVerifyFn()))))); return m::AnyOf<HloInstruction>(m::Broadcast(shared_subpattern), shared_subpattern); } template <typename P0, typename P1, typename P2> auto MultiplyMultiplyAnyOrder(P0 p0, P1 p1, P2 p2) { return m::AnyOf<HloInstruction>( MultiplyAnyOrder(p0, MultiplyAnyOrder(p1, p2)), MultiplyAnyOrder(p1, MultiplyAnyOrder(p0, p2)), MultiplyAnyOrder(p2, MultiplyAnyOrder(p0, p1))); } template <typename P0, typename P1, typename P2> auto AddAddAnyOrder(P0 p0, P1 p1, P2 p2) { return m::AnyOf<HloInstruction>(AddAnyOrder(p0, AddAnyOrder(p1, p2)), AddAnyOrder(p1, AddAnyOrder(p0, p2)), AddAnyOrder(p2, AddAnyOrder(p0, p1))); } template <typename P0, typename P1, typename P2> auto MultiplyAddAnyOrder(P0 p0, P1 p1, P2 p2) { return m::AnyOf<HloInstruction>( MultiplyAnyOrder(p0, AddAnyOrder(p1, p2)), AddAnyOrder(MultiplyAnyOrder(p0, p1), MultiplyAnyOrder(p0, p2))); } template <typename P0, typename P1, typename P2> auto SubtractAddAnyOrder(P0 p0, P1 p1, P2 p2) { return m::AnyOf<HloInstruction>(AddAnyOrder(Subtract(p0, p1), p2), AddAnyOrder(Subtract(p2, p1), p0), Subtract(AddAnyOrder(p0, p2), p1)); } template <typename P0, typename P1, typename P2, typename P3, typename P4> auto SubtractMultiplyAddAnyOrder(P0 p0, P1 p1, P2 p2, P3 p3, P4 p4) { return m::AnyOf<HloInstruction>( SubtractAddAnyOrder(MultiplyMultiplyAnyOrder(p0, p2, p3), MultiplyMultiplyAnyOrder(p1, p2, p3), p4), AddAnyOrder(MultiplyMultiplyAnyOrder(Subtract(p0, p1), p2, p3), p4)); } auto FusedExpectation(UniqueHloInstruction* custom_call) { auto shared_subpattern = m::SharedSubpattern(m::GetTupleElement( m::CustomCall({kCudnnNormCallTarget}) .WithPredicate(custom_call->GetCaptureOrVerifyFn()), 1)); return m::AnyOf<HloInstruction>(shared_subpattern, BitcastOrReshape(shared_subpattern)); } auto FusedExpectation(UniqueHloInstruction* fused_expectation, UniqueHloInstruction* custom_call) { auto shared_subpattern = m::SharedSubpattern( m::GetTupleElement( m::CustomCall({kCudnnNormCallTarget}) .WithPredicate(custom_call->GetCaptureOrVerifyFn()), 1) .WithPredicate(fused_expectation->GetCaptureOrVerifyFn())); return m::AnyOf<HloInstruction>(shared_subpattern, BitcastOrReshape(shared_subpattern)); } auto FusedNormFactor(UniqueHloInstruction* custom_call) { auto shared_subpattern = m::SharedSubpattern(m::GetTupleElement( m::CustomCall({kCudnnNormCallTarget}) .WithPredicate(custom_call->GetCaptureOrVerifyFn()), 2)); return m::AnyOf<HloInstruction>(shared_subpattern, BitcastOrReshape(shared_subpattern)); } auto FusedNormFactor(UniqueHloInstruction* fused_norm_factor, UniqueHloInstruction* custom_call) { auto shared_subpattern = m::SharedSubpattern( m::GetTupleElement( m::CustomCall({kCudnnNormCallTarget}) .WithPredicate(custom_call->GetCaptureOrVerifyFn()), 2) .WithPredicate(fused_norm_factor->GetCaptureOrVerifyFn())); return m::AnyOf<HloInstruction>(shared_subpattern, BitcastOrReshape(shared_subpattern)); } auto DNormFactor(UniqueHloInstruction* custom_call) { return MultiplyAnyOrder(m::Broadcast(m::ConstantScalar(-0.5)), Cube(FusedNormFactor(custom_call))); } auto XCenter(UniqueHloInstruction* x, UniqueHloInstruction* custom_call, const NormMetadataMap& norm_metadata) { auto capture_or_verify_x = [x, custom_call, &norm_metadata](const 
HloInstruction* instr) -> bool { return x->CaptureOrVerify( FindTarget(custom_call->Instr(), instr->operand(0), custom_call->Instr()->operand(0), norm_metadata) ? custom_call->Instr()->mutable_operand(0) : nullptr); }; return Subtract(m::Op(), m::Broadcast(FusedExpectation(custom_call))) .WithPredicate(capture_or_verify_x); } auto XCenter(UniqueHloInstruction* x_center, UniqueHloInstruction* x, UniqueHloInstruction* fused_expectation, UniqueHloInstruction* custom_call, const NormMetadataMap& norm_metadata) { auto capture_or_verify_x = [x, custom_call, &norm_metadata](const HloInstruction* instr) -> bool { return x->CaptureOrVerify( FindTarget(custom_call->Instr(), instr->operand(0), custom_call->Instr()->operand(0), norm_metadata) ? custom_call->Instr()->mutable_operand(0) : nullptr); }; return Subtract(m::Op(), m::Broadcast(FusedExpectation(fused_expectation, custom_call))) .WithPredicate(x_center->GetCaptureOrVerifyFn()) .WithPredicate(capture_or_verify_x); } auto F0(UniqueHloInstruction* custom_call, UniqueHloInstruction* scale, UniqueHloInstruction* dy, UniqueHloInstruction* x, HloInstruction** reduce, const NormMetadataMap& norm_metadata) { auto capture_or_verify_scale = [scale, custom_call, &norm_metadata]( const HloInstruction* instr) -> bool { return scale->CaptureOrVerify(FindTarget(custom_call->Instr(), instr, custom_call->Instr()->operand(1), norm_metadata) ? custom_call->Instr()->mutable_operand(1) : nullptr); }; return AddReduce( reduce, MultiplyMultiplyAnyOrder( XCenter(x, custom_call, norm_metadata), m::Broadcast(m::Op().WithPredicate(capture_or_verify_scale)), m::Op().WithPredicate(dy->GetCaptureOrVerifyFn()))); } auto F1(UniqueHloInstruction* x, UniqueHloInstruction* x_center, UniqueHloInstruction* fused_expectation, UniqueHloInstruction* custom_call, UniqueHloInstruction* scale, UniqueHloInstruction* dy, HloInstruction** reduce, const NormMetadataMap& norm_metadata) { auto broadcasts_two_over_nelems = [](const HloInstruction* instr) -> bool { const HloInstruction* multiply = SkipUnaryOps(instr->operand(0)); bool bcast_operand = multiply->operand(0)->opcode() != HloOpcode::kBroadcast; float actual_two_over_nelems = multiply->operand(bcast_operand) ->operand(0) ->literal() .GetAsDouble({}) .value(); int64_t nelems = 1; for (int i = 0; i < instr->shape().dimensions_size(); ++i) { if (!absl::c_linear_search(instr->dimensions(), i)) { nelems *= instr->shape().dimensions()[i]; } } float two_over_nelems = 2. / static_cast<float>(nelems); float numerical_epsilon = std::numeric_limits<bfloat16>::epsilon(); return abs(actual_two_over_nelems - two_over_nelems) < ((actual_two_over_nelems + two_over_nelems) * numerical_epsilon); }; return MultiplyAnyOrder( XCenter(x_center, x, fused_expectation, custom_call, norm_metadata), m::Broadcast( MultiplyAnyOrder(m::Broadcast(m::ConstantScalar()), MultiplyAnyOrder(DNormFactor(custom_call), F0(custom_call, scale, dy, x, reduce, norm_metadata)))) .WithPredicate(broadcasts_two_over_nelems)); } auto F2(UniqueHloInstruction* fused_norm_factor, UniqueHloInstruction* scale, UniqueHloInstruction* dy, UniqueHloInstruction* custom_call, const NormMetadataMap& norm_metadata) { auto capture_or_verify_scale = [scale, custom_call, &norm_metadata]( const HloInstruction* instr) -> bool { return scale->CaptureOrVerify( FindTarget(custom_call->Instr(), instr->operand(0), custom_call->Instr()->operand(1), norm_metadata) ? 
custom_call->Instr()->mutable_operand(1) : nullptr); }; return MultiplyAnyOrder( m::Broadcast( BitcastOrReshape(FusedNormFactor(fused_norm_factor, custom_call))), MultiplyAnyOrder(m::Broadcast().WithPredicate(capture_or_verify_scale), m::Op().WithPredicate(dy->GetCaptureOrVerifyFn()))); } class CudnnNormRewriterVisitor : public DfsHloRewriteVisitor { public: explicit CudnnNormRewriterVisitor( const se::CudaComputeCapability cuda_compute_capability) : cuda_compute_capability_(cuda_compute_capability) {} absl::Status HandleAdd(HloInstruction* instr) override { TF_RETURN_IF_ERROR(MatchLayerNorm(instr)); TF_RETURN_IF_ERROR(MatchLayerNormGradient(instr)); return absl::OkStatus(); } absl::Status HandleSubtract(HloInstruction* instr) override { return MatchLayerNorm(instr); } absl::Status MatchLayerNorm(HloInstruction* instr) { UniqueHloInstruction x, expectation, variance, epsilon; HloInstruction *scale, *bias, *reduce, *norm_factor, *broadcast_scale, *broadcast_bias; if (Match( instr, SubtractMultiplyAddAnyOrder( OptionalSupportedTransform( m::Op().WithPredicate(x.GetCaptureOrVerifyFn())), Expectation(&expectation, &reduce, OptionalSupportedTransform(m::Op().WithPredicate( x.GetCaptureOrVerifyFn()))), NormFactor(&norm_factor, &x, &variance, &expectation, &epsilon), m::Broadcast(&broadcast_scale, m::Op(&scale)), m::Broadcast(&broadcast_bias, m::Op(&bias))))) { #if CUDNN_VERSION < 8905 VLOG(1) << "Layer norm Custom Calls require cuDNN 8.9.5."; return absl::OkStatus(); #endif if (!instr->GetModule() ->config() .debug_options() .xla_gpu_enable_cudnn_layer_norm()) { VLOG(1) << "Layer norm Custom Calls disabled."; return absl::OkStatus(); } if (cuda_compute_capability_.major != se::CudaComputeCapability::AMPERE && cuda_compute_capability_.major != se::CudaComputeCapability::HOPPER) { VLOG(1) << "Layer norm Custom Calls require Ampere or Hopper " "architectures."; return absl::OkStatus(); } if (!x.Instr() || !expectation.Instr() || !variance.Instr() || !epsilon.Instr()) { VLOG(1) << "Layer norm operands not unique."; return absl::OkStatus(); } if (!LayoutUtil::IsMonotonicWithDim0Major(x.Instr()->shape().layout()) || !LayoutUtil::IsMonotonicWithDim0Major(scale->shape().layout()) || !LayoutUtil::IsMonotonicWithDim0Major(bias->shape().layout()) || !LayoutUtil::IsMonotonicWithDim0Major(instr->shape().layout())) { VLOG(1) << "Layer norm input and/or output layouts nor supported."; return absl::OkStatus(); } if (!CompatibleElementType(instr) || !CompatibleElementType(scale) || !CompatibleElementType(bias) || !ShapeUtil::SameElementType(instr->shape(), x.Instr()->shape()) || !ShapeUtil::Equal(scale->shape(), bias->shape())) { VLOG(1) << "Layer norm input types or shapes not supported."; return absl::OkStatus(); } std::vector<int64_t> norm_dims(reduce->dimensions().begin(), reduce->dimensions().end()); std::vector<int64_t> norm_dims_adjusted = AdjustedDimensions(reduce); if (norm_dims_adjusted.size() != ShapeUtil::DropDegenerateDimensions(scale->shape()) .dimensions_size()) { VLOG(1) << "Layer norm input dimensions not supported."; return absl::OkStatus(); } if (!ShapeUtil::EqualIgnoringElementType( ShapeUtil::DropDegenerateDimensions(reduce->operand(0)->shape()), ShapeUtil::DropDegenerateDimensions(broadcast_scale->shape())) || !ShapeUtil::EqualIgnoringElementType( ShapeUtil::DropDegenerateDimensions(reduce->operand(0)->shape()), ShapeUtil::DropDegenerateDimensions(broadcast_bias->shape())) || norm_dims_adjusted != AdjustedDimensions(broadcast_scale) || norm_dims_adjusted != 
AdjustedDimensions(broadcast_bias)) { VLOG(1) << "Layer norm operand broadcast not supported."; return absl::OkStatus(); } std::vector<int64_t> non_norm_dims; for (int64_t x_dim = 0; x_dim < x.Instr()->shape().rank(); ++x_dim) { if (std::find(norm_dims.begin(), norm_dims.end(), x_dim) == norm_dims.end()) { non_norm_dims.emplace_back(x_dim); } } std::vector<int64_t> non_norm_dims_adjusted = AdjustedDimensions(x.Instr()->shape(), non_norm_dims); std::vector<int64_t> x_transpose_order = non_norm_dims; x_transpose_order.insert(x_transpose_order.end(), norm_dims.begin(), norm_dims.end()); bool apply_transpose = false; for (int i = 0; i < x_transpose_order.size(); ++i) { if (x_transpose_order[i] != i) { apply_transpose = true; break; } } std::optional<HloInstruction*> x_transpose; std::vector<int64_t> y_transpose_order(x_transpose_order.size()); if (apply_transpose) { for (int k = 0; k < x_transpose_order.size(); ++k) { y_transpose_order[x_transpose_order[k]] = k; } TF_ASSIGN_OR_RETURN(x_transpose, MakeTransposeHlo(x.Instr(), x_transpose_order)); } std::vector<int64_t> reshaped_dims = {1}; for (auto non_norm_dim : non_norm_dims) { reshaped_dims[0] *= x.Instr()->shape().dimensions(non_norm_dim); } for (auto norm_dim : norm_dims) { reshaped_dims.emplace_back(x.Instr()->shape().dimensions(norm_dim)); } while (reshaped_dims.size() < 4) { reshaped_dims.emplace_back(1); } Shape reshaped_shape = ShapeUtil::MakeShape( x.Instr()->shape().element_type(), reshaped_dims); TF_ASSIGN_OR_RETURN( HloInstruction * x_reshape, MakeReshapeHlo(reshaped_shape, x_transpose.value_or(x.Instr()))); std::vector<int64_t> reshaped_scale_dims = reshaped_dims; reshaped_scale_dims[0] = 1; Shape scale_bias_shape = ShapeUtil::MakeShape( scale->shape().element_type(), reshaped_scale_dims); TF_ASSIGN_OR_RETURN(HloInstruction * scale_reshape, MakeReshapeHlo(scale_bias_shape, scale)); TF_ASSIGN_OR_RETURN(HloInstruction * bias_reshape, MakeReshapeHlo(scale_bias_shape, bias)); GpuBackendConfig gpu_backend_config; CudnnNormBackendConfig& backend_config = *gpu_backend_config.mutable_cudnn_norm_backend_config(); backend_config.set_epsilon( epsilon.Instr()->literal().GetAsDouble({}).value()); backend_config.set_kind(CudnnNormBackendConfig::LAYER_FWD_INFER); auto* algorithm = backend_config.mutable_algorithm(); algorithm->set_algo_id(0); algorithm->set_math_type(se::dnn::AlgorithmProto::TENSOR_OP_MATH); algorithm->set_is_cudnn_frontend(true); TF_ASSIGN_OR_RETURN(const int64_t c_constant, CConstant(cuda_compute_capability_)); const int64_t workspace_size = (2 * c_constant * (4 + 256)) + (2 * reshaped_dims[0] * 4) + 64; algorithm->mutable_workspace_size()->set_value(workspace_size); Shape custom_call_shape = ShapeUtil::MakeTupleShape( {x_reshape->shape(), ShapeUtil::MakeShape(U8, {workspace_size})}); HloInstruction* custom_call = instr->AddInstruction(HloInstruction::CreateCustomCall( custom_call_shape, {x_reshape, scale_reshape, bias_reshape}, kCudnnNormCallTarget)); TF_RETURN_IF_ERROR(custom_call->set_backend_config(gpu_backend_config)); TF_ASSIGN_OR_RETURN(HloInstruction * gte, MakeGetTupleElementHlo(custom_call, 0)); TF_ASSIGN_OR_RETURN( HloInstruction * y_reshape, MakeReshapeHlo(x_transpose.value_or(instr)->shape(), gte)); std::optional<HloInstruction*> y_transpose; if (apply_transpose) { TF_ASSIGN_OR_RETURN(y_transpose, MakeTransposeHlo(y_reshape, y_transpose_order)); } TF_RETURN_IF_ERROR( ReplaceInstruction(instr, y_transpose.value_or(y_reshape))); norm_metadata_.insert( {custom_call, NormMetadata({x_transpose.value_or(nullptr), 
y_transpose.value_or(nullptr), norm_dims_adjusted, non_norm_dims_adjusted})}); VLOG(1) << "Layer norm rewritten into Custom Call."; for (HloInstruction* user : norm_factor->users()) { if (user->opcode() == HloOpcode::kDivide && user->operand_index(norm_factor) == 0) { TF_ASSIGN_OR_RETURN(bool changed, MatchNormFactor(user, custom_call, variance, expectation, epsilon)); if (changed) { break; } } } } return absl::OkStatus(); } absl::StatusOr<bool> MatchNormFactor(HloInstruction* instr, HloInstruction* custom_call, UniqueHloInstruction& variance, UniqueHloInstruction& expectation, UniqueHloInstruction& epsilon) { HloInstruction* gte = custom_call->users()[0]; if (Match(instr, m::Divide( m::Op(), AddAnyOrder( m::Op().WithPredicate(variance.GetCaptureOrVerifyFn()), m::Broadcast(m::ConstantScalar().WithPredicate( epsilon.GetCaptureOrVerifyFn())))))) { if (!variance.Instr() || !epsilon.Instr()) { VLOG(1) << "Layer norm operands not unique."; return false; } if (!CompatibleElementType(instr) || !CompatibleElementType(expectation.Instr())) { VLOG(1) << "Layer norm input types not compatible."; return false; } auto norm_metadata = norm_metadata_.extract(custom_call); if (!norm_metadata) { VLOG(1) << "Unable to retrieve norm metadata of forward Custom Call."; return false; } auto make_compatible_shape = [](Shape shape) -> Shape { return ShapeUtil::MakeShape(shape.element_type(), {ShapeUtil::ElementsIn(shape), 1, 1, 1}); }; Shape expectation_shape = make_compatible_shape(expectation.Instr()->shape()); Shape norm_factor_shape = make_compatible_shape(instr->shape()); std::vector<Shape> tuple_shapes = custom_call->shape().tuple_shapes(); tuple_shapes.insert(tuple_shapes.begin() + 1, {expectation_shape, norm_factor_shape}); Shape custom_call_shape = ShapeUtil::MakeTupleShape(tuple_shapes); HloInstruction* new_custom_call = instr->AddInstruction( custom_call->CloneWithNewShape(custom_call_shape)); TF_ASSIGN_OR_RETURN( GpuBackendConfig gpu_backend_config, custom_call->backend_config<xla::gpu::GpuBackendConfig>()); CudnnNormBackendConfig& backend_config = *gpu_backend_config.mutable_cudnn_norm_backend_config(); backend_config.set_kind(CudnnNormBackendConfig::LAYER_FWD_TRAIN); TF_ASSIGN_OR_RETURN(const int64_t c_constant, CConstant(cuda_compute_capability_)); const int64_t workspace_size = (2 * c_constant * (4 + 256)) + 32; backend_config.mutable_algorithm()->mutable_workspace_size()->set_value( workspace_size); TF_RETURN_IF_ERROR( new_custom_call->set_backend_config(gpu_backend_config)); auto replace_with_new_cc = [new_custom_call, this]( HloInstruction* old_instr, int tuple_index) -> absl::Status { TF_ASSIGN_OR_RETURN( HloInstruction * new_gte, MakeGetTupleElementHlo(new_custom_call, tuple_index)); HloInstruction* new_instr = new_gte; if (!ShapeUtil::Equal(new_gte->shape(), old_instr->shape())) { TF_ASSIGN_OR_RETURN(new_instr, MakeReshapeHlo(old_instr->shape(), new_gte)); } if (old_instr->opcode() != HloOpcode::kDivide) { TF_RETURN_IF_ERROR(ReplaceInstruction(old_instr, new_instr)); } else { TF_RETURN_IF_ERROR( ReplaceInstruction(old_instr->mutable_operand(0), new_instr)); TF_ASSIGN_OR_RETURN( HloInstruction * new_multiply0, MakeBinaryHlo(HloOpcode::kMultiply, new_instr, new_instr)); TF_ASSIGN_OR_RETURN( HloInstruction * new_multiply1, MakeBinaryHlo(HloOpcode::kMultiply, new_multiply0, new_instr)); TF_RETURN_IF_ERROR(ReplaceInstruction(old_instr, new_multiply1)); } return absl::OkStatus(); }; TF_RETURN_IF_ERROR(replace_with_new_cc(gte, 0)); TF_RETURN_IF_ERROR(replace_with_new_cc(expectation.Instr(), 1)); 
TF_RETURN_IF_ERROR(replace_with_new_cc(instr, 2)); norm_metadata.key() = new_custom_call; norm_metadata_.insert(std::move(norm_metadata)); VLOG(1) << "Expectation and norm factor fused into layer norm Custom Call."; } return true; } absl::Status MatchLayerNormGradient(HloInstruction* instr) { UniqueHloInstruction fwd_custom_call, x, x_center, scale, dy, fused_expectation, fused_norm_factor; HloInstruction *broadcast, *scalar, *dscale, *dbias, *reduce0, *reduce1, *reduce2, *reduce3; if (Match(instr, AddAddAnyOrder( m::Broadcast( &broadcast, MultiplyAddAnyOrder( m::Broadcast(m::ConstantScalar(&scalar)), NegateAddReduce(&reduce0, F1(&x, &x_center, &fused_expectation, &fwd_custom_call, &scale, &dy, &reduce2, norm_metadata_)), NegateAddReduce( &reduce1, F2(&fused_norm_factor, &scale, &dy, &fwd_custom_call, norm_metadata_)))), F2(&fused_norm_factor, &scale, &dy, &fwd_custom_call, norm_metadata_), F1(&x, &x_center, &fused_expectation, &fwd_custom_call, &scale, &dy, &reduce3, norm_metadata_)))) { if (instr->user_count() == 1 && instr->users()[0]->opcode() == HloOpcode::kConvert && CompatibleElementType(instr->users()[0])) { instr = instr->users()[0]; } if (!fwd_custom_call.Instr() || !x.Instr() || !dy.Instr() || !x_center.Instr() || !scale.Instr() || !fused_expectation.Instr() || !fused_norm_factor.Instr()) { VLOG(1) << "Layer norm gradient inputs not unique."; return absl::OkStatus(); } auto norm_metadata = norm_metadata_.find(fwd_custom_call.Instr()); if (norm_metadata == norm_metadata_.end()) { VLOG(1) << "Unable to retrieve norm metadata of forward Custom Call."; return absl::OkStatus(); } if (AdjustedDimensions(reduce0) != norm_metadata->second.norm_dims_adjusted || AdjustedDimensions(reduce1) != norm_metadata->second.norm_dims_adjusted || AdjustedDimensions(reduce2) != norm_metadata->second.norm_dims_adjusted || AdjustedDimensions(reduce3) != norm_metadata->second.norm_dims_adjusted) { VLOG(1) << "Unexpected reductions dimensions in layer norm gradient."; return absl::OkStatus(); } float actual_r_nelems = scalar->literal().GetAsDouble({}).value(); int64_t nelems = 1; for (int i = 0; i < broadcast->shape().dimensions_size(); ++i) { if (!absl::c_linear_search(broadcast->dimensions(), i)) { nelems *= broadcast->shape().dimensions()[i]; } } float r_nelems = 1. 
/ static_cast<float>(nelems); float numerical_epsilon = std::numeric_limits<bfloat16>::epsilon(); if (!(abs(actual_r_nelems - r_nelems) < ((actual_r_nelems + r_nelems) * numerical_epsilon))) { VLOG(1) << "Layer norm backward broadcast operand outside expected range."; return absl::OkStatus(); } auto find_dscale = [&fused_norm_factor, &norm_metadata]( const UniqueHloInstruction& factor0, const UniqueHloInstruction& factor1) -> HloInstruction* { for (HloInstruction* factor0_user : factor0.Instr()->users()) { std::vector<HloInstruction*> users; SkipUnaryOpsTopDownRecursive(factor0_user, users); for (HloInstruction* user : users) { if (Match(user, MultiplyAnyOrder( m::Op(), MultiplyAnyOrder( m::Broadcast(BitcastOrReshape(m::Op().Is( fused_norm_factor.Instr()))), m::Op().Is(factor1.Instr()))))) { for (HloInstruction* multiply_user : user->users()) { if (AppliesAddReduce( multiply_user, norm_metadata->second.non_norm_dims_adjusted)) { return multiply_user; } } } } } return nullptr; }; if (!(dscale = find_dscale(x_center, dy)) && !(dscale = find_dscale(dy, x_center))) { VLOG(1) << "Unable to identify Dscale in graph."; return absl::OkStatus(); } dbias = FindAddReduce(dy.Instr(), norm_metadata->second.non_norm_dims_adjusted); if (!LayoutUtil::IsMonotonicWithDim0Major(dy.Instr()->shape().layout()) || !LayoutUtil::IsMonotonicWithDim0Major(instr->shape().layout()) || !LayoutUtil::IsMonotonicWithDim0Major(dscale->shape().layout()) || (dbias && !LayoutUtil::IsMonotonicWithDim0Major(dbias->shape().layout()))) { VLOG(1) << "Layer norm input and/or output layouts nor supported."; return absl::OkStatus(); } if (x.Instr()->shape().element_type() != instr->shape().element_type()) { VLOG(1) << "The types of X and DX must match."; return absl::OkStatus(); } if (!ShapeUtil::Equal( ShapeUtil::DropDegenerateDimensions(scale.Instr()->shape()), ShapeUtil::DropDegenerateDimensions(dscale->shape())) || (dbias && !ShapeUtil::Equal( ShapeUtil::DropDegenerateDimensions(scale.Instr()->shape()), ShapeUtil::DropDegenerateDimensions(dbias->shape())))) { VLOG(1) << "Backward layer norm types not supported."; return absl::OkStatus(); } if (!CompatibleElementType(dy.Instr())) { VLOG(1) << "Backward layer norm types not supported."; return absl::OkStatus(); } if (ShapeUtil::ByteSizeOfPrimitiveType( x.Instr()->shape().element_type()) < ShapeUtil::ByteSizeOfPrimitiveType( dy.Instr()->shape().element_type()) || ShapeUtil::ByteSizeOfPrimitiveType( x.Instr()->shape().element_type()) < ShapeUtil::ByteSizeOfPrimitiveType( scale.Instr()->shape().element_type())) { VLOG(1) << "Backward layer norm types not supported."; return absl::OkStatus(); } HloInstruction* transposed_dy = dy.Instr(); if (norm_metadata->second.x_transpose) { TF_ASSIGN_OR_RETURN( transposed_dy, MakeTransposeHlo(dy.Instr(), norm_metadata->second.x_transpose->dimensions())); } TF_ASSIGN_OR_RETURN(HloInstruction * reshaped_dy, MakeReshapeHlo(x.Instr()->shape(), transposed_dy)); Shape dx_shape = ShapeUtil::MakeShape(instr->shape().element_type(), x.Instr()->shape().dimensions()); Shape dscale_dbias_shape = ShapeUtil::MakeShape( dscale->shape().element_type(), scale.Instr()->shape().dimensions()); GpuBackendConfig gpu_backend_config; CudnnNormBackendConfig& backend_config = *gpu_backend_config.mutable_cudnn_norm_backend_config(); backend_config.set_kind(CudnnNormBackendConfig::LAYER_BWD); auto* algorithm = backend_config.mutable_algorithm(); algorithm->set_algo_id(0); algorithm->set_math_type(se::dnn::AlgorithmProto::TENSOR_OP_MATH); 
algorithm->set_is_cudnn_frontend(true); TF_ASSIGN_OR_RETURN(const int64_t c_constant, CConstant(cuda_compute_capability_)); const int64_t workspace_size = (2 * c_constant * (4 + 256)) + (2 * x.Instr()->shape().dimensions(0) * 4) + 64; algorithm->mutable_workspace_size()->set_value(workspace_size); Shape custom_call_shape = ShapeUtil::MakeTupleShape( {dx_shape, dscale_dbias_shape, dscale_dbias_shape, ShapeUtil::MakeShape(U8, {workspace_size})}); HloInstruction* custom_call = instr->AddInstruction(HloInstruction::CreateCustomCall( custom_call_shape, {x.Instr(), scale.Instr(), reshaped_dy, fused_expectation.Instr(), fused_norm_factor.Instr()}, kCudnnNormCallTarget)); TF_RETURN_IF_ERROR(custom_call->set_backend_config(gpu_backend_config)); auto replace_with_cc = [custom_call, norm_metadata, transposed_dy, this]( HloInstruction* old_instr, int tuple_index) -> absl::Status { TF_ASSIGN_OR_RETURN(HloInstruction * gte, MakeGetTupleElementHlo(custom_call, tuple_index)); HloInstruction* new_instr; if (tuple_index == 0 && norm_metadata->second.y_transpose) { TF_ASSIGN_OR_RETURN(new_instr, MakeReshapeHlo(transposed_dy->shape(), gte)); TF_ASSIGN_OR_RETURN( new_instr, MakeTransposeHlo( new_instr, norm_metadata->second.y_transpose->dimensions())); } else { TF_ASSIGN_OR_RETURN(new_instr, MakeReshapeHlo(old_instr->shape(), gte)); } TF_RETURN_IF_ERROR(ReplaceInstruction(old_instr, new_instr)); return absl::OkStatus(); }; TF_RETURN_IF_ERROR(replace_with_cc(instr, 0)); TF_RETURN_IF_ERROR(replace_with_cc(dscale, 1)); if (dbias) { TF_RETURN_IF_ERROR(replace_with_cc(dbias, 2)); } VLOG(1) << "Gradients w.r.t. x" << (dbias ? ", scale and bias" : " and scale") << " rewritten into layer norm backward Custom Call."; } return absl::OkStatus(); } private: se::CudaComputeCapability cuda_compute_capability_; NormMetadataMap norm_metadata_; }; absl::StatusOr<bool> RunOnComputation( HloComputation* computation, se::CudaComputeCapability cuda_compute_capability) { CudnnNormRewriterVisitor visitor(cuda_compute_capability); TF_RETURN_IF_ERROR(computation->Accept(&visitor)); return visitor.changed(); } } CudnnNormRewriter::CudnnNormRewriter( se::CudaComputeCapability cuda_compute_capability) : cuda_compute_capability_(cuda_compute_capability) {} absl::StatusOr<bool> CudnnNormRewriter::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { bool changed = false; for (HloComputation* computation : module->MakeNonfusionComputations(execution_threads)) { TF_ASSIGN_OR_RETURN( bool result, RunOnComputation(computation, cuda_compute_capability_)); changed |= result; } return changed; } } }
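The gradient matcher above only fires when the scalar broadcast feeding the backward graph is numerically close to 1/N, where N is the number of elements reduced over, using a tolerance derived from bfloat16 precision. Below is a minimal standalone sketch of that closeness test; it is not part of the rewriter or its tests, and the function name ApproximatesReciprocalOfNelems as well as the constant 0.0078125f (standing in for std::numeric_limits<bfloat16>::epsilon(), i.e. 2^-7) are illustrative choices.

#include <cmath>
#include <cstdint>

// Sketch only: mirrors the 1/N tolerance check used when matching the layer
// norm gradient, with the bfloat16 epsilon hard-coded so the snippet has no
// XLA or Eigen dependency.
bool ApproximatesReciprocalOfNelems(float actual_r_nelems, int64_t nelems) {
  const float r_nelems = 1.f / static_cast<float>(nelems);
  constexpr float kBf16Epsilon = 0.0078125f;  // 2^-7
  return std::abs(actual_r_nelems - r_nelems) <
         (actual_r_nelems + r_nelems) * kBf16Epsilon;
}

For example, a gradient over f32[2,4] normalized along the trailing dimension carries the scalar 0.25, which the check accepts for nelems = 4, whereas a mismatched constant such as 0.5 falls outside the tolerance and the rewrite is skipped.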
#include <string> #include <gtest/gtest.h> #include "xla/error_spec.h" #include "xla/stream_executor/device_description.h" #if GOOGLE_CUDA #include "third_party/gpus/cuda/include/cuda.h" #include "third_party/gpus/cudnn/cudnn.h" #include "third_party/gpus/cudnn/cudnn_version.h" #endif #include "xla/service/gpu/tests/gpu_codegen_test.h" namespace xla { namespace gpu { namespace { class CudnnNormRewriterTest : public GpuCodegenTest { public: se::CudaComputeCapability GetCudaComputeCapability() { return backend() .default_stream_executor() ->GetDeviceDescription() .cuda_compute_capability(); } DebugOptions GetDebugOptionsForTest() override { DebugOptions debug_options = GpuCodegenTest::GetDebugOptionsForTest(); debug_options.set_xla_gpu_enable_cudnn_layer_norm(true); return debug_options; } protected: void SetUp() override { #if (CUDA_VERSION < 12000 || CUDNN_VERSION < 8905) GTEST_SKIP() << "Layer norm kernels require CUDA 12 and cuDNN 8.9.5."; #endif if (!(GetCudaComputeCapability().major == se::CudaComputeCapability::AMPERE) && !(GetCudaComputeCapability().major == se::CudaComputeCapability::HOPPER)) { GTEST_SKIP() << "Layer norm kernels require Ampere or Hopper architectures."; } } void TestNorm(std::string hlo_text, std::string optimized_hlo) { EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3})); MatchOptimizedHlo(hlo_text, optimized_hlo); } }; TEST_F(CudnnNormRewriterTest, LayerNorm2D1) { const char* hlo_text = R"( HloModule test apply { a = f32[] parameter(0) b = f32[] parameter(1) ROOT c = f32[] add(a,b) } ENTRY test { input = f32[2,4] parameter(0) input_square = f32[2,4] multiply(input, input) c0 = f32[] constant(0) input_square_sum = f32[2] reduce(input_square, c0), dimensions={1}, to_apply=apply r_nelems = f32[] constant(0.25) r_nelems_bcast = f32[2] broadcast(r_nelems), dimensions={} input_square_mean = f32[2] multiply(input_square_sum, r_nelems_bcast) input_sum = f32[2] reduce(input, c0),dimensions={1}, to_apply=apply input_mean = f32[2] multiply(input_sum, r_nelems_bcast) input_mean_square = f32[2] multiply(input_mean, input_mean) variance = f32[2] subtract(input_square_mean, input_mean_square) epsilon = f32[] constant(0.001) epsilon_bcast = f32[2] broadcast(epsilon), dimensions={} variance_plus_epsilon = f32[2] add(variance, epsilon_bcast) norm_factor = f32[2] rsqrt(variance_plus_epsilon) norm_factor_bcast = f32[2,4] broadcast(norm_factor), dimensions={0} input_mean_bcast = f32[2,4] broadcast(input_mean), dimensions={0} input_center = f32[2,4] subtract(input, input_mean_bcast) norm = f32[2,4] multiply(norm_factor_bcast, input_center) scale = f32[4] parameter(1) scale_bcast = f32[2,4] broadcast(scale), dimensions={1} norm_scale = f32[2,4] multiply(norm, scale_bcast) bias = f32[4] parameter(2) bias_broadcast = f32[2,4] broadcast(bias), dimensions={1} ROOT out = f32[2,4] add(norm_scale, bias_broadcast) })"; const char* optimized_hlo = R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4], {{.*}}: f32[4], {{.*}}: f32[4]) -> f32[2,4] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4]{1,0} parameter(0) ; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[2,4,1,1]{3,2,1,0} bitcast([[P0]]) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[4]{0} parameter(1) ; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P1]]) ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4]{0} parameter(2) ; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P2]]) ; CHECK-NEXT: [[CC:%[^ ]+]] = (f32[2,4,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]), ; CHECK: 
custom_call_target="__cudnn$norm", ; CHECK: backend_config={ ; CHECK-DAG: "epsilon":0.001 ; CHECK: } ; CHECK-NEXT: [[GTE:%[^ ]+]] = f32[2,4,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=0 ; CHECK-NEXT: ROOT [[GTE_BITCAST:%[^ ]+]] = f32[2,4]{1,0} bitcast([[GTE]]) )"; TestNorm(hlo_text, optimized_hlo); } TEST_F(CudnnNormRewriterTest, LayerNorm4D3) { const char* hlo_text = R"( HloModule test apply { a = f32[] parameter(0) b = f32[] parameter(1) ROOT c = f32[] add(a,b) } ENTRY test { input = f32[2,4,6,8] parameter(0) input_square = f32[2,4,6,8] multiply(input, input) c0 = f32[] constant(0) input_square_sum = f32[2,4,6] reduce(input_square, c0), dimensions={3}, to_apply=apply r_nelems = f32[] constant(0.125) r_nelems_bcast = f32[2,4,6] broadcast(r_nelems), dimensions={} input_square_mean = f32[2,4,6] multiply(input_square_sum, r_nelems_bcast) input_sum = f32[2,4,6] reduce(input, c0), dimensions={3}, to_apply=apply input_mean = f32[2,4,6] multiply(input_sum, r_nelems_bcast) input_mean_square = f32[2,4,6] multiply(input_mean, input_mean) variance = f32[2,4,6] subtract(input_square_mean, input_mean_square) epsilon = f32[] constant(0.001) epsilon_bcast = f32[2,4,6] broadcast(epsilon), dimensions={} variance_plus_epsilon = f32[2,4,6] add(variance, epsilon_bcast) norm_factor = f32[2,4,6] rsqrt(variance_plus_epsilon) norm_factor_bcast = f32[2,4,6,8] broadcast(norm_factor), dimensions={0,1,2} input_mean_bcast = f32[2,4,6,8] broadcast(input_mean), dimensions={0,1,2} input_center = f32[2,4,6,8] subtract(input, input_mean_bcast) norm = f32[2,4,6,8] multiply(norm_factor_bcast, input_center) scale = f32[8] parameter(1) scale_bcast = f32[2,4,6,8] broadcast(scale), dimensions={3} norm_scale = f32[2,4,6,8] multiply(norm, scale_bcast) bias = f32[8] parameter(2) bias_bcast = f32[2,4,6,8] broadcast(bias), dimensions={3} ROOT out = f32[2,4,6,8] add(norm_scale, bias_bcast) })"; const char* optimized_hlo = R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4,6,8], {{.*}}: f32[8], {{.*}}: f32[8]) -> f32[2,4,6,8] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} parameter(0) ; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[48,8,1,1]{3,2,1,0} bitcast([[P0]]) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[8]{0} parameter(1) ; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,8,1,1]{3,2,1,0} bitcast([[P1]]) ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[8]{0} parameter(2) ; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,8,1,1]{3,2,1,0} bitcast([[P2]]) ; CHECK-NEXT: [[CC:%[^ ]+]] = (f32[48,8,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]), ; CHECK: custom_call_target="__cudnn$norm", ; CHECK: backend_config={ ; CHECK-DAG: "epsilon":0.001 ; CHECK: } ; CHECK-NEXT: [[GTE:%[^ ]+]] = f32[48,8,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=0 ; CHECK-NEXT: ROOT [[GTE_BITCAST:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} bitcast([[GTE]]) )"; TestNorm(hlo_text, optimized_hlo); } TEST_F(CudnnNormRewriterTest, LayerNorm4D3Degenerate0) { const char* hlo_text = R"( HloModule test apply { a = f32[] parameter(0) b = f32[] parameter(1) ROOT c = f32[] add(a,b) } ENTRY test { input = f32[1,4,6,8] parameter(0) input_square = f32[1,4,6,8] multiply(input, input) c0 = f32[] constant(0) input_square_sum = f32[1,4,6] reduce(input_square, c0), dimensions={3}, to_apply=apply r_nelems = f32[] constant(0.125) r_nelems_bcast = f32[1,4,6] broadcast(r_nelems), dimensions={} input_square_mean = f32[1,4,6] multiply(input_square_sum, r_nelems_bcast) input_sum = f32[1,4,6] reduce(input, c0), dimensions={3}, to_apply=apply input_mean = f32[1,4,6] multiply(input_sum, 
r_nelems_bcast) input_mean_square = f32[1,4,6] multiply(input_mean, input_mean) variance = f32[1,4,6] subtract(input_square_mean, input_mean_square) epsilon = f32[] constant(0.001) epsilon_bcast = f32[1,4,6] broadcast(epsilon), dimensions={} variance_plus_epsilon = f32[1,4,6] add(variance, epsilon_bcast) norm_factor = f32[1,4,6] rsqrt(variance_plus_epsilon) norm_factor_bcast = f32[1,4,6,8] broadcast(norm_factor), dimensions={0,1,2} input_mean_bcast = f32[1,4,6,8] broadcast(input_mean), dimensions={0,1,2} input_center = f32[1,4,6,8] subtract(input, input_mean_bcast) norm = f32[1,4,6,8] multiply(norm_factor_bcast, input_center) scale = f32[8] parameter(1) scale_bcast = f32[1,4,6,8] broadcast(scale), dimensions={3} norm_scale = f32[1,4,6,8] multiply(norm, scale_bcast) bias = f32[8] parameter(2) bias_bcast = f32[1,4,6,8] broadcast(bias), dimensions={3} ROOT out = f32[1,4,6,8] add(norm_scale, bias_bcast) })"; const char* optimized_hlo = R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[1,4,6,8], {{.*}}: f32[8], {{.*}}: f32[8]) -> f32[1,4,6,8] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[1,4,6,8]{3,2,1,0} parameter(0) ; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[24,8,1,1]{3,2,1,0} bitcast([[P0]]) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[8]{0} parameter(1) ; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,8,1,1]{3,2,1,0} bitcast([[P1]]) ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[8]{0} parameter(2) ; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,8,1,1]{3,2,1,0} bitcast([[P2]]) ; CHECK-NEXT: [[CC:%[^ ]+]] = (f32[24,8,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]), ; CHECK: custom_call_target="__cudnn$norm", ; CHECK: backend_config={ ; CHECK-DAG: "epsilon":0.001 ; CHECK: } ; CHECK-NEXT: [[GTE:%[^ ]+]] = f32[24,8,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=0 ; CHECK-NEXT: ROOT [[GTE_BITCAST:%[^ ]+]] = f32[1,4,6,8]{3,2,1,0} bitcast([[GTE]]) )"; TestNorm(hlo_text, optimized_hlo); } TEST_F(CudnnNormRewriterTest, LayerNorm4D2) { const char* hlo_text = R"( HloModule test apply { a = f32[] parameter(0) b = f32[] parameter(1) ROOT c = f32[] add(a,b) } ENTRY test { input = f32[2,4,6,8] parameter(0) input_square = f32[2,4,6,8] multiply(input, input) c0 = f32[] constant(0) input_square_sum = f32[2,4,8] reduce(input_square, c0), dimensions={2}, to_apply=apply r_nelems = f32[] constant(0.166667) r_nelems_bcast = f32[2,4,8] broadcast(r_nelems), dimensions={} input_square_mean = f32[2,4,8] multiply(input_square_sum, r_nelems_bcast) reduce = f32[2,4,8] reduce(input, c0), dimensions={2}, to_apply=apply input_mean = f32[2,4,8] multiply(reduce, r_nelems_bcast) input_mean_square = f32[2,4,8] multiply(input_mean, input_mean) variance = f32[2,4,8] subtract(input_square_mean, input_mean_square) epsilon = f32[] constant(0.001) epsilon_bcast = f32[2,4,8] broadcast(epsilon), dimensions={} variance_plus_epsilon = f32[2,4,8] add(variance, epsilon_bcast) norm_factor = f32[2,4,8] rsqrt(variance_plus_epsilon) norm_factor_bcast = f32[2,4,6,8] broadcast(norm_factor), dimensions={0,1,3} input_mean_bcast = f32[2,4,6,8] broadcast(input_mean), dimensions={0,1,3} input_center = f32[2,4,6,8] subtract(input, input_mean_bcast) norm = f32[2,4,6,8] multiply(norm_factor_bcast, input_center) scale = f32[6] parameter(1) scale_bcast = f32[2,4,6,8] broadcast(scale), dimensions={2} norm_scale = f32[2,4,6,8] multiply(norm, scale_bcast) bias = f32[6] parameter(2) bias_broadcast = f32[2,4,6,8] broadcast(bias), dimensions={2} ROOT out = f32[2,4,6,8] add(norm_scale, bias_broadcast) })"; const char* optimized_hlo = R"( ; CHECK-LABEL: ENTRY %test 
({{.*}}: f32[2,4,6,8], {{.*}}: f32[6], {{.*}}: f32[6]) -> f32[2,4,6,8] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} parameter(0) ; CHECK-NEXT: [[TRANSPOSE:%[^ ]+]] = f32[8,8,6]{2,1,0} fusion([[P0]]), kind=kLoop, calls={{.*}} ; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[64,6,1,1]{3,2,1,0} bitcast([[TRANSPOSE]]) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[6]{0} parameter(1) ; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,6,1,1]{3,2,1,0} bitcast([[P1]]) ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[6]{0} parameter(2) ; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,6,1,1]{3,2,1,0} bitcast([[P2]]) ; CHECK-NEXT: [[CC:%[^ ]+]] = (f32[64,6,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]), ; CHECK: custom_call_target="__cudnn$norm", ; CHECK: backend_config={ ; CHECK-DAG: "epsilon":0.001 ; CHECK: } ; CHECK-NEXT: [[GTE:%[^ ]+]] = f32[64,6,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=0 ; CHECK-NEXT: [[FUSION:%[^ ]+]] = f32[8,6,8]{2,1,0} fusion([[GTE]]), kind=kLoop, calls=[[FUSED_COMPUTATION:%[^ ]+]] ; CHECK-NEXT: ROOT {{.*}} = f32[2,4,6,8]{3,2,1,0} bitcast([[FUSION]]) )"; TestNorm(hlo_text, optimized_hlo); } TEST_F(CudnnNormRewriterTest, LayerNorm4D2Degenerate1) { const char* hlo_text = R"( HloModule test apply { a = f32[] parameter(0) b = f32[] parameter(1) ROOT c = f32[] add(a,b) } ENTRY test { input = f32[2,1,6,8] parameter(0) input_square = f32[2,1,6,8] multiply(input, input) c0 = f32[] constant(0) input_square_sum = f32[2,1,8] reduce(input_square, c0), dimensions={2}, to_apply=apply r_nelems = f32[] constant(0.166667) r_nelems_bcast = f32[2,1,8] broadcast(r_nelems), dimensions={} input_square_mean = f32[2,1,8] multiply(input_square_sum, r_nelems_bcast) reduce = f32[2,1,8] reduce(input, c0), dimensions={2}, to_apply=apply input_mean = f32[2,1,8] multiply(reduce, r_nelems_bcast) input_mean_square = f32[2,1,8] multiply(input_mean, input_mean) variance = f32[2,1,8] subtract(input_square_mean, input_mean_square) epsilon = f32[] constant(0.001) epsilon_bcast = f32[2,1,8] broadcast(epsilon), dimensions={} variance_plus_epsilon = f32[2,1,8] add(variance, epsilon_bcast) norm_factor = f32[2,1,8] rsqrt(variance_plus_epsilon) norm_factor_bcast = f32[2,1,6,8] broadcast(norm_factor), dimensions={0,1,3} input_mean_bcast = f32[2,1,6,8] broadcast(input_mean), dimensions={0,1,3} input_center = f32[2,1,6,8] subtract(input, input_mean_bcast) norm = f32[2,1,6,8] multiply(norm_factor_bcast, input_center) scale = f32[6] parameter(1) scale_bcast = f32[2,1,6,8] broadcast(scale), dimensions={2} norm_scale = f32[2,1,6,8] multiply(norm, scale_bcast) bias = f32[6] parameter(2) bias_broadcast = f32[2,1,6,8] broadcast(bias), dimensions={2} ROOT out = f32[2,1,6,8] add(norm_scale, bias_broadcast) })"; const char* optimized_hlo = R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,1,6,8], {{.*}}: f32[6], {{.*}}: f32[6]) -> f32[2,1,6,8] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,1,6,8]{3,2,1,0} parameter(0) ; CHECK-NEXT: [[TRANSPOSE:%[^ ]+]] = f32[2,8,6]{2,1,0} fusion([[P0]]), kind=kLoop, calls={{.*}} ; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[16,6,1,1]{3,2,1,0} bitcast([[TRANSPOSE]]) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[6]{0} parameter(1) ; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,6,1,1]{3,2,1,0} bitcast([[P1]]) ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[6]{0} parameter(2) ; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,6,1,1]{3,2,1,0} bitcast([[P2]]) ; CHECK-NEXT: [[CC:%[^ ]+]] = (f32[16,6,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]), ; CHECK: custom_call_target="__cudnn$norm", ; CHECK: 
backend_config={ ; CHECK-DAG: "epsilon":0.001 ; CHECK: } ; CHECK-NEXT: [[GTE:%[^ ]+]] = f32[16,6,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=0 ; CHECK-NEXT: [[FUSION:%[^ ]+]] = f32[2,6,8]{2,1,0} fusion([[GTE]]), kind=kLoop, calls=[[FUSED_COMPUTATION:%[^ ]+]] ; CHECK-NEXT: ROOT {{.*}} = f32[2,1,6,8]{3,2,1,0} bitcast([[FUSION]]) )"; TestNorm(hlo_text, optimized_hlo); } TEST_F(CudnnNormRewriterTest, LayerNorm4D12) { const char* hlo_text = R"( HloModule test apply { a = f32[] parameter(0) b = f32[] parameter(1) ROOT c = f32[] add(a,b) } ENTRY test { input = f32[2,4,6,8] parameter(0) input_square = f32[2,4,6,8] multiply(input, input) c0 = f32[] constant(0) input_square_sum = f32[2,8] reduce(input_square, c0), dimensions={1,2}, to_apply=apply r_nelems = f32[] constant(0.041667) r_nelems_bcast = f32[2,8] broadcast(r_nelems), dimensions={} input_square_mean = f32[2,8] multiply(input_square_sum, r_nelems_bcast) reduce = f32[2,8] reduce(input, c0), dimensions={1,2}, to_apply=apply input_mean = f32[2,8] multiply(reduce, r_nelems_bcast) input_mean_square = f32[2,8] multiply(input_mean, input_mean) variance = f32[2,8] subtract(input_square_mean, input_mean_square) epsilon = f32[] constant(0.001) epsilon_bcast = f32[2,8] broadcast(epsilon), dimensions={} variance_plus_epsilon = f32[2,8] add(variance, epsilon_bcast) norm_factor = f32[2,8] rsqrt(variance_plus_epsilon) norm_factor_bcast = f32[2,4,6,8] broadcast(norm_factor), dimensions={0,3} input_mean_bcast = f32[2,4,6,8] broadcast(input_mean), dimensions={0,3} input_center = f32[2,4,6,8] subtract(input, input_mean_bcast) norm = f32[2,4,6,8] multiply(norm_factor_bcast, input_center) scale = f32[4,6] parameter(1) scale_bcast = f32[2,4,6,8] broadcast(scale), dimensions={1,2} norm_scale = f32[2,4,6,8] multiply(norm, scale_bcast) bias = f32[4,6] parameter(2) bias_broadcast = f32[2,4,6,8] broadcast(bias), dimensions={1,2} ROOT out = f32[2,4,6,8] add(norm_scale, bias_broadcast) })"; const char* optimized_hlo = R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4,6,8], {{.*}}: f32[4,6], {{.*}}: f32[4,6]) -> f32[2,4,6,8] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} parameter(0) ; CHECK-NEXT: [[TRANSPOSE:%[^ ]+]] = f32[2,8,24]{2,1,0} fusion([[P0]]), kind=kLoop, calls={{.*}} ; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[16,4,6,1]{3,2,1,0} bitcast([[TRANSPOSE]]) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[4,6]{1,0} parameter(1) ; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,4,6,1]{3,2,1,0} bitcast([[P1]]) ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4,6]{1,0} parameter(2) ; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,4,6,1]{3,2,1,0} bitcast([[P2]]) ; CHECK-NEXT: [[CC:%[^ ]+]] = (f32[16,4,6,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]), ; CHECK: custom_call_target="__cudnn$norm", ; CHECK: backend_config={ ; CHECK-DAG: "epsilon":0.001 ; CHECK: } ; CHECK-NEXT: [[GTE:%[^ ]+]] = f32[16,4,6,1]{3,2,1,0} get-tuple-element([[CC]]), index=0 ; CHECK-NEXT: [[FUSION:%[^ ]+]] = f32[2,24,8]{2,1,0} fusion([[GTE]]), kind=kLoop, calls=[[FUSED_COMPUTATION:%[^ ]+]] ; CHECK-NEXT: ROOT {{.*}} = f32[2,4,6,8]{3,2,1,0} bitcast([[FUSION]]) )"; TestNorm(hlo_text, optimized_hlo); } TEST_F(CudnnNormRewriterTest, LayerNorm4D12Degenerate2) { const char* hlo_text = R"( HloModule test apply { a = f32[] parameter(0) b = f32[] parameter(1) ROOT c = f32[] add(a,b) } ENTRY test { input = f32[2,4,1,8] parameter(0) input_square = f32[2,4,1,8] multiply(input, input) c0 = f32[] constant(0) input_square_sum = f32[2,8] reduce(input_square, c0), dimensions={1,2}, to_apply=apply r_nelems = 
f32[] constant(0.25) r_nelems_bcast = f32[2,8] broadcast(r_nelems), dimensions={} input_square_mean = f32[2,8] multiply(input_square_sum, r_nelems_bcast) reduce = f32[2,8] reduce(input, c0), dimensions={1,2}, to_apply=apply input_mean = f32[2,8] multiply(reduce, r_nelems_bcast) input_mean_square = f32[2,8] multiply(input_mean, input_mean) variance = f32[2,8] subtract(input_square_mean, input_mean_square) epsilon = f32[] constant(0.001) epsilon_bcast = f32[2,8] broadcast(epsilon), dimensions={} variance_plus_epsilon = f32[2,8] add(variance, epsilon_bcast) norm_factor = f32[2,8] rsqrt(variance_plus_epsilon) norm_factor_bcast = f32[2,4,1,8] broadcast(norm_factor), dimensions={0,3} input_mean_bcast = f32[2,4,1,8] broadcast(input_mean), dimensions={0,3} input_center = f32[2,4,1,8] subtract(input, input_mean_bcast) norm = f32[2,4,1,8] multiply(norm_factor_bcast, input_center) scale = f32[4,1] parameter(1) scale_bcast = f32[2,4,1,8] broadcast(scale), dimensions={1,2} norm_scale = f32[2,4,1,8] multiply(norm, scale_bcast) bias = f32[4,1] parameter(2) bias_broadcast = f32[2,4,1,8] broadcast(bias), dimensions={1,2} ROOT out = f32[2,4,1,8] add(norm_scale, bias_broadcast) })"; const char* optimized_hlo = R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4,1,8], {{.*}}: f32[4,1], {{.*}}: f32[4,1]) -> f32[2,4,1,8] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4,1,8]{3,2,1,0} parameter(0) ; CHECK-NEXT: [[TRANSPOSE:%[^ ]+]] = f32[2,8,4]{2,1,0} fusion([[P0]]), kind=kLoop, calls={{.*}} ; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[16,4,1,1]{3,2,1,0} bitcast([[TRANSPOSE]]) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[4,1]{1,0} parameter(1) ; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P1]]) ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4,1]{1,0} parameter(2) ; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P2]]) ; CHECK-NEXT: [[CC:%[^ ]+]] = (f32[16,4,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]), ; CHECK: custom_call_target="__cudnn$norm", ; CHECK: backend_config={ ; CHECK-DAG: "epsilon":0.001 ; CHECK: } ; CHECK-NEXT: [[GTE:%[^ ]+]] = f32[16,4,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=0 ; CHECK-NEXT: [[FUSION:%[^ ]+]] = f32[2,4,8]{2,1,0} fusion([[GTE]]), kind=kLoop, calls=[[FUSED_COMPUTATION:%[^ ]+]] ; CHECK-NEXT: ROOT {{.*}} = f32[2,4,1,8]{3,2,1,0} bitcast([[FUSION]]) )"; TestNorm(hlo_text, optimized_hlo); } TEST_F(CudnnNormRewriterTest, LayerNorm4D3IncorrectScaleBroadcast) { const char* hlo_text = R"( HloModule test apply { a = f32[] parameter(0) b = f32[] parameter(1) ROOT c = f32[] add(a,b) } ENTRY test { input = f32[2,2,2,2] parameter(0) input_square = f32[2,2,2,2] multiply(input, input) c0 = f32[] constant(0) input_square_sum = f32[2,2,2] reduce(input_square, c0), dimensions={3}, to_apply=apply r_nelems = f32[] constant(0.5) r_nelems_bcast = f32[2,2,2] broadcast(r_nelems), dimensions={} input_square_mean = f32[2,2,2] multiply(input_square_sum, r_nelems_bcast) input_sum = f32[2,2,2] reduce(input, c0), dimensions={3}, to_apply=apply input_mean = f32[2,2,2] multiply(input_sum, r_nelems_bcast) input_mean_square = f32[2,2,2] multiply(input_mean, input_mean) variance = f32[2,2,2] subtract(input_square_mean, input_mean_square) epsilon = f32[] constant(0.001) epsilon_bcast = f32[2,2,2] broadcast(epsilon), dimensions={} variance_plus_epsilon = f32[2,2,2] add(variance, epsilon_bcast) norm_factor = f32[2,2,2] rsqrt(variance_plus_epsilon) norm_factor_bcast = f32[2,2,2,2] broadcast(norm_factor), dimensions={0,1,2} input_mean_bcast = f32[2,2,2,2] 
broadcast(input_mean), dimensions={0,1,2} input_center = f32[2,2,2,2] subtract(input, input_mean_bcast) norm = f32[2,2,2,2] multiply(norm_factor_bcast, input_center) scale = f32[2] parameter(1) scale_bcast = f32[2,2,2,2] broadcast(scale), dimensions={2} norm_scale = f32[2,2,2,2] multiply(norm, scale_bcast) bias = f32[2] parameter(2) bias_bcast = f32[2,2,2,2] broadcast(bias), dimensions={3} ROOT out = f32[2,2,2,2] add(norm_scale, bias_bcast) })"; const char* optimized_hlo = R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,2,2,2], {{.*}}: f32[2], {{.*}}: f32[2]) -> f32[2,2,2,2] { ; CHECK-NOT: custom_call_target="__cudnn$norm" )"; TestNorm(hlo_text, optimized_hlo); } TEST_F(CudnnNormRewriterTest, LayerNorm4D3InputOutputTypeMismatch) { const char* hlo_text = R"( HloModule test apply { a = f32[] parameter(0) b = f32[] parameter(1) ROOT c = f32[] add(a,b) } ENTRY test { input = f16[2,4,6,8] parameter(0) input_f32 = f32[2,4,6,8] convert(input) input_square = f32[2,4,6,8] multiply(input_f32, input_f32) c0 = f32[] constant(0) input_square_sum = f32[2,4,6] reduce(input_square, c0), dimensions={3}, to_apply=apply r_nelems = f32[] constant(0.125) r_nelems_bcast = f32[2,4,6] broadcast(r_nelems), dimensions={} input_square_mean = f32[2,4,6] multiply(input_square_sum, r_nelems_bcast) input_sum = f32[2,4,6] reduce(input_f32, c0), dimensions={3}, to_apply=apply input_mean = f32[2,4,6] multiply(input_sum, r_nelems_bcast) input_mean_square = f32[2,4,6] multiply(input_mean, input_mean) variance = f32[2,4,6] subtract(input_square_mean, input_mean_square) epsilon = f32[] constant(0.001) epsilon_bcast = f32[2,4,6] broadcast(epsilon), dimensions={} variance_plus_epsilon = f32[2,4,6] add(variance, epsilon_bcast) norm_factor = f32[2,4,6] rsqrt(variance_plus_epsilon) norm_factor_bcast = f32[2,4,6,8] broadcast(norm_factor), dimensions={0,1,2} input_mean_bcast = f32[2,4,6,8] broadcast(input_mean), dimensions={0,1,2} input_center = f32[2,4,6,8] subtract(input_f32, input_mean_bcast) norm = f32[2,4,6,8] multiply(norm_factor_bcast, input_center) scale = f32[8] parameter(1) scale_bcast = f32[2,4,6,8] broadcast(scale), dimensions={3} norm_scale = f32[2,4,6,8] multiply(norm, scale_bcast) bias = f32[8] parameter(2) bias_bcast = f32[2,4,6,8] broadcast(bias), dimensions={3} ROOT out = f32[2,4,6,8] add(norm_scale, bias_bcast) })"; const char* optimized_hlo = R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f16[2,4,6,8], {{.*}}: f32[8], {{.*}}: f32[8]) -> f32[2,4,6,8] { ; CHECK-NOT: custom_call_target="__cudnn$norm" )"; TestNorm(hlo_text, optimized_hlo); } TEST_F(CudnnNormRewriterTest, LayerNormTrain2D1) { const char* hlo_text = R"( HloModule test apply { a = f32[] parameter(0) b = f32[] parameter(1) ROOT c = f32[] add(a,b) } ENTRY test { input = f32[2,4] parameter(0) input_square = f32[2,4] multiply(input, input) c0 = f32[] constant(0) input_square_sum = f32[2] reduce(input_square, c0), dimensions={1}, to_apply=apply r_nelems = f32[] constant(0.25) r_nelems_bcast = f32[2] broadcast(r_nelems), dimensions={} input_square_mean = f32[2] multiply(input_square_sum,r_nelems_bcast) reduce = f32[2] reduce(input, c0), dimensions={1}, to_apply=apply input_mean = f32[2] multiply(reduce,r_nelems_bcast) input_mean_square = f32[2] multiply(input_mean,input_mean) variance = f32[2] subtract(input_square_mean,input_mean_square) epsilon = f32[] constant(0.001) epsilon_bcast = f32[2] broadcast(epsilon), dimensions={} variance_plus_epsilon = f32[2] add(variance, epsilon_bcast) norm_factor = f32[2] rsqrt(variance_plus_epsilon) norm_factor_bcast = f32[2,4] 
broadcast(norm_factor), dimensions={0} input_mean_bcast = f32[2,4] broadcast(input_mean), dimensions={0} input_center = f32[2,4] subtract(input,input_mean_bcast) norm = f32[2,4] multiply(norm_factor_bcast,input_center) scale = f32[4] parameter(1) scale_bcast = f32[2,4] broadcast(scale), dimensions={1} norm_scale = f32[2,4] multiply(norm,scale_bcast) bias = f32[4] parameter(2) bias_broadcast = f32[2,4] broadcast(bias), dimensions={1} norm_scale_bias = f32[2,4] add(norm_scale, bias_broadcast) norm_factor_cube = f32[2] divide(norm_factor, variance_plus_epsilon) ROOT out = (f32[2,4], f32[2], f32[2], f32[2]) tuple(norm_scale_bias, input_mean, norm_factor, norm_factor_cube) })"; const char* optimized_hlo = R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4], {{.*}}: f32[4], {{.*}}: f32[4]) -> (f32[2,4], f32[2], f32[2], f32[2]) { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4]{1,0} parameter(0) ; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[2,4,1,1]{3,2,1,0} bitcast([[P0]]) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[4]{0} parameter(1) ; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P1]]) ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4]{0} parameter(2) ; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P2]]) ; CHECK-NEXT: [[CC:%[^ ]+]] = (f32[2,4,1,1]{3,2,1,0}, f32[2,1,1,1]{3,2,1,0}, f32[2,1,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]), ; CHECK: custom_call_target="__cudnn$norm", ; CHECK: backend_config={ ; CHECK-DAG: "epsilon":0.001 ; CHECK: } ; CHECK-NEXT: [[GTE0:%[^ ]+]] = f32[2,4,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=0 ; CHECK-NEXT: [[GTE0_BITCAST:%[^ ]+]] = f32[2,4]{1,0} bitcast([[GTE0]]) ; CHECK-NEXT: [[GTE1:%[^ ]+]] = f32[2,1,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=1 ; CHECK-NEXT: [[GTE1_BITCAST:%[^ ]+]] = f32[2]{0} bitcast([[GTE1]]) ; CHECK-NEXT: [[GTE2:%[^ ]+]] = f32[2,1,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=2 ; CHECK-NEXT: [[GTE2_BITCAST:%[^ ]+]] = f32[2]{0} bitcast([[GTE2]]) ; CHECK-NEXT: [[FUSION:%[^ ]+]] = f32[2]{0} fusion([[GTE2]]), kind=kLoop, calls=[[FUSED_COMPUTATION:%[^ ]+]] ; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = (f32[2,4]{1,0}, f32[2]{0}, f32[2]{0}, f32[2]{0}) tuple([[GTE0_BITCAST]], [[GTE1_BITCAST]], [[GTE2_BITCAST]], [[FUSION]]) )"; TestNorm(hlo_text, optimized_hlo); } TEST_F(CudnnNormRewriterTest, LayerNormTrain4D3) { const char* hlo_text = R"( HloModule test apply { a = f32[] parameter(0) b = f32[] parameter(1) ROOT c = f32[] add(a,b) } ENTRY test { input = f32[2,4,6,8] parameter(0) input_square = f32[2,4,6,8] multiply(input, input) c0 = f32[] constant(0) input_square_sum = f32[2,4,6] reduce(input_square, c0), dimensions={3}, to_apply=apply r_nelems = f32[] constant(0.125) r_nelems_bcast = f32[2,4,6] broadcast(r_nelems), dimensions={} input_square_mean = f32[2,4,6] multiply(input_square_sum, r_nelems_bcast) reduce = f32[2,4,6] reduce(input, c0), dimensions={3}, to_apply=apply input_mean = f32[2,4,6] multiply(reduce, r_nelems_bcast) input_mean_square = f32[2,4,6] multiply(input_mean, input_mean) variance = f32[2,4,6] subtract(input_square_mean, input_mean_square) epsilon = f32[] constant(0.001) epsilon_bcast = f32[2,4,6] broadcast(epsilon), dimensions={} variance_plus_epsilon = f32[2,4,6] add(variance, epsilon_bcast) norm_factor = f32[2,4,6] rsqrt(variance_plus_epsilon) norm_factor_bcast = f32[2,4,6,8] broadcast(norm_factor), dimensions={0,1,2} input_mean_bcast = f32[2,4,6,8] broadcast(input_mean), dimensions={0,1,2} input_center = f32[2,4,6,8] subtract(input, input_mean_bcast) norm = f32[2,4,6,8] 
multiply(norm_factor_bcast, input_center) scale = f32[8] parameter(1) scale_bcast = f32[2,4,6,8] broadcast(scale), dimensions={3} norm_scale = f32[2,4,6,8] multiply(norm,scale_bcast) bias = f32[8] parameter(2) bias_broadcast = f32[2,4,6,8] broadcast(bias), dimensions={3} norm_scale_bias = f32[2,4,6,8] add(norm_scale, bias_broadcast) norm_factor_cube = f32[2,4,6] divide(norm_factor, variance_plus_epsilon) ROOT out = (f32[2,4,6,8], f32[2,4,6], f32[2,4,6], f32[2,4,6]) tuple(norm_scale_bias, input_mean, norm_factor, norm_factor_cube) })"; const char* optimized_hlo = R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4,6,8], {{.*}}: f32[8], {{.*}}: f32[8]) -> (f32[2,4,6,8], f32[2,4,6], f32[2,4,6], f32[2,4,6]) { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} parameter(0) ; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[48,8,1,1]{3,2,1,0} bitcast([[P0]]) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[8]{0} parameter(1) ; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,8,1,1]{3,2,1,0} bitcast([[P1]]) ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[8]{0} parameter(2) ; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,8,1,1]{3,2,1,0} bitcast([[P2]]) ; CHECK-NEXT: [[CC:%[^ ]+]] = (f32[48,8,1,1]{3,2,1,0}, f32[48,1,1,1]{3,2,1,0}, f32[48,1,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]), ; CHECK: custom_call_target="__cudnn$norm", ; CHECK: backend_config={ ; CHECK-DAG: "epsilon":0.001 ; CHECK: } ; CHECK-NEXT: [[GTE0:%[^ ]+]] = f32[48,8,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=0 ; CHECK-NEXT: [[GTE0_BITCAST:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} bitcast([[GTE0]]) ; CHECK-NEXT: [[GTE1:%[^ ]+]] = f32[48,1,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=1 ; CHECK-NEXT: [[GTE1_BITCAST:%[^ ]+]] = f32[2,4,6]{2,1,0} bitcast([[GTE1]]) ; CHECK-NEXT: [[GTE2:%[^ ]+]] = f32[48,1,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=2 ; CHECK-NEXT: [[GTE2_BITCAST:%[^ ]+]] = f32[2,4,6]{2,1,0} bitcast([[GTE2]]) ; CHECK-NEXT: [[FUSION:%[^ ]+]] = f32[2,4,6]{2,1,0} fusion([[GTE2]]), kind=kLoop, calls=[[FUSED_COMPUTATION:%[^ ]+]] ; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = (f32[2,4,6,8]{3,2,1,0}, f32[2,4,6]{2,1,0}, f32[2,4,6]{2,1,0}, f32[2,4,6]{2,1,0}) tuple([[GTE0_BITCAST]], [[GTE1_BITCAST]], [[GTE2_BITCAST]], [[FUSION]]) )"; TestNorm(hlo_text, optimized_hlo); } TEST_F(CudnnNormRewriterTest, LayerNormTrain4D12) { const char* hlo_text = R"( HloModule test apply { a = f32[] parameter(0) b = f32[] parameter(1) ROOT c = f32[] add(a,b) } ENTRY test { input = f32[2,4,6,8] parameter(0) input_square = f32[2,4,6,8] multiply(input, input) c0 = f32[] constant(0) input_square_sum = f32[2,8] reduce(input_square, c0), dimensions={1,2}, to_apply=apply r_nelems = f32[] constant(0.041667) r_nelems_bcast = f32[2,8] broadcast(r_nelems), dimensions={} input_square_mean = f32[2,8] multiply(input_square_sum, r_nelems_bcast) reduce = f32[2,8] reduce(input, c0), dimensions={1,2}, to_apply=apply input_mean = f32[2,8] multiply(reduce, r_nelems_bcast) input_mean_square = f32[2,8] multiply(input_mean, input_mean) variance = f32[2,8] subtract(input_square_mean, input_mean_square) epsilon = f32[] constant(0.001) epsilon_bcast = f32[2,8] broadcast(epsilon), dimensions={} variance_plus_epsilon = f32[2,8] add(variance, epsilon_bcast) norm_factor = f32[2,8] rsqrt(variance_plus_epsilon) norm_factor_bcast = f32[2,4,6,8] broadcast(norm_factor), dimensions={0,3} input_mean_bcast = f32[2,4,6,8] broadcast(input_mean), dimensions={0,3} input_center = f32[2,4,6,8] subtract(input, input_mean_bcast) norm = f32[2,4,6,8] multiply(norm_factor_bcast, input_center) scale = f32[4,6] 
parameter(1) scale_bcast = f32[2,4,6,8] broadcast(scale), dimensions={1,2} norm_scale = f32[2,4,6,8] multiply(norm, scale_bcast) bias = f32[4,6] parameter(2) bias_broadcast = f32[2,4,6,8] broadcast(bias), dimensions={1,2} norm_scale_bias = f32[2,4,6,8] add(norm_scale, bias_broadcast) norm_factor_cube = f32[2,8] divide(norm_factor, variance_plus_epsilon) ROOT out = (f32[2,4,6,8], f32[2,8], f32[2,8], f32[2,8]) tuple(norm_scale_bias, input_mean, norm_factor, norm_factor_cube) })"; const char* optimized_hlo = R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4,6,8], {{.*}}: f32[4,6], {{.*}}: f32[4,6]) -> (f32[2,4,6,8], f32[2,8], f32[2,8], f32[2,8]) { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} parameter(0) ; CHECK-NEXT: [[TRANSPOSE:%[^ ]+]] = f32[2,8,24]{2,1,0} fusion([[P0]]), kind=kLoop, calls={{.*}} ; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[16,4,6,1]{3,2,1,0} bitcast([[TRANSPOSE]]) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[4,6]{1,0} parameter(1) ; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,4,6,1]{3,2,1,0} bitcast([[P1]]) ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4,6]{1,0} parameter(2) ; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,4,6,1]{3,2,1,0} bitcast([[P2]]) ; CHECK-NEXT: [[CC:%[^ ]+]] = (f32[16,4,6,1]{3,2,1,0}, f32[16,1,1,1]{3,2,1,0}, f32[16,1,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]), ; CHECK: custom_call_target="__cudnn$norm", ; CHECK: backend_config={ ; CHECK-DAG: "epsilon":0.001 ; CHECK: } ; CHECK-NEXT: [[GTE0:%[^ ]+]] = f32[16,4,6,1]{3,2,1,0} get-tuple-element([[CC]]), index=0 ; CHECK-NEXT: [[FUSION0:%[^ ]+]] = f32[2,24,8]{2,1,0} fusion([[GTE0]]), kind=kLoop, calls=[[FUSED_COMPUTATION0:%[^ ]+]] ; CHECK-NEXT: [[BITCAST:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} bitcast([[FUSION0]]) ; CHECK-NEXT: [[GTE1:%[^ ]+]] = f32[16,1,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=1 ; CHECK-NEXT: [[GTE1_BITCAST:%[^ ]+]] = f32[2,8]{1,0} bitcast([[GTE1]]) ; CHECK-NEXT: [[GTE2:%[^ ]+]] = f32[16,1,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=2 ; CHECK-NEXT: [[GTE2_BITCAST:%[^ ]+]] = f32[2,8]{1,0} bitcast([[GTE2]]) ; CHECK-NEXT: [[FUSION1:%[^ ]+]] = f32[2,8]{1,0} fusion([[GTE2]]), kind=kLoop, calls=[[FUSED_COMPUTATION1:%[^ ]+]] ; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = (f32[2,4,6,8]{3,2,1,0}, f32[2,8]{1,0}, f32[2,8]{1,0}, f32[2,8]{1,0}) tuple([[BITCAST]], [[GTE1_BITCAST]], [[GTE2_BITCAST]], [[FUSION1]]) )"; TestNorm(hlo_text, optimized_hlo); } TEST_F(CudnnNormRewriterTest, LayerNormTrain4D12Degenerate2) { const char* hlo_text = R"( HloModule test apply { a = f32[] parameter(0) b = f32[] parameter(1) ROOT c = f32[] add(a,b) } ENTRY test { input = f32[2,4,1,8] parameter(0) input_square = f32[2,4,1,8] multiply(input, input) c0 = f32[] constant(0) input_square_sum = f32[2,8] reduce(input_square, c0), dimensions={1,2}, to_apply=apply r_nelems = f32[] constant(0.25) r_nelems_bcast = f32[2,8] broadcast(r_nelems), dimensions={} input_square_mean = f32[2,8] multiply(input_square_sum, r_nelems_bcast) reduce = f32[2,8] reduce(input, c0), dimensions={1,2}, to_apply=apply input_mean = f32[2,8] multiply(reduce, r_nelems_bcast) input_mean_square = f32[2,8] multiply(input_mean, input_mean) variance = f32[2,8] subtract(input_square_mean, input_mean_square) epsilon = f32[] constant(0.001) epsilon_bcast = f32[2,8] broadcast(epsilon), dimensions={} variance_plus_epsilon = f32[2,8] add(variance, epsilon_bcast) norm_factor = f32[2,8] rsqrt(variance_plus_epsilon) norm_factor_bcast = f32[2,4,1,8] broadcast(norm_factor), dimensions={0,3} input_mean_bcast = f32[2,4,1,8] broadcast(input_mean), 
dimensions={0,3} input_center = f32[2,4,1,8] subtract(input, input_mean_bcast) norm = f32[2,4,1,8] multiply(norm_factor_bcast, input_center) scale = f32[4,1] parameter(1) scale_bcast = f32[2,4,1,8] broadcast(scale), dimensions={1,2} norm_scale = f32[2,4,1,8] multiply(norm, scale_bcast) bias = f32[4,1] parameter(2) bias_broadcast = f32[2,4,1,8] broadcast(bias), dimensions={1,2} norm_scale_bias = f32[2,4,1,8] add(norm_scale, bias_broadcast) norm_factor_cube = f32[2,8] divide(norm_factor, variance_plus_epsilon) ROOT out = (f32[2,4,1,8], f32[2,8], f32[2,8], f32[2,8]) tuple(norm_scale_bias, input_mean, norm_factor, norm_factor_cube) })"; const char* optimized_hlo = R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4,1,8], {{.*}}: f32[4,1], {{.*}}: f32[4,1]) -> (f32[2,4,1,8], f32[2,8], f32[2,8], f32[2,8]) { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4,1,8]{3,2,1,0} parameter(0) ; CHECK-NEXT: [[TRANSPOSE:%[^ ]+]] = f32[2,8,4]{2,1,0} fusion([[P0]]), kind=kLoop, calls={{.*}} ; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[16,4,1,1]{3,2,1,0} bitcast([[TRANSPOSE]]) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[4,1]{1,0} parameter(1) ; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P1]]) ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4,1]{1,0} parameter(2) ; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P2]]) ; CHECK-NEXT: [[CC:%[^ ]+]] = (f32[16,4,1,1]{3,2,1,0}, f32[16,1,1,1]{3,2,1,0}, f32[16,1,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]), ; CHECK: custom_call_target="__cudnn$norm", ; CHECK: backend_config={ ; CHECK-DAG: "epsilon":0.001 ; CHECK: } ; CHECK-NEXT: [[GTE0:%[^ ]+]] = f32[16,4,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=0 ; CHECK-NEXT: [[FUSION0:%[^ ]+]] = f32[2,4,8]{2,1,0} fusion([[GTE0]]), kind=kLoop, calls=[[FUSED_COMPUTATION0:%[^ ]+]] ; CHECK-NEXT: [[BITCAST:%[^ ]+]] = f32[2,4,1,8]{3,2,1,0} bitcast([[FUSION0]]) ; CHECK-NEXT: [[GTE1:%[^ ]+]] = f32[16,1,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=1 ; CHECK-NEXT: [[GTE1_BITCAST:%[^ ]+]] = f32[2,8]{1,0} bitcast([[GTE1]]) ; CHECK-NEXT: [[GTE2:%[^ ]+]] = f32[16,1,1,1]{3,2,1,0} get-tuple-element([[CC]]), index=2 ; CHECK-NEXT: [[GTE2_BITCAST:%[^ ]+]] = f32[2,8]{1,0} bitcast([[GTE2]]) ; CHECK-NEXT: [[FUSION1:%[^ ]+]] = f32[2,8]{1,0} fusion([[GTE2]]), kind=kLoop, calls=[[FUSED_COMPUTATION1:%[^ ]+]] ; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = (f32[2,4,1,8]{3,2,1,0}, f32[2,8]{1,0}, f32[2,8]{1,0}, f32[2,8]{1,0}) tuple([[BITCAST]], [[GTE1_BITCAST]], [[GTE2_BITCAST]], [[FUSION1]]) )"; TestNorm(hlo_text, optimized_hlo); } TEST_F(CudnnNormRewriterTest, LayerNormTrainBackward2D1) { const char* hlo_text = R"( HloModule test apply { a = f32[] parameter(0) b = f32[] parameter(1) ROOT c = f32[] add(a,b) } ENTRY test { input = f32[2,4] parameter(0) input_square = f32[2,4] multiply(input, input) c0 = f32[] constant(0) input_square_sum = f32[2] reduce(input_square, c0), dimensions={1}, to_apply=apply reduce = f32[2] reduce(input, c0), dimensions={1}, to_apply=apply r_nelems = f32[] constant(0.25) r_nelems_bcast = f32[2] broadcast(r_nelems), dimensions={} input_square_mean = f32[2] multiply(input_square_sum,r_nelems_bcast) input_mean = f32[2] multiply(reduce, r_nelems_bcast) input_mean_square = f32[2] multiply(input_mean,input_mean) variance = f32[2] subtract(input_square_mean,input_mean_square) epsilon = f32[] constant(0.001) epsilon_bcast = f32[2] broadcast(epsilon), dimensions={} variance_plus_epsilon = f32[2] add(variance, epsilon_bcast) norm_factor = f32[2] rsqrt(variance_plus_epsilon) norm_factor_bcast = 
f32[2,4] broadcast(norm_factor), dimensions={0} input_mean_bcast = f32[2,4] broadcast(input_mean), dimensions={0} input_center = f32[2,4] subtract(input, input_mean_bcast) norm = f32[2,4] multiply(input_center, norm_factor_bcast) scale = f32[4] parameter(1) scale_bcast = f32[2,4] broadcast(scale), dimensions={1} norm_scale = f32[2,4] multiply(norm, scale_bcast) bias = f32[4] parameter(2) bias_bcast = f32[2,4] broadcast(bias), dimensions={1} norm_scale_bias = f32[2,4] add(norm_scale, bias_bcast) doutput = f32[2,4] parameter(3) dbias = f32[4] reduce(doutput, c0), dimensions={0}, to_apply=apply norm_doutput = f32[2,4] multiply(norm, doutput) dscale = f32[4] reduce(norm_doutput, c0), dimensions={0}, to_apply=apply scale_doutput = f32[2,4] multiply(scale_bcast, doutput) input_center_scale_doutput = f32[2,4] multiply(input_center, scale_doutput) f0 = f32[2] reduce(input_center_scale_doutput, c0), dimensions={1}, to_apply=apply norm_factor_cube = f32[2] divide(norm_factor, variance_plus_epsilon) c1 = f32[] constant(-0.5) c1_bcast = f32[2] broadcast(c1), dimensions={} dnorm_factor = f32[2] multiply(norm_factor_cube, c1_bcast) f0_dnorm_factor = f32[2] multiply(f0, dnorm_factor) c2 = f32[] constant(0.5) c2_bcast = f32[2] broadcast(c2), dimensions={} f0_dnorm_factor_scaled = f32[2] multiply(f0_dnorm_factor, c2_bcast) f0_dnorm_factor_scaled_bcast = f32[2,4] broadcast(f0_dnorm_factor_scaled), dimensions={0} f1 = f32[2,4] multiply(input_center, f0_dnorm_factor_scaled_bcast) minus_f1 = f32[2,4] negate(f1) minus_f1_sum = f32[2] reduce(minus_f1, c0), dimensions={1}, to_apply=apply f2 = f32[2,4] multiply(norm_factor_bcast, scale_doutput) minus_f2 = f32[2,4] negate(f2) minus_f2_sum = f32[2] reduce(minus_f2, c0), dimensions={1}, to_apply=apply minus_f1_f2_sum = f32[2] add(minus_f1_sum, minus_f2_sum) minus_f1_f2_sum_scaled = f32[2] multiply(minus_f1_f2_sum, r_nelems_bcast) minus_f1_f2_sum_scaled_bcast = f32[2,4] broadcast(minus_f1_f2_sum_scaled), dimensions={0} f1_f2 = f32[2,4] add(f1, f2) dinput = f32[2,4] add(f1_f2, minus_f1_f2_sum_scaled_bcast) ROOT out = (f32[2,4], f32[2,4], f32[4], f32[4]) tuple(norm_scale_bias, dinput, dscale, dbias) })"; const char* optimized_hlo = R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4], {{.*}}: f32[4], {{.*}}: f32[4], {{.*}}: f32[2,4]) -> (f32[2,4], f32[2,4], f32[4], f32[4]) { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4]{1,0} parameter(0) ; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[2,4,1,1]{3,2,1,0} bitcast([[P0]]) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[4]{0} parameter(1) ; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P1]]) ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4]{0} parameter(2) ; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P2]]) ; CHECK-NEXT: [[CC0:%[^ ]+]] = (f32[2,4,1,1]{3,2,1,0}, f32[2,1,1,1]{3,2,1,0}, f32[2,1,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]), ; CHECK: custom_call_target="__cudnn$norm", ; CHECK: backend_config={ ; CHECK-DAG: "epsilon":0.001 ; CHECK-DAG: "kind":"LAYER_FWD_TRAIN" ; CHECK: } ; CHECK-DAG: [[GTE0:%[^ ]+]] = f32[2,4,1,1]{3,2,1,0} get-tuple-element([[CC0]]), index=0 ; CHECK-DAG: [[GTE0_BITCAST:%[^ ]+]] = f32[2,4]{1,0} bitcast([[GTE0]]) ; CHECK-DAG: [[P3:%[^ ]+]] = f32[2,4]{1,0} parameter(3) ; CHECK-DAG: [[P3_BITCAST:%[^ ]+]] = f32[2,4,1,1]{3,2,1,0} bitcast([[P3]]) ; CHECK-DAG: [[GTE1:%[^ ]+]] = f32[2,1,1,1]{3,2,1,0} get-tuple-element([[CC0]]), index=1 ; CHECK-DAG: [[GTE2:%[^ ]+]] = f32[2,1,1,1]{3,2,1,0} get-tuple-element([[CC0]]), index=2 ; CHECK-NEXT: [[CC1:%[^ ]+]] = 
(f32[2,4,1,1]{3,2,1,0}, f32[1,4,1,1]{3,2,1,0}, f32[1,4,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P3_BITCAST]], [[GTE1]], [[GTE2]]), ; CHECK: custom_call_target="__cudnn$norm", ; CHECK: backend_config={ ; CHECK-DAG: "epsilon":0 ; CHECK-DAG: "kind":"LAYER_BWD" ; CHECK: } ; CHECK-DAG: [[GTE3:%[^ ]+]] = f32[2,4,1,1]{3,2,1,0} get-tuple-element([[CC1]]), index=0 ; CHECK-DAG: [[GTE3_BITCAST:%[^ ]+]] = f32[2,4]{1,0} bitcast([[GTE3]]) ; CHECK-DAG: [[GTE4:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} get-tuple-element([[CC1]]), index=1 ; CHECK-DAG: [[GTE4_BITCAST:%[^ ]+]] = f32[4]{0} bitcast([[GTE4]]) ; CHECK-DAG: [[GTE5:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} get-tuple-element([[CC1]]), index=2 ; CHECK-DAG: [[GTE5_BITCAST:%[^ ]+]] = f32[4]{0} bitcast([[GTE5]]) ; CHECK-DAG: ROOT [[OUT:%[^ ]+]] = (f32[2,4]{1,0}, f32[2,4]{1,0}, f32[4]{0}, f32[4]{0}) tuple([[GTE0_BITCAST]], [[GTE3_BITCAST]], [[GTE4_BITCAST]], [[GTE5_BITCAST]]) )"; TestNorm(hlo_text, optimized_hlo); } TEST_F(CudnnNormRewriterTest, LayerNormTrainBackward4D3) { const char* hlo_text = R"( HloModule test apply { a = f32[] parameter(0) b = f32[] parameter(1) ROOT c = f32[] add(a,b) } ENTRY test { input = f32[2,4,6,8] parameter(0) input_square = f32[2,4,6,8] multiply(input, input) c0 = f32[] constant(0) input_square_sum = f32[2,4,6] reduce(input_square, c0), dimensions={3}, to_apply=apply reduce = f32[2,4,6] reduce(input, c0), dimensions={3}, to_apply=apply r_nelems = f32[] constant(0.125) r_nelems_bcast = f32[2,4,6] broadcast(r_nelems), dimensions={} input_square_mean = f32[2,4,6] multiply(input_square_sum,r_nelems_bcast) input_mean = f32[2,4,6] multiply(reduce, r_nelems_bcast) input_mean_square = f32[2,4,6] multiply(input_mean,input_mean) variance = f32[2,4,6] subtract(input_square_mean,input_mean_square) epsilon = f32[] constant(0.001) epsilon_bcast = f32[2,4,6] broadcast(epsilon), dimensions={} variance_plus_epsilon = f32[2,4,6] add(variance, epsilon_bcast) norm_factor = f32[2,4,6] rsqrt(variance_plus_epsilon) norm_factor_bcast = f32[2,4,6,8] broadcast(norm_factor), dimensions={0,1,2} input_mean_bcast = f32[2,4,6,8] broadcast(input_mean), dimensions={0,1,2} input_center = f32[2,4,6,8] subtract(input, input_mean_bcast) norm = f32[2,4,6,8] multiply(input_center, norm_factor_bcast) scale = f32[8] parameter(1) scale_bcast = f32[2,4,6,8] broadcast(scale), dimensions={3} norm_scale = f32[2,4,6,8] multiply(norm, scale_bcast) bias = f32[8] parameter(2) bias_bcast = f32[2,4,6,8] broadcast(bias), dimensions={3} norm_scale_bias = f32[2,4,6,8] add(norm_scale, bias_bcast) doutput = f32[2,4,6,8] parameter(3) dbias = f32[8] reduce(doutput, c0), dimensions={0,1,2}, to_apply=apply norm_doutput = f32[2,4,6,8] multiply(norm, doutput) dscale = f32[8] reduce(norm_doutput, c0), dimensions={0,1,2}, to_apply=apply scale_doutput = f32[2,4,6,8] multiply(scale_bcast, doutput) input_center_scale_doutput = f32[2,4,6,8] multiply(input_center, scale_doutput) f0 = f32[2,4,6] reduce(input_center_scale_doutput, c0), dimensions={3}, to_apply=apply norm_factor_cube = f32[2,4,6] divide(norm_factor, variance_plus_epsilon) c1 = f32[] constant(-0.5) c1_bcast = f32[2,4,6] broadcast(c1), dimensions={} dnorm_factor = f32[2,4,6] multiply(norm_factor_cube, c1_bcast) f0_dnorm_factor = f32[2,4,6] multiply(f0, dnorm_factor) c2 = f32[] constant(0.25) c2_bcast = f32[2,4,6] broadcast(c2), dimensions={} f0_dnorm_factor_scaled = f32[2,4,6] multiply(f0_dnorm_factor, c2_bcast) f0_dnorm_factor_scaled_bcast = f32[2,4,6,8] broadcast(f0_dnorm_factor_scaled), dimensions={0,1,2} f1 = 
f32[2,4,6,8] multiply(input_center, f0_dnorm_factor_scaled_bcast) minus_f1 = f32[2,4,6,8] negate(f1) minus_f1_sum = f32[2,4,6] reduce(minus_f1, c0), dimensions={3}, to_apply=apply f2 = f32[2,4,6,8] multiply(norm_factor_bcast, scale_doutput) minus_f2 = f32[2,4,6,8] negate(f2) minus_f2_sum = f32[2,4,6] reduce(minus_f2, c0), dimensions={3}, to_apply=apply minus_f1_f2_sum = f32[2,4,6] add(minus_f1_sum, minus_f2_sum) minus_f1_f2_sum_scaled = f32[2,4,6] multiply(minus_f1_f2_sum, r_nelems_bcast) minus_f1_f2_sum_scaled_bcast = f32[2,4,6,8] broadcast(minus_f1_f2_sum_scaled), dimensions={0,1,2} f1_f2 = f32[2,4,6,8] add(f1, f2) dinput = f32[2,4,6,8] add(f1_f2, minus_f1_f2_sum_scaled_bcast) ROOT out = (f32[2,4,6,8], f32[2,4,6,8], f32[8], f32[8]) tuple(norm_scale_bias, dinput, dscale, dbias) })"; const char* optimized_hlo = R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4,6,8], {{.*}}: f32[8], {{.*}}: f32[8], {{.*}}: f32[2,4,6,8]) -> (f32[2,4,6,8], f32[2,4,6,8], f32[8], f32[8]) { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} parameter(0) ; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[48,8,1,1]{3,2,1,0} bitcast([[P0]]) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[8]{0} parameter(1) ; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,8,1,1]{3,2,1,0} bitcast([[P1]]) ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[8]{0} parameter(2) ; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,8,1,1]{3,2,1,0} bitcast([[P2]]) ; CHECK-NEXT: [[CC0:%[^ ]+]] = (f32[48,8,1,1]{3,2,1,0}, f32[48,1,1,1]{3,2,1,0}, f32[48,1,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]), ; CHECK: custom_call_target="__cudnn$norm", ; CHECK: backend_config={ ; CHECK-DAG: "epsilon":0.001 ; CHECK-DAG: "kind":"LAYER_FWD_TRAIN" ; CHECK: } ; CHECK-DAG: [[GTE0:%[^ ]+]] = f32[48,8,1,1]{3,2,1,0} get-tuple-element([[CC0]]), index=0 ; CHECK-DAG: [[GTE0_BITCAST:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} bitcast([[GTE0]]) ; CHECK-DAG: [[P3:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} parameter(3) ; CHECK-DAG: [[P3_BITCAST:%[^ ]+]] = f32[48,8,1,1]{3,2,1,0} bitcast([[P3]]) ; CHECK-DAG: [[GTE1:%[^ ]+]] = f32[48,1,1,1]{3,2,1,0} get-tuple-element([[CC0]]), index=1 ; CHECK-DAG: [[GTE2:%[^ ]+]] = f32[48,1,1,1]{3,2,1,0} get-tuple-element([[CC0]]), index=2 ; CHECK-NEXT: [[CC1:%[^ ]+]] = (f32[48,8,1,1]{3,2,1,0}, f32[1,8,1,1]{3,2,1,0}, f32[1,8,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P3_BITCAST]], [[GTE1]], [[GTE2]]), ; CHECK: custom_call_target="__cudnn$norm", ; CHECK: backend_config={ ; CHECK-DAG: "epsilon":0 ; CHECK-DAG: "kind":"LAYER_BWD" ; CHECK: } ; CHECK-DAG: [[GTE3:%[^ ]+]] = f32[48,8,1,1]{3,2,1,0} get-tuple-element([[CC1]]), index=0 ; CHECK-DAG: [[GTE3_BITCAST:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} bitcast([[GTE3]]) ; CHECK-DAG: [[GTE4:%[^ ]+]] = f32[1,8,1,1]{3,2,1,0} get-tuple-element([[CC1]]), index=1 ; CHECK-DAG: [[GTE4_BITCAST:%[^ ]+]] = f32[8]{0} bitcast([[GTE4]]) ; CHECK-DAG: [[GTE5:%[^ ]+]] = f32[1,8,1,1]{3,2,1,0} get-tuple-element([[CC1]]), index=2 ; CHECK-DAG: [[GTE5_BITCAST:%[^ ]+]] = f32[8]{0} bitcast([[GTE5]]) ; CHECK-DAG: ROOT [[OUT:%[^ ]+]] = (f32[2,4,6,8]{3,2,1,0}, f32[2,4,6,8]{3,2,1,0}, f32[8]{0}, f32[8]{0}) tuple([[GTE0_BITCAST]], [[GTE3_BITCAST]], [[GTE4_BITCAST]], [[GTE5_BITCAST]]) )"; TestNorm(hlo_text, optimized_hlo); } TEST_F(CudnnNormRewriterTest, LayerNormTrainBackward4D2) { const char* hlo_text = R"( HloModule test apply { a = f32[] parameter(0) b = f32[] parameter(1) ROOT c = f32[] add(a,b) } ENTRY test { input = f32[2,4,6,8] parameter(0) input_square = f32[2,4,6,8] multiply(input, input) c0 = f32[] constant(0) 
input_square_sum = f32[2,4,8] reduce(input_square, c0), dimensions={2}, to_apply=apply reduce = f32[2,4,8] reduce(input, c0), dimensions={2}, to_apply=apply r_nelems = f32[] constant(0.166667) r_nelems_bcast = f32[2,4,8] broadcast(r_nelems), dimensions={} input_square_mean = f32[2,4,8] multiply(input_square_sum,r_nelems_bcast) input_mean = f32[2,4,8] multiply(reduce, r_nelems_bcast) input_mean_square = f32[2,4,8] multiply(input_mean,input_mean) variance = f32[2,4,8] subtract(input_square_mean,input_mean_square) epsilon = f32[] constant(0.001) epsilon_bcast = f32[2,4,8] broadcast(epsilon), dimensions={} variance_plus_epsilon = f32[2,4,8] add(variance, epsilon_bcast) norm_factor = f32[2,4,8] rsqrt(variance_plus_epsilon) norm_factor_bcast = f32[2,4,6,8] broadcast(norm_factor), dimensions={0,1,3} input_mean_bcast = f32[2,4,6,8] broadcast(input_mean), dimensions={0,1,3} input_center = f32[2,4,6,8] subtract(input, input_mean_bcast) norm = f32[2,4,6,8] multiply(input_center, norm_factor_bcast) scale = f32[6] parameter(1) scale_bcast = f32[2,4,6,8] broadcast(scale), dimensions={2} norm_scale = f32[2,4,6,8] multiply(norm, scale_bcast) bias = f32[6] parameter(2) bias_bcast = f32[2,4,6,8] broadcast(bias), dimensions={2} norm_scale_bias = f32[2,4,6,8] add(norm_scale, bias_bcast) doutput = f32[2,4,6,8] parameter(3) dbias = f32[6] reduce(doutput, c0), dimensions={0,1,3}, to_apply=apply norm_doutput = f32[2,4,6,8] multiply(norm, doutput) dscale = f32[6] reduce(norm_doutput, c0), dimensions={0,1,3}, to_apply=apply scale_doutput = f32[2,4,6,8] multiply(scale_bcast, doutput) input_center_scale_doutput = f32[2,4,6,8] multiply(input_center, scale_doutput) f0 = f32[2,4,8] reduce(input_center_scale_doutput, c0), dimensions={2}, to_apply=apply norm_factor_cube = f32[2,4,8] divide(norm_factor, variance_plus_epsilon) c1 = f32[] constant(-0.5) c1_bcast = f32[2,4,8] broadcast(c1), dimensions={} dnorm_factor = f32[2,4,8] multiply(norm_factor_cube, c1_bcast) f0_dnorm_factor = f32[2,4,8] multiply(f0, dnorm_factor) c2 = f32[] constant(0.333333) c2_bcast = f32[2,4,8] broadcast(c2), dimensions={} f0_dnorm_factor_scaled = f32[2,4,8] multiply(f0_dnorm_factor, c2_bcast) f0_dnorm_factor_scaled_bcast = f32[2,4,6,8] broadcast(f0_dnorm_factor_scaled), dimensions={0,1,3} f1 = f32[2,4,6,8] multiply(input_center, f0_dnorm_factor_scaled_bcast) minus_f1 = f32[2,4,6,8] negate(f1) minus_f1_sum = f32[2,4,8] reduce(minus_f1, c0), dimensions={2}, to_apply=apply f2 = f32[2,4,6,8] multiply(norm_factor_bcast, scale_doutput) minus_f2 = f32[2,4,6,8] negate(f2) minus_f2_sum = f32[2,4,8] reduce(minus_f2, c0), dimensions={2}, to_apply=apply minus_f1_f2_sum = f32[2,4,8] add(minus_f1_sum, minus_f2_sum) minus_f1_f2_sum_scaled = f32[2,4,8] multiply(minus_f1_f2_sum, r_nelems_bcast) minus_f1_f2_sum_scaled_bcast = f32[2,4,6,8] broadcast(minus_f1_f2_sum_scaled), dimensions={0,1,3} f1_f2 = f32[2,4,6,8] add(f1, f2) dinput = f32[2,4,6,8] add(f1_f2, minus_f1_f2_sum_scaled_bcast) ROOT out = (f32[2,4,6,8], f32[2,4,6,8], f32[6], f32[6]) tuple(norm_scale_bias, dinput, dscale, dbias) })"; const char* optimized_hlo = R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4,6,8], {{.*}}: f32[6], {{.*}}: f32[6], {{.*}}: f32[2,4,6,8]) -> (f32[2,4,6,8], f32[2,4,6,8], f32[6], f32[6]) { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} parameter(0) ; CHECK-NEXT: [[TRANSPOSE0:%[^ ]+]] = f32[8,8,6]{2,1,0} fusion([[P0]]), kind=kLoop, calls={{.*}} ; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[64,6,1,1]{3,2,1,0} bitcast([[TRANSPOSE0]]) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[6]{0} 
parameter(1) ; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,6,1,1]{3,2,1,0} bitcast([[P1]]) ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[6]{0} parameter(2) ; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,6,1,1]{3,2,1,0} bitcast([[P2]]) ; CHECK-NEXT: [[CC0:%[^ ]+]] = (f32[64,6,1,1]{3,2,1,0}, f32[64,1,1,1]{3,2,1,0}, f32[64,1,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]), ; CHECK: custom_call_target="__cudnn$norm", ; CHECK: backend_config={ ; CHECK-DAG: "epsilon":0.001 ; CHECK-DAG: "kind":"LAYER_FWD_TRAIN" ; CHECK: } ; CHECK-DAG: [[GTE0:%[^ ]+]] = f32[64,6,1,1]{3,2,1,0} get-tuple-element([[CC0]]), index=0 ; CHECK-DAG: [[TRANSPOSE1:%[^ ]+]] = f32[8,6,8]{2,1,0} fusion([[GTE0]]), kind=kLoop, calls={{.*}} ; CHECK-DAG: [[BITCAST:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} bitcast([[TRANSPOSE1]]) ; CHECK-DAG: [[P3:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} parameter(3) ; CHECK-NEXT: [[TRANSPOSE2:%[^ ]+]] = f32[8,8,6]{2,1,0} fusion([[P3]]), kind=kLoop, calls{{.*}} ; CHECK-DAG: [[P3_BITCAST:%[^ ]+]] = f32[64,6,1,1]{3,2,1,0} bitcast([[TRANSPOSE2]]) ; CHECK-DAG: [[GTE1:%[^ ]+]] = f32[64,1,1,1]{3,2,1,0} get-tuple-element([[CC0]]), index=1 ; CHECK-DAG: [[GTE2:%[^ ]+]] = f32[64,1,1,1]{3,2,1,0} get-tuple-element([[CC0]]), index=2 ; CHECK-NEXT: [[CC1:%[^ ]+]] = (f32[64,6,1,1]{3,2,1,0}, f32[1,6,1,1]{3,2,1,0}, f32[1,6,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P3_BITCAST]], [[GTE1]], [[GTE2]]), ; CHECK: custom_call_target="__cudnn$norm", ; CHECK: backend_config={ ; CHECK-DAG: "epsilon":0 ; CHECK-DAG: "kind":"LAYER_BWD" ; CHECK: } ; CHECK-DAG: [[GTE3:%[^ ]+]] = f32[64,6,1,1]{3,2,1,0} get-tuple-element([[CC1]]), index=0 ; CHECK-DAG: [[FUSION:%[^ ]+]] = f32[8,6,8]{2,1,0} fusion([[GTE3]]), kind=kLoop, calls=[[FUSED_COMPUTATION:%[^ ]+]] ; CHECK-DAG: [[BITCAST2:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} bitcast([[FUSION]]) ; CHECK-DAG: [[GTE4:%[^ ]+]] = f32[1,6,1,1]{3,2,1,0} get-tuple-element([[CC1]]), index=1 ; CHECK-DAG: [[GTE4_BITCAST:%[^ ]+]] = f32[6]{0} bitcast([[GTE4]]) ; CHECK-DAG: [[GTE5:%[^ ]+]] = f32[1,6,1,1]{3,2,1,0} get-tuple-element([[CC1]]), index=2 ; CHECK-DAG: [[GTE5_BITCAST:%[^ ]+]] = f32[6]{0} bitcast([[GTE5]]) ; CHECK-DAG: ROOT [[OUT:%[^ ]+]] = (f32[2,4,6,8]{3,2,1,0}, f32[2,4,6,8]{3,2,1,0}, f32[6]{0}, f32[6]{0}) tuple([[BITCAST]], [[BITCAST2]], [[GTE4_BITCAST]], [[GTE5_BITCAST]]) )"; TestNorm(hlo_text, optimized_hlo); } TEST_F(CudnnNormRewriterTest, LayerNormTrainBackward4D12) { const char* hlo_text = R"( HloModule test apply { a = f32[] parameter(0) b = f32[] parameter(1) ROOT c = f32[] add(a,b) } ENTRY test { input = f32[2,4,6,8] parameter(0) input_square = f32[2,4,6,8] multiply(input, input) c0 = f32[] constant(0) input_square_sum = f32[2,8] reduce(input_square, c0), dimensions={1,2}, to_apply=apply reduce = f32[2,8] reduce(input, c0), dimensions={1,2}, to_apply=apply r_nelems = f32[] constant(0.041667) r_nelems_bcast = f32[2,8] broadcast(r_nelems), dimensions={} input_square_mean = f32[2,8] multiply(input_square_sum,r_nelems_bcast) input_mean = f32[2,8] multiply(reduce, r_nelems_bcast) input_mean_square = f32[2,8] multiply(input_mean,input_mean) variance = f32[2,8] subtract(input_square_mean,input_mean_square) epsilon = f32[] constant(0.001) epsilon_bcast = f32[2,8] broadcast(epsilon), dimensions={} variance_plus_epsilon = f32[2,8] add(variance, epsilon_bcast) norm_factor = f32[2,8] rsqrt(variance_plus_epsilon) norm_factor_bcast = f32[2,4,6,8] broadcast(norm_factor), dimensions={0,3} input_mean_bcast = f32[2,4,6,8] broadcast(input_mean), dimensions={0,3} 
input_center = f32[2,4,6,8] subtract(input, input_mean_bcast) norm = f32[2,4,6,8] multiply(input_center, norm_factor_bcast) scale = f32[4,6] parameter(1) scale_bcast = f32[2,4,6,8] broadcast(scale), dimensions={1,2} norm_scale = f32[2,4,6,8] multiply(norm, scale_bcast) bias = f32[4,6] parameter(2) bias_bcast = f32[2,4,6,8] broadcast(bias), dimensions={1,2} norm_scale_bias = f32[2,4,6,8] add(norm_scale, bias_bcast) doutput = f32[2,4,6,8] parameter(3) dbias = f32[4,6] reduce(doutput, c0), dimensions={0,3}, to_apply=apply norm_doutput = f32[2,4,6,8] multiply(norm, doutput) dscale = f32[4,6] reduce(norm_doutput, c0), dimensions={0,3}, to_apply=apply scale_doutput = f32[2,4,6,8] multiply(scale_bcast, doutput) input_center_scale_doutput = f32[2,4,6,8] multiply(input_center, scale_doutput) f0 = f32[2,8] reduce(input_center_scale_doutput, c0), dimensions={1,2}, to_apply=apply norm_factor_cube = f32[2,8] divide(norm_factor, variance_plus_epsilon) c1 = f32[] constant(-0.5) c1_bcast = f32[2,8] broadcast(c1), dimensions={} dnorm_factor = f32[2,8] multiply(norm_factor_cube, c1_bcast) f0_dnorm_factor = f32[2,8] multiply(f0, dnorm_factor) c2 = f32[] constant(0.083333) c2_bcast = f32[2,8] broadcast(c2), dimensions={} f0_dnorm_factor_scaled = f32[2,8] multiply(f0_dnorm_factor, c2_bcast) f0_dnorm_factor_scaled_bcast = f32[2,4,6,8] broadcast(f0_dnorm_factor_scaled), dimensions={0,3} f1 = f32[2,4,6,8] multiply(input_center, f0_dnorm_factor_scaled_bcast) minus_f1 = f32[2,4,6,8] negate(f1) minus_f1_sum = f32[2,8] reduce(minus_f1, c0), dimensions={1,2}, to_apply=apply f2 = f32[2,4,6,8] multiply(norm_factor_bcast, scale_doutput) minus_f2 = f32[2,4,6,8] negate(f2) minus_f2_sum = f32[2,8] reduce(minus_f2, c0), dimensions={1,2}, to_apply=apply minus_f1_f2_sum = f32[2,8] add(minus_f1_sum, minus_f2_sum) minus_f1_f2_sum_scaled = f32[2,8] multiply(minus_f1_f2_sum, r_nelems_bcast) minus_f1_f2_sum_scaled_bcast = f32[2,4,6,8] broadcast(minus_f1_f2_sum_scaled), dimensions={0,3} f1_f2 = f32[2,4,6,8] add(f1, f2) dinput = f32[2,4,6,8] add(f1_f2, minus_f1_f2_sum_scaled_bcast) ROOT out = (f32[2,4,6,8], f32[2,4,6,8], f32[4,6], f32[4,6]) tuple(norm_scale_bias, dinput, dscale, dbias) })"; const char* optimized_hlo = R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4,6,8], {{.*}}: f32[4,6], {{.*}}: f32[4,6], {{.*}}: f32[2,4,6,8]) -> (f32[2,4,6,8], f32[2,4,6,8], f32[4,6], f32[4,6]) { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} parameter(0) ; CHECK-NEXT: [[TRANSPOSE0:%[^ ]+]] = f32[2,8,24]{2,1,0} fusion([[P0]]), kind=kLoop, calls={{.*}} ; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[16,4,6,1]{3,2,1,0} bitcast([[TRANSPOSE0]]) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[4,6]{1,0} parameter(1) ; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,4,6,1]{3,2,1,0} bitcast([[P1]]) ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4,6]{1,0} parameter(2) ; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,4,6,1]{3,2,1,0} bitcast([[P2]]) ; CHECK-NEXT: [[CC0:%[^ ]+]] = (f32[16,4,6,1]{3,2,1,0}, f32[16,1,1,1]{3,2,1,0}, f32[16,1,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]), ; CHECK: custom_call_target="__cudnn$norm", ; CHECK: backend_config={ ; CHECK-DAG: "epsilon":0.001 ; CHECK-DAG: "kind":"LAYER_FWD_TRAIN" ; CHECK: } ; CHECK-DAG: [[GTE0:%[^ ]+]] = f32[16,4,6,1]{3,2,1,0} get-tuple-element([[CC0]]), index=0 ; CHECK-DAG: [[TRANSPOSE1:%[^ ]+]] = f32[2,24,8]{2,1,0} fusion([[GTE0]]), kind=kLoop, calls={{.*}} ; CHECK-DAG: [[BITCAST:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} bitcast([[TRANSPOSE1]]) ; CHECK-DAG: [[P3:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} parameter(3) 
; CHECK-NEXT: [[TRANSPOSE2:%[^ ]+]] = f32[2,8,24]{2,1,0} fusion([[P3]]), kind=kLoop, calls={{.*}} ; CHECK-DAG: [[P3_BITCAST:%[^ ]+]] = f32[16,4,6,1]{3,2,1,0} bitcast([[TRANSPOSE2]]) ; CHECK-DAG: [[GTE1:%[^ ]+]] = f32[16,1,1,1]{3,2,1,0} get-tuple-element([[CC0]]), index=1 ; CHECK-DAG: [[GTE2:%[^ ]+]] = f32[16,1,1,1]{3,2,1,0} get-tuple-element([[CC0]]), index=2 ; CHECK-NEXT: [[CC1:%[^ ]+]] = (f32[16,4,6,1]{3,2,1,0}, f32[1,4,6,1]{3,2,1,0}, f32[1,4,6,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P3_BITCAST]], [[GTE1]], [[GTE2]]), ; CHECK: custom_call_target="__cudnn$norm", ; CHECK: backend_config={ ; CHECK-DAG: "epsilon":0 ; CHECK-DAG: "kind":"LAYER_BWD" ; CHECK: } ; CHECK-DAG: [[GTE3:%[^ ]+]] = f32[16,4,6,1]{3,2,1,0} get-tuple-element([[CC1]]), index=0 ; CHECK-DAG: [[FUSION:%[^ ]+]] = f32[2,24,8]{2,1,0} fusion([[GTE3]]), kind=kLoop, calls=[[FUSED_COMPUTATION:%[^ ]+]] ; CHECK-DAG: [[BITCAST2:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} bitcast([[FUSION]]) ; CHECK-DAG: [[GTE4:%[^ ]+]] = f32[1,4,6,1]{3,2,1,0} get-tuple-element([[CC1]]), index=1 ; CHECK-DAG: [[GTE4_BITCAST:%[^ ]+]] = f32[4,6]{1,0} bitcast([[GTE4]]) ; CHECK-DAG: [[GTE5:%[^ ]+]] = f32[1,4,6,1]{3,2,1,0} get-tuple-element([[CC1]]), index=2 ; CHECK-DAG: [[GTE5_BITCAST:%[^ ]+]] = f32[4,6]{1,0} bitcast([[GTE5]]) ; CHECK-DAG: ROOT [[OUT:%[^ ]+]] = (f32[2,4,6,8]{3,2,1,0}, f32[2,4,6,8]{3,2,1,0}, f32[4,6]{1,0}, f32[4,6]{1,0}) tuple([[BITCAST]], [[BITCAST2]], [[GTE4_BITCAST]], [[GTE5_BITCAST]]) )"; TestNorm(hlo_text, optimized_hlo); } TEST_F(CudnnNormRewriterTest, LayerNormTrainBackward4D12Degenerate2) { const char* hlo_text = R"( HloModule test apply { a = f32[] parameter(0) b = f32[] parameter(1) ROOT c = f32[] add(a,b) } ENTRY test { input = f32[2,4,1,8] parameter(0) input_square = f32[2,4,1,8] multiply(input, input) c0 = f32[] constant(0) input_square_sum = f32[2,8] reduce(input_square, c0), dimensions={1,2}, to_apply=apply reduce = f32[2,8] reduce(input, c0), dimensions={1,2}, to_apply=apply r_nelems = f32[] constant(0.25) r_nelems_bcast = f32[2,8] broadcast(r_nelems), dimensions={} input_square_mean = f32[2,8] multiply(input_square_sum,r_nelems_bcast) input_mean = f32[2,8] multiply(reduce, r_nelems_bcast) input_mean_square = f32[2,8] multiply(input_mean,input_mean) variance = f32[2,8] subtract(input_square_mean,input_mean_square) epsilon = f32[] constant(0.001) epsilon_bcast = f32[2,8] broadcast(epsilon), dimensions={} variance_plus_epsilon = f32[2,8] add(variance, epsilon_bcast) norm_factor = f32[2,8] rsqrt(variance_plus_epsilon) norm_factor_bcast = f32[2,4,1,8] broadcast(norm_factor), dimensions={0,3} input_mean_bcast = f32[2,4,1,8] broadcast(input_mean), dimensions={0,3} input_center = f32[2,4,1,8] subtract(input, input_mean_bcast) norm = f32[2,4,1,8] multiply(input_center, norm_factor_bcast) scale = f32[4,1] parameter(1) scale_bcast = f32[2,4,1,8] broadcast(scale), dimensions={1,2} norm_scale = f32[2,4,1,8] multiply(norm, scale_bcast) bias = f32[4,1] parameter(2) bias_bcast = f32[2,4,1,8] broadcast(bias), dimensions={1,2} norm_scale_bias = f32[2,4,1,8] add(norm_scale, bias_bcast) doutput = f32[2,4,1,8] parameter(3) dbias = f32[4,1] reduce(doutput, c0), dimensions={0,3}, to_apply=apply norm_doutput = f32[2,4,1,8] multiply(norm, doutput) dscale = f32[4,1] reduce(norm_doutput, c0), dimensions={0,3}, to_apply=apply scale_doutput = f32[2,4,1,8] multiply(scale_bcast, doutput) input_center_scale_doutput = f32[2,4,1,8] multiply(input_center, scale_doutput) f0 = f32[2,8] reduce(input_center_scale_doutput, c0), 
dimensions={1,2}, to_apply=apply norm_factor_cube = f32[2,8] divide(norm_factor, variance_plus_epsilon) c1 = f32[] constant(-0.5) c1_bcast = f32[2,8] broadcast(c1), dimensions={} dnorm_factor = f32[2,8] multiply(norm_factor_cube, c1_bcast) f0_dnorm_factor = f32[2,8] multiply(f0, dnorm_factor) c2 = f32[] constant(0.5) c2_bcast = f32[2,8] broadcast(c2), dimensions={} f0_dnorm_factor_scaled = f32[2,8] multiply(f0_dnorm_factor, c2_bcast) f0_dnorm_factor_scaled_bcast = f32[2,4,1,8] broadcast(f0_dnorm_factor_scaled), dimensions={0,3} f1 = f32[2,4,1,8] multiply(input_center, f0_dnorm_factor_scaled_bcast) minus_f1 = f32[2,4,1,8] negate(f1) minus_f1_sum = f32[2,8] reduce(minus_f1, c0), dimensions={1,2}, to_apply=apply f2 = f32[2,4,1,8] multiply(norm_factor_bcast, scale_doutput) minus_f2 = f32[2,4,1,8] negate(f2) minus_f2_sum = f32[2,8] reduce(minus_f2, c0), dimensions={1,2}, to_apply=apply minus_f1_f2_sum = f32[2,8] add(minus_f1_sum, minus_f2_sum) minus_f1_f2_sum_scaled = f32[2,8] multiply(minus_f1_f2_sum, r_nelems_bcast) minus_f1_f2_sum_scaled_bcast = f32[2,4,1,8] broadcast(minus_f1_f2_sum_scaled), dimensions={0,3} f1_f2 = f32[2,4,1,8] add(f1, f2) dinput = f32[2,4,1,8] add(f1_f2, minus_f1_f2_sum_scaled_bcast) ROOT out = (f32[2,4,1,8], f32[2,4,1,8], f32[4,1], f32[4,1]) tuple(norm_scale_bias, dinput, dscale, dbias) })"; const char* optimized_hlo = R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4,1,8], {{.*}}: f32[4,1], {{.*}}: f32[4,1], {{.*}}: f32[2,4,1,8]) -> (f32[2,4,1,8], f32[2,4,1,8], f32[4,1], f32[4,1]) { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4,1,8]{3,2,1,0} parameter(0) ; CHECK-NEXT: [[TRANSPOSE0:%[^ ]+]] = f32[2,8,4]{2,1,0} fusion([[P0]]), kind=kLoop, calls={{.*}} ; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[16,4,1,1]{3,2,1,0} bitcast([[TRANSPOSE0]]) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[4,1]{1,0} parameter(1) ; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P1]]) ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4,1]{1,0} parameter(2) ; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P2]]) ; CHECK-NEXT: [[CC0:%[^ ]+]] = (f32[16,4,1,1]{3,2,1,0}, f32[16,1,1,1]{3,2,1,0}, f32[16,1,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]), ; CHECK: custom_call_target="__cudnn$norm", ; CHECK: backend_config={ ; CHECK-DAG: "epsilon":0.001 ; CHECK-DAG: "kind":"LAYER_FWD_TRAIN" ; CHECK: } ; CHECK-DAG: [[GTE0:%[^ ]+]] = f32[16,4,1,1]{3,2,1,0} get-tuple-element([[CC0]]), index=0 ; CHECK-DAG: [[TRANSPOSE1:%[^ ]+]] = f32[2,4,8]{2,1,0} fusion([[GTE0]]), kind=kLoop, calls={{.*}} ; CHECK-DAG: [[BITCAST:%[^ ]+]] = f32[2,4,1,8]{3,2,1,0} bitcast([[TRANSPOSE1]]) ; CHECK-DAG: [[P3:%[^ ]+]] = f32[2,4,1,8]{3,2,1,0} parameter(3) ; CHECK-NEXT: [[TRANSPOSE2:%[^ ]+]] = f32[2,8,4]{2,1,0} fusion([[P3]]), kind=kLoop, calls={{.*}} ; CHECK-DAG: [[P3_BITCAST:%[^ ]+]] = f32[16,4,1,1]{3,2,1,0} bitcast([[TRANSPOSE2]]) ; CHECK-DAG: [[GTE1:%[^ ]+]] = f32[16,1,1,1]{3,2,1,0} get-tuple-element([[CC0]]), index=1 ; CHECK-DAG: [[GTE2:%[^ ]+]] = f32[16,1,1,1]{3,2,1,0} get-tuple-element([[CC0]]), index=2 ; CHECK-NEXT: [[CC1:%[^ ]+]] = (f32[16,4,1,1]{3,2,1,0}, f32[1,4,1,1]{3,2,1,0}, f32[1,4,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P3_BITCAST]], [[GTE1]], [[GTE2]]), ; CHECK: custom_call_target="__cudnn$norm", ; CHECK: backend_config={ ; CHECK-DAG: "epsilon":0 ; CHECK-DAG: "kind":"LAYER_BWD" ; CHECK: } ; CHECK-DAG: [[GTE3:%[^ ]+]] = f32[16,4,1,1]{3,2,1,0} get-tuple-element([[CC1]]), index=0 ; CHECK-DAG: [[FUSION0:%[^ ]+]] = f32[2,4,8]{2,1,0} 
fusion([[GTE3]]), kind=kLoop, calls=[[FUSED_COMPUTATION0:%[^ ]+]] ; CHECK-DAG: [[BITCAST2:%[^ ]+]] = f32[2,4,1,8]{3,2,1,0} bitcast([[FUSION0]]) ; CHECK-DAG: [[GTE4:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} get-tuple-element([[CC1]]), index=1 ; CHECK-DAG: [[GTE4_BITCAST:%[^ ]+]] = f32[4,1]{1,0} bitcast([[GTE4]]) ; CHECK-DAG: [[GTE5:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} get-tuple-element([[CC1]]), index=2 ; CHECK-DAG: [[GTE5_BITCAST:%[^ ]+]] = f32[4,1]{1,0} bitcast([[GTE5]]) ; CHECK-DAG: ROOT [[OUT:%[^ ]+]] = (f32[2,4,1,8]{3,2,1,0}, f32[2,4,1,8]{3,2,1,0}, f32[4,1]{1,0}, f32[4,1]{1,0}) tuple([[BITCAST]], [[BITCAST2]], [[GTE4_BITCAST]], [[GTE5_BITCAST]]) )"; TestNorm(hlo_text, optimized_hlo); } TEST_F(CudnnNormRewriterTest, DISABLED_LayerNormTrainBackward4D1DoutputReshapeSplit) { const char* hlo_text = R"( HloModule test apply { a = f32[] parameter(0) b = f32[] parameter(1) ROOT c = f32[] add(a,b) } ENTRY test { input = f32[2,4,6,8] parameter(0) input_square = f32[2,4,6,8] multiply(input, input) c0 = f32[] constant(0) input_square_sum = f32[2,6,8] reduce(input_square, c0), dimensions={1}, to_apply=apply reduce = f32[2,6,8] reduce(input, c0), dimensions={1}, to_apply=apply r_nelems = f32[] constant(0.25) r_nelems_bcast = f32[2,6,8] broadcast(r_nelems), dimensions={} input_square_mean = f32[2,6,8] multiply(input_square_sum,r_nelems_bcast) input_mean = f32[2,6,8] multiply(reduce, r_nelems_bcast) input_mean_square = f32[2,6,8] multiply(input_mean,input_mean) variance = f32[2,6,8] subtract(input_square_mean,input_mean_square) epsilon = f32[] constant(0.001) epsilon_bcast = f32[2,6,8] broadcast(epsilon), dimensions={} variance_plus_epsilon = f32[2,6,8] add(variance, epsilon_bcast) norm_factor = f32[2,6,8] rsqrt(variance_plus_epsilon) norm_factor_bcast = f32[2,4,6,8] broadcast(norm_factor), dimensions={0,2,3} input_mean_bcast = f32[2,4,6,8] broadcast(input_mean), dimensions={0,2,3} input_center = f32[2,4,6,8] subtract(input, input_mean_bcast) norm = f32[2,4,6,8] multiply(input_center, norm_factor_bcast) scale = f32[4] parameter(1) scale_bcast = f32[2,4,6,8] broadcast(scale), dimensions={1} norm_scale = f32[2,4,6,8] multiply(norm, scale_bcast) bias = f32[4] parameter(2) bias_bcast = f32[2,4,6,8] broadcast(bias), dimensions={1} norm_scale_bias = f32[2,4,6,8] add(norm_scale, bias_bcast) doutput = f32[2,4,48] parameter(3) dbias = f32[4] reduce(doutput, c0), dimensions={0,2}, to_apply=apply doutput_bitcast = f32[2,4,6,8] reshape(doutput) norm_doutput = f32[2,4,6,8] multiply(norm, doutput_bitcast) dscale = f32[4] reduce(norm_doutput, c0), dimensions={0,2,3}, to_apply=apply scale_doutput = f32[2,4,6,8] multiply(scale_bcast, doutput_bitcast) input_center_scale_doutput = f32[2,4,6,8] multiply(input_center, scale_doutput) f0 = f32[2,6,8] reduce(input_center_scale_doutput, c0), dimensions={1}, to_apply=apply norm_factor_cube = f32[2,6,8] divide(norm_factor, variance_plus_epsilon) c1 = f32[] constant(-0.5) c1_bcast = f32[2,6,8] broadcast(c1), dimensions={} dnorm_factor = f32[2,6,8] multiply(norm_factor_cube, c1_bcast) f0_dnorm_factor = f32[2,6,8] multiply(f0, dnorm_factor) c2 = f32[] constant(0.5) c2_bcast = f32[2,6,8] broadcast(c2), dimensions={} f0_dnorm_factor_scaled = f32[2,6,8] multiply(f0_dnorm_factor, c2_bcast) f0_dnorm_factor_scaled_bcast = f32[2,4,6,8] broadcast(f0_dnorm_factor_scaled), dimensions={0,2,3} f1 = f32[2,4,6,8] multiply(input_center, f0_dnorm_factor_scaled_bcast) minus_f1 = f32[2,4,6,8] negate(f1) minus_f1_sum = f32[2,6,8] reduce(minus_f1, c0), dimensions={1}, to_apply=apply f2 = f32[2,4,6,8] 
multiply(norm_factor_bcast, scale_doutput) minus_f2 = f32[2,4,6,8] negate(f2) minus_f2_sum = f32[2,6,8] reduce(minus_f2, c0), dimensions={1}, to_apply=apply minus_f1_f2_sum = f32[2,6,8] add(minus_f1_sum, minus_f2_sum) minus_f1_f2_sum_scaled = f32[2,6,8] multiply(minus_f1_f2_sum, r_nelems_bcast) minus_f1_f2_sum_scaled_bcast = f32[2,4,6,8] broadcast(minus_f1_f2_sum_scaled), dimensions={0,2,3} f1_f2 = f32[2,4,6,8] add(f1, f2) dinput = f32[2,4,6,8] add(f1_f2, minus_f1_f2_sum_scaled_bcast) ROOT out = (f32[2,4,6,8], f32[2,4,6,8], f32[4], f32[4]) tuple(norm_scale_bias, dinput, dscale, dbias) })"; const char* optimized_hlo = R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4,6,8], {{.*}}: f32[4], {{.*}}: f32[4], {{.*}}: f32[2,4,48]) -> (f32[2,4,6,8], f32[2,4,6,8], f32[4], f32[4]) { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} parameter(0) ; CHECK-NEXT: [[TRANSPOSE0:%[^ ]+]] = f32[2,6,8,4]{3,2,1,0} transpose([[P0]]), dimensions={0,2,3,1} ; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[96,4,1,1]{3,2,1,0} bitcast([[TRANSPOSE0]]) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[4]{0} parameter(1) ; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P1]]) ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4]{0} parameter(2) ; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P2]]) ; CHECK-NEXT: [[CC0:%[^ ]+]] = (f32[96,4,1,1]{3,2,1,0}, f32[96,1,1,1]{3,2,1,0}, f32[96,1,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]), ; CHECK: custom_call_target="__cudnn$norm", ; CHECK: backend_config={ ; CHECK-DAG: "epsilon":0.001 ; CHECK-DAG: "kind":"LAYER_FWD_TRAIN" ; CHECK: } ; CHECK-DAG: [[GTE0:%[^ ]+]] = f32[96,4,1,1]{3,2,1,0} get-tuple-element([[CC0]]), index=0 ; CHECK-DAG: [[P3:%[^ ]+]] = f32[2,4,48]{2,1,0} parameter(3) ; CHECK-DAG: [[FUSION0:%[^ ]+]] = f32[2,6,8,4]{3,2,1,0} fusion([[P3]]), kind=kLoop, calls=[[FUSED_COMPUTATION0:%[^ ]+]] ; CHECK-DAG: [[FUSION0_BITCAST:%[^ ]+]] = f32[96,4,1,1]{3,2,1,0} bitcast([[FUSION0]]) ; CHECK-DAG: [[GTE1:%[^ ]+]] = f32[96,1,1,1]{3,2,1,0} get-tuple-element([[CC0]]), index=1 ; CHECK-DAG: [[GTE2:%[^ ]+]] = f32[96,1,1,1]{3,2,1,0} get-tuple-element([[CC0]]), index=2 ; CHECK-NEXT: [[CC1:%[^ ]+]] = (f32[96,4,1,1]{3,2,1,0}, f32[1,4,1,1]{3,2,1,0}, f32[1,4,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[FUSION0_BITCAST]], [[GTE1]], [[GTE2]]), ; CHECK: custom_call_target="__cudnn$norm", ; CHECK: backend_config={ ; CHECK-DAG: "epsilon":0 ; CHECK-DAG: "kind":"LAYER_BWD" ; CHECK: } ; CHECK-DAG: [[GTE3:%[^ ]+]] = f32[96,4,1,1]{3,2,1,0} get-tuple-element([[CC1]]), index=0 ; CHECK-DAG: [[FUSION1:%[^ ]+]] = (f32[2,4,6,8]{3,2,1,0}, f32[2,4,6,8]{3,2,1,0}) fusion([[GTE0]], [[GTE3]]), kind=kLoop, calls=[[FUSED_COMPUTATION1:%[^ ]+]] ; CHECK-DAG: [[GTEF1:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} get-tuple-element([[FUSION1]]), index=0 ; CHECK-DAG: [[GTEF2:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} get-tuple-element([[FUSION1]]), index=1 ; CHECK-DAG: [[GTE4:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} get-tuple-element([[CC1]]), index=1 ; CHECK-DAG: [[GTE4_BITCAST:%[^ ]+]] = f32[4]{0} bitcast([[GTE4]]) ; CHECK-DAG: [[GTE5:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} get-tuple-element([[CC1]]), index=2 ; CHECK-DAG: [[GTE5_BITCAST:%[^ ]+]] = f32[4]{0} bitcast([[GTE5]]) ; CHECK-DAG: ROOT [[OUT:%[^ ]+]] = (f32[2,4,6,8]{3,2,1,0}, f32[2,4,6,8]{3,2,1,0}, f32[4]{0}, f32[4]{0}) tuple([[GTEF1]], [[GTEF2]], [[GTE4_BITCAST]], [[GTE5_BITCAST]]) )"; TestNorm(hlo_text, optimized_hlo); } TEST_F(CudnnNormRewriterTest, DISABLED_LayerNormTrainBackward4D1DoutputReshapeCombine) { const char* 
hlo_text = R"( HloModule test apply { a = f32[] parameter(0) b = f32[] parameter(1) ROOT c = f32[] add(a,b) } ENTRY test { input = f32[2,4,6,8] parameter(0) input_square = f32[2,4,6,8] multiply(input, input) c0 = f32[] constant(0) input_square_sum = f32[2,6,8] reduce(input_square, c0), dimensions={1}, to_apply=apply reduce = f32[2,6,8] reduce(input, c0), dimensions={1}, to_apply=apply r_nelems = f32[] constant(0.25) r_nelems_bcast = f32[2,6,8] broadcast(r_nelems), dimensions={} input_square_mean = f32[2,6,8] multiply(input_square_sum,r_nelems_bcast) input_mean = f32[2,6,8] multiply(reduce, r_nelems_bcast) input_mean_square = f32[2,6,8] multiply(input_mean,input_mean) variance = f32[2,6,8] subtract(input_square_mean,input_mean_square) epsilon = f32[] constant(0.001) epsilon_bcast = f32[2,6,8] broadcast(epsilon), dimensions={} variance_plus_epsilon = f32[2,6,8] add(variance, epsilon_bcast) norm_factor = f32[2,6,8] rsqrt(variance_plus_epsilon) norm_factor_bcast = f32[2,4,6,8] broadcast(norm_factor), dimensions={0,2,3} input_mean_bcast = f32[2,4,6,8] broadcast(input_mean), dimensions={0,2,3} input_center = f32[2,4,6,8] subtract(input, input_mean_bcast) norm = f32[2,4,6,8] multiply(input_center, norm_factor_bcast) scale = f32[4] parameter(1) scale_bcast = f32[2,4,6,8] broadcast(scale), dimensions={1} norm_scale = f32[2,4,6,8] multiply(norm, scale_bcast) bias = f32[4] parameter(2) bias_bcast = f32[2,4,6,8] broadcast(bias), dimensions={1} norm_scale_bias = f32[2,4,6,8] add(norm_scale, bias_bcast) doutput = f32[2,4,6,2,2,2] parameter(3) dbias = f32[4] reduce(doutput, c0), dimensions={0,2,3,4,5}, to_apply=apply doutput_bitcast = f32[2,4,6,8] reshape(doutput) norm_doutput = f32[2,4,6,8] multiply(norm, doutput_bitcast) dscale = f32[4] reduce(norm_doutput, c0), dimensions={0,2,3}, to_apply=apply scale_doutput = f32[2,4,6,8] multiply(scale_bcast, doutput_bitcast) input_center_scale_doutput = f32[2,4,6,8] multiply(input_center, scale_doutput) f0 = f32[2,6,8] reduce(input_center_scale_doutput, c0), dimensions={1}, to_apply=apply norm_factor_cube = f32[2,6,8] divide(norm_factor, variance_plus_epsilon) c1 = f32[] constant(-0.5) c1_bcast = f32[2,6,8] broadcast(c1), dimensions={} dnorm_factor = f32[2,6,8] multiply(norm_factor_cube, c1_bcast) f0_dnorm_factor = f32[2,6,8] multiply(f0, dnorm_factor) c2 = f32[] constant(0.5) c2_bcast = f32[2,6,8] broadcast(c2), dimensions={} f0_dnorm_factor_scaled = f32[2,6,8] multiply(f0_dnorm_factor, c2_bcast) f0_dnorm_factor_scaled_bcast = f32[2,4,6,8] broadcast(f0_dnorm_factor_scaled), dimensions={0,2,3} f1 = f32[2,4,6,8] multiply(input_center, f0_dnorm_factor_scaled_bcast) minus_f1 = f32[2,4,6,8] negate(f1) minus_f1_sum = f32[2,6,8] reduce(minus_f1, c0), dimensions={1}, to_apply=apply f2 = f32[2,4,6,8] multiply(norm_factor_bcast, scale_doutput) minus_f2 = f32[2,4,6,8] negate(f2) minus_f2_sum = f32[2,6,8] reduce(minus_f2, c0), dimensions={1}, to_apply=apply minus_f1_f2_sum = f32[2,6,8] add(minus_f1_sum, minus_f2_sum) minus_f1_f2_sum_scaled = f32[2,6,8] multiply(minus_f1_f2_sum, r_nelems_bcast) minus_f1_f2_sum_scaled_bcast = f32[2,4,6,8] broadcast(minus_f1_f2_sum_scaled), dimensions={0,2,3} f1_f2 = f32[2,4,6,8] add(f1, f2) dinput = f32[2,4,6,8] add(f1_f2, minus_f1_f2_sum_scaled_bcast) ROOT out = (f32[2,4,6,8], f32[2,4,6,8], f32[4], f32[4]) tuple(norm_scale_bias, dinput, dscale, dbias) })"; const char* optimized_hlo = R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,4,6,8], {{.*}}: f32[4], {{.*}}: f32[4], {{.*}}: f32[2,4,6,2,2,2]) -> (f32[2,4,6,8], f32[2,4,6,8], f32[4], f32[4]) { 
; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} parameter(0) ; CHECK-NEXT: [[TRANSPOSE0:%[^ ]+]] = f32[2,6,8,4]{3,2,1,0} transpose([[P0]]), dimensions={0,2,3,1} ; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[96,4,1,1]{3,2,1,0} bitcast([[TRANSPOSE0]]) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[4]{0} parameter(1) ; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P1]]) ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4]{0} parameter(2) ; CHECK-NEXT: [[P2_BITCAST:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} bitcast([[P2]]) ; CHECK-NEXT: [[CC0:%[^ ]+]] = (f32[96,4,1,1]{3,2,1,0}, f32[96,1,1,1]{3,2,1,0}, f32[96,1,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[P2_BITCAST]]), ; CHECK: custom_call_target="__cudnn$norm", ; CHECK: backend_config={ ; CHECK-DAG: "epsilon":0.001 ; CHECK-DAG: "kind":"LAYER_FWD_TRAIN" ; CHECK: } ; CHECK-DAG: [[GTE0:%[^ ]+]] = f32[96,4,1,1]{3,2,1,0} get-tuple-element([[CC0]]), index=0 ; CHECK-DAG: [[P3:%[^ ]+]] = f32[2,4,6,2,2,2]{5,4,3,2,1,0} parameter(3) ; CHECK-DAG: [[FUSION0:%[^ ]+]] = f32[2,6,8,4]{3,2,1,0} fusion([[P3]]), kind=kLoop, calls=[[FUSED_COMPUTATION0:%[^ ]+]] ; CHECK-DAG: [[FUSION0_BITCAST:%[^ ]+]] = f32[96,4,1,1]{3,2,1,0} bitcast([[FUSION0]]) ; CHECK-DAG: [[GTE1:%[^ ]+]] = f32[96,1,1,1]{3,2,1,0} get-tuple-element([[CC0]]), index=1 ; CHECK-DAG: [[GTE2:%[^ ]+]] = f32[96,1,1,1]{3,2,1,0} get-tuple-element([[CC0]]), index=2 ; CHECK-NEXT: [[CC1:%[^ ]+]] = (f32[96,4,1,1]{3,2,1,0}, f32[1,4,1,1]{3,2,1,0}, f32[1,4,1,1]{3,2,1,0}, u8[{{.*}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]], [[FUSION0_BITCAST]], [[GTE1]], [[GTE2]]), ; CHECK: custom_call_target="__cudnn$norm", ; CHECK: backend_config={ ; CHECK-DAG: "epsilon":0 ; CHECK-DAG: "kind":"LAYER_BWD" ; CHECK: } ; CHECK-DAG: [[GTE3:%[^ ]+]] = f32[96,4,1,1]{3,2,1,0} get-tuple-element([[CC1]]), index=0 ; CHECK-DAG: [[FUSION1:%[^ ]+]] = (f32[2,4,6,8]{3,2,1,0}, f32[2,4,6,8]{3,2,1,0}) fusion([[GTE0]], [[GTE3]]), kind=kLoop, calls=[[FUSED_COMPUTATION1:%[^ ]+]] ; CHECK-DAG: [[GTEF1:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} get-tuple-element([[FUSION1]]), index=0 ; CHECK-DAG: [[GTEF2:%[^ ]+]] = f32[2,4,6,8]{3,2,1,0} get-tuple-element([[FUSION1]]), index=1 ; CHECK-DAG: [[GTE4:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} get-tuple-element([[CC1]]), index=1 ; CHECK-DAG: [[GTE4_BITCAST:%[^ ]+]] = f32[4]{0} bitcast([[GTE4]]) ; CHECK-DAG: [[GTE5:%[^ ]+]] = f32[1,4,1,1]{3,2,1,0} get-tuple-element([[CC1]]), index=2 ; CHECK-DAG: [[GTE5_BITCAST:%[^ ]+]] = f32[4]{0} bitcast([[GTE5]]) ; CHECK-DAG: ROOT [[OUT:%[^ ]+]] = (f32[2,4,6,8]{3,2,1,0}, f32[2,4,6,8]{3,2,1,0}, f32[4]{0}, f32[4]{0}) tuple([[GTEF1]], [[GTEF2]], [[GTE4_BITCAST]], [[GTE5_BITCAST]]) )"; TestNorm(hlo_text, optimized_hlo); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/cudnn_norm_rewriter.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/cudnn_norm_rewriter_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
1417bb56-2ee3-4b23-9a08-1dd6220d0e60
cpp
tensorflow/tensorflow
reduction_layout_normalizer
third_party/xla/xla/service/gpu/transforms/reduction_layout_normalizer.cc
third_party/xla/xla/service/gpu/transforms/reduction_layout_normalizer_test.cc
#include "xla/service/gpu/transforms/reduction_layout_normalizer.h" #include <cstdint> #include <memory> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_set.h" #include "absl/container/inlined_vector.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/dfs_hlo_visitor_with_default.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/layout.h" #include "xla/layout_util.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/status_macros.h" #include "xla/util.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { class EnforceMinorToMajorReduceOpVisitor : public DfsHloRewriteVisitor { absl::Status HandleReduce(HloInstruction *hlo) override { auto reduce = Cast<HloReduceInstruction>(hlo); VLOG(5) << "Input: " << reduce->ToString(); int operand_idx = -1; absl::InlinedVector<HloInstruction *, 2> canonical_reduce_inputs; absl::InlinedVector<Shape, 2> new_reduce_shapes; DimensionVector out_reduce_dimensions; const Shape &first_instruction_shape = reduce->inputs()[0]->shape(); for (HloInstruction *operand : reduce->inputs()) { operand_idx++; if (operand_idx != 0 && operand->shape().layout() != first_instruction_shape.layout()) { return FailedPrecondition( "Layout assignment should have assigned the same layout to all " "reduce inputs"); } const Shape &operand_shape = operand->shape(); const Layout &operand_layout = operand_shape.layout(); const Shape &reduce_shape = reduce->shape().IsTuple() ? reduce->shape().tuple_shapes(operand_idx) : reduce->shape(); DimensionVector new_reduce_dimensions; DimensionVector new_operand_shape_data; DimensionVector new_reduce_shape_data; DimensionVector new_reduce_shape_layout(reduce_shape.rank()); std::vector<int64_t> reduce_shape_logical_to_physical = LayoutUtil::MakeLogicalToPhysical(reduce_shape.layout()); auto to_reduce_logical_dim = [&](int64_t op_logical_dim) { return op_logical_dim - absl::c_count_if(reduce->dimensions(), [&](int64_t dim) { CHECK(dim != op_logical_dim); return dim < op_logical_dim; }); }; for (int i = 0; i < operand_shape.rank(); i++) { int64_t major_to_minor_dim_idx = operand_shape.rank() - i - 1; int64_t logical_dim = operand_layout.minor_to_major(major_to_minor_dim_idx); int64_t dim_size = operand_shape.dimensions(logical_dim); VLOG(5) << "Processing logical dimension " << logical_dim << " of size " << dim_size; new_operand_shape_data.push_back(dim_size); if (absl::c_linear_search(reduce->dimensions(), logical_dim)) { new_reduce_dimensions.push_back(i); } else { new_reduce_shape_data.push_back(dim_size); int64_t logical_reduce_dim = to_reduce_logical_dim(logical_dim); int64_t physical_reduce_dim = reduce_shape_logical_to_physical[logical_reduce_dim]; VLOG(5) << "logical_reduce_dim = " << logical_reduce_dim << ", " << "physical_reduce_dim = " << physical_reduce_dim; new_reduce_shape_layout[reduce_shape.rank() - physical_reduce_dim - 1] = new_reduce_shape_data.size() - 1; } } Shape new_operand_shape = ShapeUtil::MakeShape( operand_shape.element_type(), new_operand_shape_data); Shape new_reduce_shape = ShapeUtil::MakeShapeWithDenseLayout( reduce_shape.element_type(), new_reduce_shape_data, new_reduce_shape_layout); if (new_operand_shape == operand_shape && reduce->inputs().size() == 1) { return absl::OkStatus(); } HloInstruction 
*canonical_reduce_input = new_operand_shape != operand_shape ? reduce->parent()->AddInstruction( HloInstruction::CreateBitcast(new_operand_shape, operand)) : operand; canonical_reduce_input->set_metadata(operand->metadata()); VLOG(5) << "Reduction input: " << canonical_reduce_input->ToString(); new_reduce_shapes.push_back(new_reduce_shape); canonical_reduce_inputs.push_back(canonical_reduce_input); if (out_reduce_dimensions.empty()) { out_reduce_dimensions = new_reduce_dimensions; } else { TF_RET_CHECK(out_reduce_dimensions == new_reduce_dimensions); } } Shape new_reduce_shape = ShapeUtil::MakeMaybeTupleShape(new_reduce_shapes); std::unique_ptr<HloInstruction> new_reduce = HloInstruction::CreateReduce( new_reduce_shape, canonical_reduce_inputs, reduce->init_values(), out_reduce_dimensions, reduce->to_apply()); VLOG(5) << "Generated new reduction: " << new_reduce->ToString(); const Shape &orig_reduce_shape = reduce->shape(); if (new_reduce_shape != orig_reduce_shape) { HloInstruction *wrapped_reduce = reduce->parent()->AddInstruction(std::move(new_reduce)); if (!new_reduce_shape.IsTuple()) { new_reduce = HloInstruction::CreateBitcast(reduce->shape(), wrapped_reduce); } else { absl::InlinedVector<HloInstruction *, 2> out; for (int oidx = 0; oidx < reduce->input_count(); oidx++) { HloInstruction *gte = reduce->parent()->AddInstruction( HloInstruction::CreateGetTupleElement(wrapped_reduce, oidx)); out.push_back( reduce->parent()->AddInstruction(HloInstruction::CreateBitcast( orig_reduce_shape.tuple_shapes(oidx), gte))); } new_reduce = HloInstruction::CreateTuple(out); } } VLOG(5) << "Generated output: " << new_reduce->ToString(); return ReplaceWithNewInstruction(reduce, std::move(new_reduce)); } }; absl::StatusOr<bool> ReductionLayoutNormalizer::Run( HloModule *module, const absl::flat_hash_set<absl::string_view> &execution_threads) { TF_ASSIGN_OR_RETURN(bool changed, EnforceMinorToMajorReduceOpVisitor().RunOnModule( module, execution_threads)); return changed; } } }
#include "xla/service/gpu/transforms/reduction_layout_normalizer.h" #include <optional> #include <utility> #include <gmock/gmock.h> #include "absl/strings/string_view.h" #include "xla/error_spec.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/errors.h" #include "tsl/platform/status_matchers.h" #include "tsl/platform/statusor.h" #include "tsl/platform/test.h" namespace xla { namespace gpu { namespace { using ::testing::HasSubstr; using ::tsl::testing::StatusIs; class ReductionLayoutNormalizerTest : public HloTestBase { public: void CheckReductionLayoutNormalizer( absl::string_view hlo, std::optional<absl::string_view> expected) { RunAndFilecheckHloRewrite(hlo, ReductionLayoutNormalizer{}, expected); } }; TEST_F(ReductionLayoutNormalizerTest, LayoutCanonicalizerTest) { const char* hlo = R"( HloModule ReduceWithLayoutChange add { x0 = f32[] parameter(0) y0 = f32[] parameter(1) ROOT add0 = f32[] add(x0, y0) } ENTRY main { arg0 = f32[4,5,5,16,12,12,3,3]{2,3,5,4,0,7,6,1} parameter(0) constant0 = f32[] constant(0) ROOT reduce0 = f32[4,5,16,12,12]{4,3,2,1,0} reduce(arg0, constant0), dimensions={1,6,7}, to_apply=add } )"; CheckReductionLayoutNormalizer(hlo, R"( )"); } TEST_F(ReductionLayoutNormalizerTest, LayoutCanonicalizerTestVariadic) { const char* hlo = R"( HloModule ReduceWithLayoutChangeVariadic argmax { running_max = f32[] parameter(0) running_max_idx = u32[] parameter(1) current_value = f32[] parameter(2) current_value_idx = u32[] parameter(3) current = (f32[], u32[]) tuple(running_max, running_max_idx) potential = (f32[], u32[]) tuple(current_value, current_value_idx) cmp_code = pred[] compare(current_value, running_max), direction=GT new_max = f32[] select(cmp_code, current_value, running_max) new_idx = u32[] select(cmp_code, current_value_idx, running_max_idx) ROOT out = (f32[], u32[]) tuple(new_max, new_idx) } ENTRY main { arg0 = f32[4,5,5,16,12,12,3,3]{2,3,5,4,0,7,6,1} parameter(0) idxs = u32[4,5,5,16,12,12,3,3]{2,3,5,4,0,7,6,1} parameter(1) constant0 = f32[] constant(0) constant1 = u32[] constant(0) ROOT reduce0 = ( f32[4,5,16,12,12]{4,3,2,1,0}, u32[4,5,16,12,12]{4,3,2,1,0} ) reduce(arg0, idxs, constant0,constant1), dimensions={1,6,7}, to_apply=argmax } )"; CheckReductionLayoutNormalizer(hlo, R"( )"); } TEST_F(ReductionLayoutNormalizerTest, LayoutCanonicalizerTestVariadicDifferentLayouts) { const char* hlo = R"( HloModule ReduceWithLayoutChangeVariadicDifferent argmax { running_max = f32[] parameter(0) running_max_idx = u32[] parameter(1) current_value = f32[] parameter(2) current_value_idx = u32[] parameter(3) current = (f32[], u32[]) tuple(running_max, running_max_idx) potential = (f32[], u32[]) tuple(current_value, current_value_idx) cmp_code = pred[] compare(current_value, running_max), direction=GT new_max = f32[] select(cmp_code, current_value, running_max) new_idx = u32[] select(cmp_code, current_value_idx, running_max_idx) ROOT out = (f32[], u32[]) tuple(new_max, new_idx) } ENTRY main { arg0 = f32[2,3,4,7]{2,1,0,3} parameter(0) idxs = u32[2,3,4,7]{3,2,1,0} parameter(1) constant0 = f32[] constant(0) constant1 = u32[] constant(0) ROOT reduce0 = ( f32[2,3,4]{2,1,0}, u32[2,3,4]{2,1,0} ) reduce(arg0, idxs, constant0,constant1), dimensions={3}, to_apply=argmax } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo)); auto cloned_module = module->Clone(); ReductionLayoutNormalizer normalizer; EXPECT_THAT(normalizer.Run(module.get()), StatusIs(tsl::error::FAILED_PRECONDITION, HasSubstr("Layout assignment"))); 
EXPECT_TRUE(RunAndCompare(std::move(cloned_module), ErrorSpec{1e-5, 1e-5})); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/reduction_layout_normalizer.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/reduction_layout_normalizer_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
a07ea66a-4414-454d-a9cf-f85639ffbeb2
cpp
tensorflow/tensorflow
topk_specializer
third_party/xla/xla/service/gpu/transforms/topk_specializer.cc
third_party/xla/xla/service/gpu/transforms/topk_specializer_test.cc
#include "xla/service/gpu/transforms/topk_specializer.h" #include <stddef.h> #include <initializer_list> #include <string> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/dfs_hlo_visitor_with_default.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/primitive_util.h" #include "xla/service/hlo.pb.h" #include "xla/service/tuple_util.h" #include "xla/shape.h" #include "xla/status_macros.h" #include "xla/util.h" #include "xla/xla_data.pb.h" namespace xla { namespace gpu { namespace { absl::StatusOr<HloInstruction*> SmallBufferOptimization( HloCustomCallInstruction* topk) { Shape data_shape = topk->operand(0)->shape(); auto supported_dtypes = {F32, BF16}; if (!absl::c_linear_search(supported_dtypes, data_shape.element_type())) { return InvalidArgument( "Invalid Dtype: %s", primitive_util::LowercasePrimitiveTypeName(data_shape.element_type())); } if (data_shape.dimensions_size() > 2) { return InvalidArgument("Invalid input dimensions: %s", data_shape.ToString()); } bool has_batch = data_shape.dimensions_size() == 2; constexpr size_t max_k = 16; constexpr size_t min_n = 1024; size_t n = data_shape.dimensions(has_batch ? 1 : 0); size_t k = topk->shape().tuple_shapes(0).dimensions(has_batch ? 1 : 0); if (k > max_k) { return InvalidArgument("k too large (%d), must be <= %d", k, max_k); } if (n < min_n) { return InvalidArgument("Input too small (n=%d, min_n=%d)", n, min_n); } HloComputation* comp = topk->parent(); HloInstruction* new_topk = comp->AddInstruction(HloInstruction::CreateCustomCall( topk->shape(), topk->operands(), topk->to_apply(), "__gpu$TopK", "", CustomCallApiVersion::API_VERSION_TYPED_FFI)); return TupleUtil::ExtractPrefix(new_topk, 2); } class SpecializeTopkVisitor : public DfsHloRewriteVisitor { public: absl::Status HandleCustomCall(HloInstruction* inst) override { HloCustomCallInstruction* topk = DynCast<HloCustomCallInstruction>(inst); if (topk == nullptr || topk->custom_call_target() != "TopK") { return absl::OkStatus(); } TF_RET_CHECK(topk->operand_count() == 1); if (auto small_topk = SmallBufferOptimization(topk); small_topk.ok()) { return ReplaceInstruction(topk, *small_topk); } else { VLOG(2) << "Small TopK optimization doesn't match: " << small_topk.status(); } return absl::OkStatus(); } }; } absl::StatusOr<bool> TopkSpecializer::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { return SpecializeTopkVisitor().RunOnModule(module, execution_threads); } } }
#include "xla/service/gpu/transforms/topk_specializer.h" #include <stddef.h> #include <memory> #include <optional> #include <string> #include <string_view> #include <tuple> #include <utility> #include <gtest/gtest.h> #include "absl/container/flat_hash_set.h" #include "absl/status/status.h" #include "absl/strings/string_view.h" #include "absl/strings/substitute.h" #include "xla/hlo/ir/dfs_hlo_visitor_with_default.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/pass/hlo_pass_interface.h" #include "xla/service/platform_util.h" #include "xla/service/topk_rewriter.h" #include "xla/shape_util.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/statusor.h" namespace xla { namespace { using ::testing::Combine; using ::testing::Values; using ParameterizedInterface = ::testing::WithParamInterface<std::tuple<int, int, int, std::string_view>>; class TopkTest : public HloTestBase, public ParameterizedInterface { public: TopkTest() : HloTestBase(*PlatformUtil::GetPlatform("gpu"), *PlatformUtil::GetPlatform("gpu"), true, true, {}) {} protected: absl::StatusOr<std::unique_ptr<HloModule>> TopkHlo(int n, int k, int batch_size, std::string_view dtype) { return ParseAndReturnVerifiedModule(absl::Substitute( R"( %compare { %p.1.lhs.40628 = s32[] parameter(2) %p.1.rhs.40629 = s32[] parameter(3) %constant.40630 = pred[] constant(true) %broadcast.40631 = pred[] broadcast(pred[] %constant.40630), dimensions={} %p.0.lhs.40626 = f32[] parameter(0) %p.0.rhs.40627 = f32[] parameter(1) %compare.40632 = pred[] compare(f32[] %p.0.lhs.40626, f32[] %p.0.rhs.40627), direction=GT, type=TOTALORDER ROOT %select.40633 = pred[] select(pred[] %broadcast.40631, pred[] %compare.40632, pred[] %broadcast.40631) } ENTRY top_k { %arg = $3[$2,$0] parameter(0) ROOT %result = ($3[$2,$1], s32[$2,$1]) custom-call(%arg), custom_call_target="TopK", to_apply=%compare } )", n, k, batch_size, dtype)); } }; class GeneralizeTopkVisitor : public DfsHloRewriteVisitor { public: absl::Status HandleCustomCall(HloInstruction* inst) override { HloCustomCallInstruction* topk = DynCast<HloCustomCallInstruction>(inst); if (topk == nullptr || topk->custom_call_target() != "__gpu$TopK") { return absl::OkStatus(); } HloComputation* comp = topk->parent(); auto original_shape = ShapeUtil::SliceTuple(topk->shape(), 0, 2); HloInstruction* original_topk = comp->AddInstruction(HloInstruction::CreateCustomCall( original_shape, topk->operands(), topk->to_apply(), "TopK")); HloInstruction* new_tuple = topk->users()[0]->users()[0]; return ReplaceInstruction(new_tuple, original_topk); } }; class GeneralizeTopk : public HloModulePass { public: absl::string_view name() const override { return "generalized-topk"; } using HloPassInterface::Run; absl::StatusOr<bool> Run(HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) override { return GeneralizeTopkVisitor().RunOnModule(module, execution_threads); } }; void ToSortAndSlice(HloModule* module) { TF_ASSERT_OK_AND_ASSIGN(bool changed, GeneralizeTopk().Run(module)); ASSERT_TRUE(changed); TF_ASSERT_OK_AND_ASSIGN(changed, TopkDecomposer().Run(module)); ASSERT_TRUE(changed); } TEST_P(TopkTest, ProducesCorrectResult) { const auto [n_kb, k, batch_size, dtype] = GetParam(); const size_t n = n_kb * 1024; TF_ASSERT_OK_AND_ASSIGN(auto topk_module, TopkHlo(n, k, batch_size, dtype)); TF_ASSERT_OK_AND_ASSIGN(bool changed, 
gpu::TopkSpecializer().Run(topk_module.get())); ASSERT_TRUE(changed); EXPECT_TRUE( RunAndCompare(std::move(topk_module), std::nullopt, ToSortAndSlice)); } INSTANTIATE_TEST_SUITE_P( TopkTests, TopkTest, Combine( Values(1, 8, 12, 32), Values(1, 2, 4, 8, 16, 7, 12), Values(1, 16, 32, 64, 128), Values(absl::string_view("f32"), "bf16")), [](const auto& info) { return absl::Substitute("n$0KiB_k$1_batch_size$2_$3", std::get<0>(info.param), std::get<1>(info.param), std::get<2>(info.param), std::get<3>(info.param)); }); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/topk_specializer.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/topk_specializer_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
d1d92fbe-e80a-43c3-9c83-84c759e94301
cpp
tensorflow/tensorflow
dot_dimension_sorter
third_party/xla/xla/service/gpu/transforms/dot_dimension_sorter.cc
third_party/xla/xla/service/gpu/transforms/dot_dimension_sorter_test.cc
#include "xla/service/gpu/transforms/dot_dimension_sorter.h" #include <cstdint> #include <memory> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_set.h" #include "absl/status/status.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/layout_util.h" #include "xla/permutation_util.h" #include "xla/util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" namespace xla { namespace gpu { namespace { absl::Status SortDotDimensions(HloDotInstruction* dot) { const DotDimensionNumbers& dims = dot->dot_dimension_numbers(); DotDimensionNumbers new_dims(dims); new_dims.clear_lhs_contracting_dimensions(); new_dims.clear_rhs_contracting_dimensions(); const bool sort_by_lhs = DistinctNumbersAreConsecutiveIfSorted(dims.lhs_contracting_dimensions()); const absl::Span<const int64_t>& sort_key = sort_by_lhs ? dims.lhs_contracting_dimensions() : dims.rhs_contracting_dimensions(); std::vector<int64_t> permutation; for (const int64_t a : sort_key) { permutation.push_back(a - *absl::c_min_element(sort_key)); } const std::vector<int64_t> sorted_lhs = Permute(dims.lhs_contracting_dimensions(), permutation); *new_dims.mutable_lhs_contracting_dimensions() = {sorted_lhs.begin(), sorted_lhs.end()}; const std::vector<int64_t> sorted_rhs = Permute(dims.rhs_contracting_dimensions(), permutation); *new_dims.mutable_rhs_contracting_dimensions() = {sorted_rhs.begin(), sorted_rhs.end()}; std::unique_ptr<HloInstruction> new_dot = HloInstruction::CreateDot( dot->shape(), dot->mutable_operand(0), dot->mutable_operand(1), new_dims, dot->precision_config(), {dot->sparsity().begin(), dot->sparsity().end()}, absl::MakeSpan(dot->operands()).subspan(HloDotInstruction::kOperands)); dot->SetupDerivedInstruction(new_dot.get()); VLOG(3) << "Sorted dot() dimensions:\n" << "\t before: " << dot->ToString() << "\n" << "\t after: " << new_dot->ToString(); return dot->parent()->ReplaceWithNewInstruction(dot, std::move(new_dot)); } } absl::StatusOr<bool> DotDimensionSorter::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { std::vector<HloInstruction*> dots_to_process; for (const HloComputation* computation : module->MakeNonfusionComputations(execution_threads)) { for (HloInstruction* instr : computation->instructions()) { if (instr->opcode() != HloOpcode::kDot) { continue; } if ((instr->operand(0)->shape().has_layout() && !LayoutUtil::IsMonotonicWithDim0Major( instr->operand(0)->shape().layout())) || (instr->operand(1)->shape().has_layout() && !LayoutUtil::IsMonotonicWithDim0Major( instr->operand(1)->shape().layout()))) { continue; } const DotDimensionNumbers& dims = instr->dot_dimension_numbers(); if (dims.lhs_contracting_dimensions_size() == 0) { continue; } const bool cons_lhs = DistinctNumbersAreConsecutiveIfSorted( dims.lhs_contracting_dimensions()); const bool cons_rhs = DistinctNumbersAreConsecutiveIfSorted( dims.rhs_contracting_dimensions()); const bool sorted_lhs = absl::c_is_sorted(dims.lhs_contracting_dimensions()); const bool sorted_rhs = absl::c_is_sorted(dims.rhs_contracting_dimensions()); if ((cons_lhs && !sorted_lhs && !cons_rhs) || (cons_rhs && !sorted_rhs && !cons_lhs) || (cons_lhs && !sorted_lhs && cons_rhs && !sorted_rhs)) { 
dots_to_process.push_back(instr); } } } if (dots_to_process.empty()) { return false; } for (HloInstruction* dot : dots_to_process) { TF_RETURN_IF_ERROR(SortDotDimensions(Cast<HloDotInstruction>(dot))); } return true; } } }
#include "xla/service/gpu/transforms/dot_dimension_sorter.h" #include <memory> #include <gtest/gtest.h> #include "xla/error_spec.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/service/gpu/tests/gpu_codegen_test.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { class WithoutDotDimensionSorterTest : public GpuCodegenTest { public: DebugOptions GetDebugOptionsForTest() override { DebugOptions debug_options = GpuCodegenTest::GetDebugOptionsForTest(); debug_options.add_xla_disable_hlo_passes("dot_dimension_sorter"); return debug_options; } }; TEST_F(WithoutDotDimensionSorterTest, UnsortedDimsCreateTransposes) { const char* hlo_text = R"( HloModule m ENTRY e { p0 = f16[1,14,9,32] parameter(0) p1 = f16[12,9,32] parameter(1) ROOT _ = f16[1,14,12] dot(p0, p1), lhs_contracting_dims={3,2}, rhs_contracting_dims={2,1} } )"; MatchOptimizedHlo(hlo_text, R"( ; CHECK: transpose )"); } TEST_F(WithoutDotDimensionSorterTest, SortedDimsDoNotCreateTransposes) { const char* hlo_text = R"( HloModule m ENTRY e { p0 = f16[1,14,9,32] parameter(0) p1 = f16[12,9,32] parameter(1) ROOT _ = f16[1,14,12] dot(p0, p1), lhs_contracting_dims={2,3}, rhs_contracting_dims={1,2} } )"; MatchOptimizedHlo(hlo_text, R"( ; CHECK-NOT: transpose )"); } TEST_F(WithoutDotDimensionSorterTest, DimOrderCanBeChanged) { const char* hlo_text_ref = R"( HloModule m ENTRY e { p0 = f16[1,14,9,32] parameter(0) p1 = f16[12,9,32] parameter(1) ROOT _ = f16[1,14,12] dot(p0, p1), lhs_contracting_dims={3,2}, rhs_contracting_dims={2,1} } )"; const char* hlo_text_modified = R"( HloModule m ENTRY e { p0 = f16[1,14,9,32] parameter(0) p1 = f16[12,9,32] parameter(1) ROOT _ = f16[1,14,12] dot(p0, p1), lhs_contracting_dims={2,3}, rhs_contracting_dims={1,2} } )"; EXPECT_TRUE(RunAndCompareTwoModules(hlo_text_ref, hlo_text_modified, ErrorSpec{1e-5, 1e-3}, true)); } using DotDimensionSorterTest = GpuCodegenTest; TEST_F(DotDimensionSorterTest, SortContractingDims) { const char* module_string = R"( HloModule m ENTRY e { p0 = f16[1,144,96,32] parameter(0) p1 = f16[122,96,32] parameter(1) ROOT _ = f16[1,144,122] dot(p0, p1), lhs_contracting_dims={3,2}, rhs_contracting_dims={2,1} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_string)); const auto& dims = module->entry_computation()->root_instruction()->dot_dimension_numbers(); EXPECT_EQ(dims.lhs_contracting_dimensions(0), 3); EXPECT_EQ(dims.lhs_contracting_dimensions(1), 2); EXPECT_EQ(dims.rhs_contracting_dimensions(0), 2); EXPECT_EQ(dims.rhs_contracting_dimensions(1), 1); TF_ASSERT_OK_AND_ASSIGN(bool modified, DotDimensionSorter().Run(module.get())); EXPECT_TRUE(modified); const auto& dims2 = module->entry_computation()->root_instruction()->dot_dimension_numbers(); EXPECT_EQ(dims2.lhs_contracting_dimensions(0), 2); EXPECT_EQ(dims2.lhs_contracting_dimensions(1), 3); EXPECT_EQ(dims2.rhs_contracting_dimensions(0), 1); EXPECT_EQ(dims2.rhs_contracting_dimensions(1), 2); } TEST_F(DotDimensionSorterTest, NothingToReorder) { const char* module_string = R"( HloModule m ENTRY e { p0 = f16[1,144,96,32] parameter(0) p1 = f16[122,96,32] parameter(1) ROOT _ = f16[1,144,122] dot(p0, p1), lhs_contracting_dims={2,3}, rhs_contracting_dims={1,2} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_string)); TF_ASSERT_OK_AND_ASSIGN(bool modified, DotDimensionSorter().Run(module.get())); EXPECT_FALSE(modified); } 
TEST_F(DotDimensionSorterTest, SparseDotSortContractingDims) { const char* module_string = R"( HloModule m ENTRY e { p0 = f16[1,144,96,16] parameter(0) p1 = f16[122,96,32] parameter(1) meta = u16[1,144,96,2] parameter(2) ROOT _ = f16[1,144,122] dot(p0, p1, meta), sparsity=L.3@2:4, lhs_contracting_dims={3,2}, rhs_contracting_dims={2,1} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_string)); TF_ASSERT_OK_AND_ASSIGN(bool modified, DotDimensionSorter().Run(module.get())); EXPECT_TRUE(modified); HloDotInstruction* dot = DynCast<HloDotInstruction>( module->entry_computation()->root_instruction()); EXPECT_TRUE(dot != nullptr && dot->sparse_operands() == 1); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/dot_dimension_sorter.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/dot_dimension_sorter_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
94fd6b98-724b-4a98-b0f9-05dcbf12c062
cpp
tensorflow/tensorflow
async_wrapper
third_party/xla/xla/service/gpu/transforms/async_wrapper.cc
third_party/xla/xla/service/gpu/transforms/async_wrapper_test.cc
#include "xla/service/gpu/transforms/async_wrapper.h" #include <algorithm> #include <deque> #include <iterator> #include "absl/container/flat_hash_set.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/shape_util.h" #include "xla/util.h" #include "tsl/platform/errors.h" namespace xla::gpu { absl::StatusOr<bool> AsyncWrapper::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { bool changed = false; XLA_VLOG_LINES( 1, absl::StrCat("AsyncWrapper will process the following module:\n", module->ToString())); std::deque<HloComputation*> computations; computations.push_back(module->entry_computation()); while (!computations.empty()) { HloComputation* computation = computations.front(); computations.pop_front(); for (HloInstruction* instruction : computation->MakeInstructionPostOrder()) { if (predicate_(instruction)) { XLA_VLOG_LINES( 1, absl::StrCat( "AsyncWrapper will make the following instruction async:\n", instruction->ToString())); TF_RETURN_IF_ERROR( computation ->CreateAsyncInstructions(instruction, {ShapeUtil::MakeScalarShape(U32)}) .status()); changed = true; continue; } if (instruction->opcode() == HloOpcode::kCall) { std::copy(instruction->called_computations().begin(), instruction->called_computations().end(), std::back_inserter(computations)); } } } XLA_VLOG_LINES( 1, absl::StrCat("AsyncWrapper finished processing the following module:\n", module->ToString())); return changed; } }
#include "xla/service/gpu/transforms/async_wrapper.h" #include <memory> #include <utility> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/pass/hlo_pass_interface.h" #include "xla/literal.h" #include "xla/literal_util.h" #include "xla/tests/hlo_test_base.h" #include "xla/tests/literal_test_util.h" #include "xla/tests/verified_hlo_module.h" #include "tsl/platform/status_matchers.h" namespace xla::gpu { namespace { using ::tsl::testing::IsOkAndHolds; class AsyncWrapperTest : public HloTestBase {}; int CountAsyncInstructions(HloComputation* computation) { int count = 0; for (const HloInstruction* instruction : computation->instructions()) { if (instruction->IsAsynchronous()) ++count; } return count; } TEST_F(AsyncWrapperTest, BasicFusion) { const char* hlo_text = R"( HloModule m double1 { p0 = f32[1] parameter(0) ROOT add = f32[1] add(p0, p0) } double2 { p0 = f32[1] parameter(0) ROOT add = f32[1] add(p0, p0) } ENTRY main { p0 = f32[1] parameter(0) agg1 = f32[1] fusion(p0), kind=kLoop, calls=double1 agg2 = f32[1] fusion(p0), kind=kLoop, calls=double2 ROOT done = f32[1] add(agg1, agg2) })"; std::unique_ptr<VerifiedHloModule> module = ParseAndReturnVerifiedModule(hlo_text).value(); AsyncWrapper wrapper([](const HloInstruction* instruction) { return instruction->opcode() == HloOpcode::kFusion; }); EXPECT_THAT(wrapper.HloModulePass::Run(module.get()), IsOkAndHolds(true)); EXPECT_EQ(CountAsyncInstructions(module->entry_computation()), 4); Literal argument = LiteralUtil::CreateR1<float>({1.0}); Literal expected = LiteralUtil::CreateR1<float>({4.0}); Literal result = ExecuteNoHloPasses(std::move(module), {&argument}); EXPECT_TRUE(LiteralTestUtil::Equal(expected, result)); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/async_wrapper.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/async_wrapper_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
7736eb72-e714-45fa-ab25-d615e11bdbed
cpp
tensorflow/tensorflow
stream_attribute_async_wrapper
third_party/xla/xla/service/gpu/transforms/stream_attribute_async_wrapper.cc
third_party/xla/xla/service/gpu/transforms/stream_attribute_async_wrapper_test.cc
#include "xla/service/gpu/transforms/stream_attribute_async_wrapper.h" #include "absl/container/flat_hash_set.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/runtime/thunk.h" #include "xla/util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/logging.h" #include "tsl/platform/statusor.h" namespace xla::gpu { namespace { static absl::StatusOr<bool> AsynchronizeInstruction(HloInstruction* instr) { auto instr_gpu_config = instr->backend_config<GpuBackendConfig>(); if (!instr_gpu_config.ok() || instr_gpu_config->operation_queue_id() == Thunk::kDefaultExecutionStreamId.value()) { return false; } HloComputation* computation = instr->parent(); TF_ASSIGN_OR_RETURN( HloInstruction * done, computation->CreateAsyncInstructions( instr, {}, StreamAttributeAsyncWrapper::kParallelExecutionThread, true)); TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config, done->backend_config<GpuBackendConfig>()); gpu_config.set_force_earliest_schedule(false); TF_RETURN_IF_ERROR(done->set_backend_config(gpu_config)); VLOG(5) << "Created async instruction: " << done->ToString(); return true; } } absl::StatusOr<bool> StreamAttributeAsyncWrapper::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { XLA_VLOG_LINES( 2, "StreamAttributeAsyncWrapper::Run(), before:\n" + module->ToString()); bool changed = false; for (const HloComputation* comp : module->MakeNonfusionComputations(execution_threads)) { for (HloInstruction* instr : comp->instructions()) { TF_ASSIGN_OR_RETURN(bool result, AsynchronizeInstruction(instr)); changed |= result; } } XLA_VLOG_LINES( 2, "StreamAttributeAsyncWrapper::Run(), after:\n" + module->ToString()); return changed; } }
#include "xla/service/gpu/transforms/stream_attribute_async_wrapper.h" #include <memory> #include <gtest/gtest.h> #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/statusor.h" namespace xla::gpu { namespace { using StreamAttributeAsyncWrapperTest = HloTestBase; TEST_F(StreamAttributeAsyncWrapperTest, NonDefaultOpIsWrapped) { constexpr absl::string_view kHloString = R"( HloModule ModuleWithAsync ENTRY entry { p1_32 = f32[1] parameter(0) p2_32 = f32[1] parameter(1) add_32 = f32[1] add(p1_32, p2_32), backend_config={"operation_queue_id":"1", "wait_on_operation_queues":[], "force_earliest_schedule":true} ROOT exp_32 = f32[1] exponential(add_32), backend_config={"operation_queue_id":"0", "wait_on_operation_queues":[1]} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHloString)); StreamAttributeAsyncWrapper async_wrapper; bool changed; TF_ASSERT_OK_AND_ASSIGN(changed, async_wrapper.Run(module.get())); EXPECT_TRUE(changed); const HloInstruction* producer = module->entry_computation()->root_instruction()->operand(0); EXPECT_EQ(producer->opcode(), HloOpcode::kAsyncDone); TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig done_gpu_config, producer->backend_config<GpuBackendConfig>()); EXPECT_EQ(done_gpu_config.force_earliest_schedule(), false); const HloInstruction* producer_start = producer->operand(0); EXPECT_EQ(producer_start->opcode(), HloOpcode::kAsyncStart); const xla::HloAsyncInstruction* async = Cast<HloAsyncInstruction>(producer_start); EXPECT_EQ(async->async_wrapped_opcode(), HloOpcode::kAdd); TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig gpu_config, async->backend_config<GpuBackendConfig>()); EXPECT_EQ(gpu_config.operation_queue_id(), 1); EXPECT_EQ(gpu_config.force_earliest_schedule(), true); EXPECT_EQ(async->async_execution_thread(), "parallel"); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/stream_attribute_async_wrapper.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/stream_attribute_async_wrapper_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
017d6ee9-96b6-4ac8-8dc8-91d074a92ed2
cpp
tensorflow/tensorflow
command_buffer_scheduling
third_party/xla/xla/service/gpu/transforms/command_buffer_scheduling.cc
third_party/xla/xla/service/gpu/transforms/command_buffer_scheduling_test.cc
#include "xla/service/gpu/transforms/command_buffer_scheduling.h" #include <algorithm> #include <cstddef> #include <cstdint> #include <iterator> #include <memory> #include <string> #include <utility> #include <variant> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/container/inlined_vector.h" #include "absl/status/status.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/ffi/ffi_api.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_clone_context.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/ir/hlo_schedule.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/cublas_cudnn.h" #include "xla/service/gpu/hlo_fusion_analysis.h" #include "xla/service/gpu/hlo_traversal.h" #include "xla/service/gpu/ir_emission_utils.h" #include "xla/service/gpu/variant_visitor.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/stream_executor/device_description.h" #include "xla/stream_executor/semantic_version.h" #include "xla/util.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" #include "tsl/platform/statusor.h" namespace xla::gpu { using CommandBuffer = CommandBufferScheduling::CommandBuffer; using CommandBufferConfig = CommandBufferScheduling::CommandBufferConfig; static bool IsCommand(const HloComputation* computation, const CommandBufferConfig& config); static bool IsConstant(const HloInstruction* hlo) { return hlo->opcode() == HloOpcode::kConstant; } static bool IsParameter(const HloInstruction* hlo) { return hlo->opcode() == HloOpcode::kParameter; } static bool IsNoOp(const HloInstruction* hlo) { return HloPredicateIsOp<HloOpcode::kBitcast, HloOpcode::kTuple, HloOpcode::kGetTupleElement>(hlo); }; static bool IsAsyncStartCommand(const HloInstruction* hlo, const CommandBufferConfig& config) { if (hlo->opcode() == HloOpcode::kAllReduceStart || hlo->opcode() == HloOpcode::kAllGatherStart) { return config.enabled_commands.contains(DebugOptions::COLLECTIVES); } if (hlo->opcode() == HloOpcode::kAsyncStart) { if (IsCublasGemm(*hlo->async_wrapped_instruction())) { return config.enabled_commands.contains(DebugOptions::CUBLAS); } if (hlo->async_wrapped_opcode() == HloOpcode::kFusion) { return config.enabled_commands.contains(DebugOptions::FUSION); } if (hlo->async_wrapped_opcode() == HloOpcode::kReduceScatter || hlo->async_wrapped_opcode() == HloOpcode::kAllToAll) { return config.enabled_commands.contains(DebugOptions::COLLECTIVES); } } if (hlo->opcode() == HloOpcode::kReduceScatter || hlo->opcode() == HloOpcode::kAllToAll) { return config.enabled_commands.contains(DebugOptions::COLLECTIVES); } return false; } static bool IsAsyncDoneCommand(const HloInstruction* hlo, const CommandBufferConfig& config) { if (hlo->opcode() == HloOpcode::kAllReduceDone || hlo->opcode() == HloOpcode::kAllGatherDone) { return config.enabled_commands.contains(DebugOptions::COLLECTIVES); } if (hlo->opcode() == HloOpcode::kAsyncDone) { if (IsCublasGemm(*hlo->async_wrapped_instruction())) { return config.enabled_commands.contains(DebugOptions::CUBLAS); } if (hlo->async_wrapped_opcode() == HloOpcode::kFusion) { return config.enabled_commands.contains(DebugOptions::FUSION); } if (hlo->async_wrapped_opcode() == HloOpcode::kReduceScatter || hlo->async_wrapped_opcode() == HloOpcode::kAllToAll) { 
return config.enabled_commands.contains(DebugOptions::COLLECTIVES); } } return false; } static HloInstruction* FindAsyncDoneCommand(const HloInstruction* start) { if (start->opcode() == HloOpcode::kAllReduceStart || start->opcode() == HloOpcode::kAllGatherStart) { CHECK(start->users().size() == 1); return start->users().front(); } else if (start->opcode() == HloOpcode::kAsyncStart) { return start->async_chain_done(); } return nullptr; } template <HloOpcode op> static bool IsCommand(const HloInstruction*, const CommandBufferConfig&); template <> bool IsCommand<HloOpcode::kWhile>(const HloInstruction* hlo, const CommandBufferConfig& config) { return config.enabled_commands.contains(DebugOptions::CONDITIONALS) && IsCommand(hlo->while_body(), config) && IsCommand(hlo->while_condition(), config); } template <> bool IsCommand<HloOpcode::kConditional>(const HloInstruction* hlo, const CommandBufferConfig& config) { return config.enabled_commands.contains(DebugOptions::CONDITIONALS) && absl::c_all_of(hlo->branch_computations(), [&](const HloComputation* comp) { return IsCommand(comp, config); }); } static bool IsCommand(const HloCustomCallInstruction* hlo, const CommandBufferConfig& config) { if (config.enabled_commands.contains(DebugOptions::CUBLAS) && IsLegacyCublasMatmul(*hlo)) { return true; } if (config.enabled_commands.contains(DebugOptions::CUBLASLT) && (IsCublasLtMatmul(*hlo) || IsCublasLtMatmulF8(*hlo))) { return true; } if (config.enabled_commands.contains(DebugOptions::CUDNN) && IsCustomCallTofMHA(*hlo)) { VLOG(3) << "Recording FusedMHA, target " << hlo->custom_call_target() << " into command buffer."; return true; } if (!config.enabled_commands.contains(DebugOptions::CUSTOM_CALL)) { return false; } if (config.enabled_legacy_custom_call_targets.contains( hlo->custom_call_target())) { VLOG(3) << "Recording legacy custom call target " << hlo->custom_call_target() << " into command buffer."; return true; } auto registration = ffi::FindHandler(hlo->custom_call_target(), "gpu"); return registration.ok() ? 
ffi::IsCommandBufferCompatible(registration->traits) : false; } static bool IsCommand(const HloInstruction* hlo, const CommandBufferConfig& config) { if (auto* fusion = DynCast<HloFusionInstruction>(hlo)) { auto gpu_config = fusion->backend_config<GpuBackendConfig>(); const FusionBackendConfig& backend_config = gpu_config->fusion_backend_config(); if (backend_config.kind() == kCuDnnFusionKind) { return config.enabled_commands.contains(DebugOptions::CUDNN); } const auto& custom_config = backend_config.custom_fusion_config(); if (custom_config.name() == "address_computation") { auto fusion_analysis = HloFusionAnalysis::Create(*hlo, config.device_description); const HloFusionAdaptor& adaptor = fusion_analysis.fusion(); auto hero_adaptor = HloBfsFindIf(adaptor.GetRoots(), adaptor, [](auto node) { return node.opcode() == HloOpcode::kCustomCall || node.opcode() == HloOpcode::kReduceScatter; }); const HloInstruction* hero = &hero_adaptor->instruction(); return IsCommand(hero, config) || IsAsyncStartCommand(hero, config); } if (custom_config.name() == "dynamic_address_computation") { return false; } return config.enabled_commands.contains(DebugOptions::FUSION); } if (auto* sort = DynCast<HloSortInstruction>(hlo)) return config.enabled_commands.contains(DebugOptions::FUSION); if (hlo->opcode() == HloOpcode::kPartitionId || hlo->opcode() == HloOpcode::kReplicaId) { return config.enabled_commands.contains(DebugOptions::FUSION); } if (auto* custom_call = DynCast<HloCustomCallInstruction>(hlo)) return IsCommand(custom_call, config); if (hlo->opcode() == HloOpcode::kWhile) return IsCommand<HloOpcode::kWhile>(hlo, config); if (hlo->opcode() == HloOpcode::kConditional) return IsCommand<HloOpcode::kConditional>(hlo, config); return false; } static bool IsCommand(const HloComputation* computation, const CommandBufferConfig& config) { return absl::c_all_of( computation->instructions(), [&](const HloInstruction* inst) { return IsNoOp(inst) || IsConstant(inst) || IsParameter(inst) || IsCommand(inst, config) || IsAsyncStartCommand(inst, config) || IsAsyncDoneCommand(inst, config); }); } static void RemoveTrailingNoOps(HloInstructionSequence& seq) { std::vector<HloInstruction*> instructions = seq.instructions(); for (int i = instructions.size() - 1; i >= 0; i--) { if (HloInstruction* inst = instructions[i]; IsNoOp(inst)) { seq.remove_instruction(inst); } else { break; } } } std::vector<HloInstructionSequence> CommandBufferScheduling::CollectCommandBufferSequences( const HloInstructionSequence schedule, const CommandBufferConfig& config, int32_t min_num_commands) { std::vector<HloInstructionSequence> sequences; HloInstructionSequence current_seq; int64_t num_commands_in_current_seq = 0; auto collect_current_seq = [&]() { if (num_commands_in_current_seq >= std::max(1, min_num_commands)) { RemoveTrailingNoOps(current_seq); sequences.push_back(std::move(current_seq)); } current_seq = HloInstructionSequence(); num_commands_in_current_seq = 0; }; auto& instructions = schedule.instructions(); auto collect_async_region = [&](const HloInstruction* start) { auto get_index = [&](const HloInstruction* inst) -> size_t { auto it = std::find(instructions.begin(), instructions.end(), inst); return std::distance(instructions.begin(), it); }; HloInstructionSequence seq; size_t done_index = get_index(FindAsyncDoneCommand(start)); for (size_t i = get_index(start); i <= done_index; i++) { HloInstruction* inst = instructions.at(i); if (IsAsyncStartCommand(inst, config)) { const HloInstruction* done = FindAsyncDoneCommand(inst); 
done_index = std::max(done_index, get_index(done)); } seq.push_back(inst); } return seq; }; auto check_async_region = [&](const HloInstructionSequence& seq) { if (!absl::c_all_of(seq.instructions(), [&](HloInstruction* inst) { return IsNoOp(inst) || IsCommand(inst, config) || IsAsyncStartCommand(inst, config) || IsAsyncDoneCommand(inst, config); })) { return false; } absl::flat_hash_set<HloInstruction*> done_instructions; for (const HloInstruction* inst : seq.instructions()) { if (IsAsyncStartCommand(inst, config)) { done_instructions.insert(FindAsyncDoneCommand(inst)); } if (IsAsyncDoneCommand(inst, config)) { if (!done_instructions.contains(inst)) { return false; } } } return true; }; for (size_t i = 0; i < instructions.size(); i++) { HloInstruction* inst = instructions.at(i); if (IsNoOp(inst) && num_commands_in_current_seq) { current_seq.push_back(inst); continue; } if (IsCommand(inst, config)) { num_commands_in_current_seq++; current_seq.push_back(inst); continue; } if (IsAsyncStartCommand(inst, config)) { HloInstructionSequence seq = collect_async_region(inst); if (check_async_region(seq)) { num_commands_in_current_seq += seq.instructions().size(); for (HloInstruction* inst : seq.instructions()) { current_seq.push_back(inst); } i += seq.instructions().size() - 1; continue; } } collect_current_seq(); } collect_current_seq(); return sequences; } absl::StatusOr<bool> CommandBufferScheduling::MoveParametersAndConstantsToFront( HloComputation* computation) { HloInstructionSequence new_sequence; HloSchedule& schedule = computation->parent()->schedule(); HloInstructionSequence& sequence = schedule.GetOrCreateSequence(computation); for (HloInstruction* inst : sequence.instructions()) { if (IsParameter(inst) || IsConstant(inst)) { new_sequence.push_back(inst); for (HloInstruction* control_predecessor : inst->control_predecessors()) { for (HloInstruction* user : inst->users()) { TF_RETURN_IF_ERROR(control_predecessor->AddControlDependencyTo(user)); } } TF_RETURN_IF_ERROR(inst->DropAllControlDeps()); } } for (HloInstruction* inst : sequence.instructions()) { if (!IsParameter(inst) && !IsConstant(inst)) { new_sequence.push_back(inst); } } schedule.set_sequence(computation, new_sequence); for (auto [old_i, new_i] : llvm::zip(sequence.instructions(), new_sequence.instructions())) { if (old_i != new_i) return true; } return false; } absl::StatusOr<CommandBuffer> CommandBufferScheduling::PrepareCommandBuffer( const HloInstructionSequence& seq, HloModule* module) { auto builder = HloComputation::Builder("command_buffer"); absl::Span<HloInstruction* const> instructions = absl::MakeSpan(seq.instructions()); absl::flat_hash_set<HloInstruction*> in_command_buffer(instructions.begin(), instructions.end()); absl::flat_hash_map<HloInstruction*, HloParameterInstruction*> parameters; absl::flat_hash_map<HloInstruction*, HloInstruction*> inst_mapping; auto mapped_operands = [&](HloInstruction* instr) { absl::InlinedVector<HloInstruction*, 4> operands; for (HloInstruction* operand : instr->operands()) { if (auto it = inst_mapping.find(operand); it != inst_mapping.end()) operands.push_back(it->second); } return operands; }; for (HloInstruction* inst : instructions) { for (HloInstruction* operand : inst->operands()) { if (parameters.contains(operand)) continue; if (in_command_buffer.contains(operand)) continue; int64_t parameter_id = parameters.size(); auto* parameter = Cast<HloParameterInstruction>( builder.AddInstruction(HloInstruction::CreateParameter( parameter_id, operand->shape(), "p"))); 
parameter->UniquifyName(module); parameter->UniquifyId(module); inst_mapping[operand] = parameters[operand] = parameter; } } for (HloInstruction* inst : seq.instructions()) { HloCloneContext ctx(inst->GetModule()); for (HloComputation* called_computation : inst->called_computations()) { if (called_computation->IsAsyncComputation()) { called_computation->RemoveAsyncStart(); } ctx.MapComputation(called_computation, called_computation); } inst_mapping[inst] = builder.AddInstruction( inst->CloneWithNewOperands(inst->shape(), mapped_operands(inst), &ctx)); inst_mapping[inst]->UniquifyId(module); } std::vector<HloInstruction*> arguments(parameters.size()); for (auto& [argument, parameter] : parameters) { arguments[parameter->parameter_number()] = argument; } std::vector<HloInstruction*> results; std::vector<HloInstruction*> returned; auto has_external_users = [&](HloInstruction* inst) { return inst->IsRoot() || absl::c_any_of(inst->users(), [&](auto* user) { return !in_command_buffer.contains(user); }); }; for (HloInstruction* inst : instructions) { if (has_external_users(inst)) { results.push_back(inst); returned.push_back(inst_mapping[inst]); } } if (returned.size() > 1) { HloInstruction* inst = builder.AddInstruction(HloInstruction::CreateTuple(returned)); inst->UniquifyName(module); inst->UniquifyId(module); } std::unique_ptr<HloComputation> comp = builder.Build(); comp->UniquifyName(module); comp->SetUniqueId(comp->root_instruction()->unique_id()); return CommandBuffer{std::move(arguments), std::move(results), std::move(comp), std::move(inst_mapping)}; } absl::StatusOr<HloComputation*> CommandBufferScheduling::RewriteCommandBuffer( HloComputation* parent, const HloInstructionSequence& seq, CommandBuffer command_buffer) { if (command_buffer.results.empty()) return absl::InternalError("command buffer results must not be empty"); Shape cmd_buffer_result_shape; bool has_single_result = command_buffer.results.size() == 1; if (has_single_result) { cmd_buffer_result_shape = command_buffer.results[0]->shape(); } else { absl::InlinedVector<Shape, 4> shapes; shapes.reserve(command_buffer.results.size()); for (auto* res : command_buffer.results) shapes.push_back(res->shape()); cmd_buffer_result_shape = ShapeUtil::MakeTupleShape(shapes); } HloComputation* computation = parent->parent()->AddComputation(std::move(command_buffer.computation), false); HloInstruction* call = parent->AddInstruction(HloInstruction::CreateCall( cmd_buffer_result_shape, command_buffer.arguments, computation)); if (has_single_result) { TF_RETURN_IF_ERROR(command_buffer.results[0]->ReplaceAllUsesWith(call)); } else { for (int i = 0; i < command_buffer.results.size(); i++) { TF_RETURN_IF_ERROR( command_buffer.results[i]->ReplaceAllUsesWith(parent->AddInstruction( HloInstruction::CreateGetTupleElement(call, i)))); } } HloSchedule& schedule = parent->parent()->schedule(); HloInstructionSequence& sequence = schedule.GetOrCreateSequence(parent); sequence.replace_instruction(seq.instructions().back(), call); HloInstructionSequence cmd_buffer_schedule; for (auto* argument : command_buffer.arguments) { cmd_buffer_schedule.push_back(command_buffer.inst_mapping[argument]); } for (auto* inst : seq.instructions()) { cmd_buffer_schedule.push_back(command_buffer.inst_mapping[inst]); } if (!has_single_result) { cmd_buffer_schedule.push_back(computation->root_instruction()); } schedule.set_sequence(computation, cmd_buffer_schedule); auto& inst_mapping = command_buffer.inst_mapping; for (HloInstruction* inst : seq.instructions()) { 
HloInstruction* cmd_inst = inst_mapping[inst]; for (HloInstruction* predecessor : inst->control_predecessors()) { if (auto it = inst_mapping.find(predecessor); it != inst_mapping.end()) { HloInstruction* cmd_predecessor = it->second; if (IsParameter(cmd_predecessor)) { TF_RETURN_IF_ERROR(predecessor->AddControlDependencyTo(call)); } else { TF_RETURN_IF_ERROR(cmd_predecessor->AddControlDependencyTo(cmd_inst)); } } else { TF_RETURN_IF_ERROR(predecessor->AddControlDependencyTo(call)); } } for (HloInstruction* successor : inst->control_successors()) { if (auto it = inst_mapping.find(successor); it != inst_mapping.end()) { HloInstruction* cmd_successor = it->second; TF_RETURN_IF_ERROR(cmd_inst->AddControlDependencyTo(cmd_successor)); } else { TF_RETURN_IF_ERROR(call->AddControlDependencyTo(successor)); } } TF_RETURN_IF_ERROR(inst->DropAllControlDeps()); } for (int32_t i = seq.instructions().size() - 1; i >= 0; i--) { TF_RETURN_IF_ERROR(parent->RemoveInstruction(seq.instructions()[i])); } return computation; } CommandBufferScheduling::CommandBufferScheduling( const se::DeviceDescription& device_description) : device_description_(device_description) {} absl::StatusOr<bool> CommandBufferScheduling::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { if (!module->has_schedule()) return Internal("module is not scheduled"); const DebugOptions& debug_options = module->config().debug_options(); absl::flat_hash_set<DebugOptions::CommandBufferCmdType> commands; for (auto cmd_type : debug_options.xla_gpu_enable_command_buffer()) { commands.insert(static_cast<DebugOptions::CommandBufferCmdType>(cmd_type)); } absl::flat_hash_set<std::string> legacy_custom_call_targets; for (const auto& target : debug_options.legacy_command_buffer_custom_call_targets()) { legacy_custom_call_targets.insert(target); } CommandBufferConfig config{std::move(commands), std::move(legacy_custom_call_targets), device_description_}; static constexpr auto kRequireConditionals = {DebugOptions::CONDITIONALS}; static constexpr auto kRequireTracing = { DebugOptions::CUBLAS, DebugOptions::CUBLASLT, DebugOptions::CUDNN, DebugOptions::CUSTOM_CALL, DebugOptions::COLLECTIVES}; auto erase = [&](absl::Span<const DebugOptions::CommandBufferCmdType> cmds) { for (auto cmd : cmds) { if (config.enabled_commands.erase(cmd)) { VLOG(1) << "Removed command buffer support for " << DebugOptions::CommandBufferCmdType_Name(cmd) << " as it's not supported with gpu toolkit version " << device_description_.runtime_version() << " and driver version " << device_description_.driver_version() << ". This might negatively impact performance. 
To enable " << DebugOptions::CommandBufferCmdType_Name(cmd) << " support in command buffers use cuda-compat package: " #if defined(PLATFORM_GOOGLE) << "set CUDA_COMPAT_LOAD=1 env variable."; #else << "https: #endif } } }; auto erase_cuda = [&](const se::CudaComputeCapability& cuda_comp) { if (std::min(device_description_.runtime_version(), device_description_.driver_version()) < se::SemanticVersion{12, 3, 0}) { erase(kRequireTracing); erase(kRequireConditionals); } }; auto erase_rocm = [&](const se::RocmComputeCapability& rocm_comp) { erase(kRequireConditionals); }; std::visit(VariantVisitor{erase_cuda, erase_rocm}, device_description_.gpu_compute_capability()); auto order = module->MakeComputationPostOrder(); std::reverse(order.begin(), order.end()); absl::flat_hash_set<HloComputation*> processed_command_buffers; auto changed = false; for (HloComputation* comp : order) { if (comp->IsFusionComputation() || comp->IsAsyncComputation() || comp->IsCustomCallComputation()) continue; if (processed_command_buffers.contains(comp)) continue; TF_ASSIGN_OR_RETURN(bool changed_, MoveParametersAndConstantsToFront(comp)); changed |= changed_; std::vector<HloInstructionSequence> sequences = CollectCommandBufferSequences( module->schedule().sequence(comp), config, debug_options.xla_gpu_graph_min_graph_size()); for (const HloInstructionSequence& seq : sequences) { TF_ASSIGN_OR_RETURN(CommandBuffer command_buffer, PrepareCommandBuffer(seq, comp->parent())); TF_ASSIGN_OR_RETURN( HloComputation * command_buffer_computation, RewriteCommandBuffer(comp, seq, std::move(command_buffer))); changed = true; for (HloComputation* called : command_buffer_computation->MakeEmbeddedComputationsList()) { processed_command_buffers.insert(called); } } } TF_RETURN_IF_ERROR(module->schedule().Update()); return changed; } }
#include "xla/service/gpu/transforms/command_buffer_scheduling.h" #include <memory> #include <string> #include <utility> #include <vector> #include <gtest/gtest.h> #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/ir/hlo_schedule.h" #include "xla/service/gpu/gpu_device_info_for_tests.h" #include "xla/service/gpu/gpu_executable.h" #include "xla/service/hlo_parser.h" #include "xla/stream_executor/device_description.h" #include "xla/tests/filecheck.h" #include "xla/tests/hlo_test_base.h" #include "xla/tests/verified_hlo_module.h" #include "xla/tsl/lib/core/status_test_util.h" #include "tsl/platform/status.h" #include "tsl/platform/statusor.h" namespace xla::gpu { namespace { class CommandBufferSchedulingTest : public HloTestBase { public: se::DeviceDescription device_desc() { return TestGpuDeviceInfo::CudaOrRocmDeviceInfo(); } DebugOptions GetDebugOptionsForTest() override { auto debug_options = HloTestBase::GetDebugOptionsForTest(); debug_options.add_xla_gpu_enable_command_buffer(DebugOptions::FUSION); debug_options.add_xla_gpu_enable_command_buffer(DebugOptions::CONDITIONALS); debug_options.add_xla_gpu_enable_command_buffer(DebugOptions::COLLECTIVES); debug_options.add_xla_gpu_enable_command_buffer(DebugOptions::CUDNN); debug_options.add_xla_gpu_enable_command_buffer(DebugOptions::CUBLASLT); debug_options.add_xla_gpu_enable_command_buffer(DebugOptions::CUSTOM_CALL); debug_options.set_xla_gpu_graph_min_graph_size(2); return debug_options; } }; using CommandBuffer = CommandBufferScheduling::CommandBuffer; TEST_F(CommandBufferSchedulingTest, SingleCommandBuffer) { const char* hlo = R"( HloModule TestModule, is_scheduled=true %fused_computation (param_0: s32[], param_1: s32[]) -> s32[] { %p0 = s32[] parameter(0) %p1 = s32[] parameter(1) ROOT %add = s32[] add(s32[] %p0, s32[] %p1) } %fused_computation.1 (param_0: s32[], param_1: s32[]) -> s32[] { %p0 = s32[] parameter(0) %p1 = s32[] parameter(1) ROOT %add = s32[] add(s32[] %p0, s32[] %p1) } ENTRY %main (a: s32[], b: s32[]) -> s32[] { %a = s32[] parameter(0) %b = s32[] parameter(1) %fusion = s32[] fusion(s32[] %a, s32[] %b), kind=kLoop, calls=%fused_computation %fusion.1 = s32[] fusion(s32[] %a, s32[] %b), kind=kLoop, calls=%fused_computation.1 ROOT %custom-call = s32[] custom-call(s32[] %fusion, s32[] %fusion.1), custom_call_target="some target" })"; const char* expected = R"( RunAndFilecheckHloRewrite(hlo, CommandBufferScheduling(device_desc()), expected, [](HloModule* module) { EXPECT_TRUE(module->has_schedule()); TF_CHECK_OK(module->schedule().Verify()); }); } TEST_F(CommandBufferSchedulingTest, MultipleCommandBuffers) { const char* hlo = R"( HloModule TestModule, is_scheduled=true %fused_computation(param_0: s32[], param_1: s32[]) -> s32[] { %p0 = s32[] parameter(0) %p1 = s32[] parameter(1) ROOT %add = s32[] add(s32[] %p0, s32[] %p1) } %fused_computation.1(param_0: s32[], param_1: s32[]) -> s32[] { %p0 = s32[] parameter(0) %p1 = s32[] parameter(1) ROOT %add = s32[] add(s32[] %p0, s32[] %p1) } %fused_computation.2(param_0: s32[], param_1: s32[]) -> s32[] { %p0 = s32[] parameter(0) %p1 = s32[] parameter(1) ROOT %add = s32[] add(s32[] %p0, s32[] %p1) } %fused_computation.3(param_0: s32[], param_1: s32[]) -> s32[] { %p0 = s32[] parameter(0) %p1 = s32[] parameter(1) ROOT %add = s32[] add(s32[] %p0, s32[] %p1) } ENTRY %main (a: s32[], b: s32[], c: (s32[], s32[])) -> s32[] { %a = s32[] parameter(0) %b = s32[] parameter(1) %c = (s32[], s32[]) parameter(2) %fusion = s32[] fusion(s32[] %a, s32[] %b), 
kind=kLoop, calls=%fused_computation %d = s32[] get-tuple-element((s32[], s32[]) %c), index=0 %fusion.1 = s32[] fusion(s32[] %fusion, s32[] %d), kind=kLoop, calls=%fused_computation.1 %e = s32[] get-tuple-element((s32[], s32[]) %c), index=1 %custom-call = s32[] custom-call(s32[] %fusion.1, s32[] %e), custom_call_target="some target" %fusion.2 = s32[] fusion(s32[] %custom-call, s32[] %a), kind=kLoop, calls=%fused_computation.2 %fusion.3 = s32[] fusion(s32[] %custom-call, s32[] %fusion.2), kind=kLoop, calls=%fused_computation.3 ROOT %custom-call.1 = s32[] custom-call(s32[] %fusion.3), custom_call_target="some target" })"; const char* expected = R"( RunAndFilecheckHloRewrite(hlo, CommandBufferScheduling(device_desc()), expected, [](HloModule* module) { EXPECT_TRUE(module->has_schedule()); TF_CHECK_OK(module->schedule().Verify()); }); } TEST_F(CommandBufferSchedulingTest, AllReduceStartFollowedByDone) { const char* hlo = R"( HloModule TestModule, is_scheduled=true %add (p0: s32[4], p1: s32[4]) -> s32[4] { %p0 = s32[4] parameter(0) %p1 = s32[4] parameter(1) ROOT %add = s32[4] add(s32[4] %p0, s32[4] %p1) } ENTRY %main (a: s32[4]) -> s32[4] { %a = s32[4] parameter(0) %start = s32[4]{0} all-reduce-start(s32[4]{0} %a), replica_groups={{0,1}}, to_apply=%add, backend_config={"collective_backend_config": {"is_sync":true,"no_parallel_custom_call":false}} ROOT %done = s32[4]{0} all-reduce-done(s32[4]{0} %start) })"; const char* expected = R"( CHECK: %command_buffer ([[P0:.+]]: s32[4]) -> s32[4] { CHECK: %[[P0]] = s32[4]{0} parameter(0) CHECK: %[[START:.+]] = s32[4]{0} all-reduce-start(%[[P0]]) CHECK: ROOT %[[DONE:.+]] = s32[4]{0} all-reduce-done(%[[START]]) CHECK: } CHECK: ENTRY %main (a: s32[4]) -> s32[4] { CHECK: %[[A:.+]] = s32[4]{0} parameter(0) CHECK: ROOT %[[CALL:.+]] = s32[4]{0} call(%[[A]]), CHECK: to_apply=%command_buffer CHECK: })"; RunAndFilecheckHloRewrite(hlo, CommandBufferScheduling(device_desc()), expected, [](HloModule* module) { EXPECT_TRUE(module->has_schedule()); TF_CHECK_OK(module->schedule().Verify()); }); } TEST_F(CommandBufferSchedulingTest, AllGatherStartFollowedByDone) { const char* hlo = R"( HloModule TestModule, is_scheduled=true ENTRY %main (a: s32[2]) -> s32[4] { %a = s32[2] parameter(0) %start = (s32[2]{0}, s32[4]{0}) all-gather-start(%a), channel_id=555, replica_groups={{0,1}}, dimensions={0}, backend_config={"collective_backend_config": {"is_sync":true,"no_parallel_custom_call":false}} ROOT %done = s32[4]{0} all-gather-done(%start) })"; const char* expected = R"( CHECK: %command_buffer ([[P0:.+]]: s32[2]) -> s32[4] { CHECK: %[[P0]] = s32[2]{0} parameter(0) CHECK: %[[START:.+]] = {{.*}} all-gather-start(%[[P0]]) CHECK: ROOT %[[DONE:.+]] = s32[4]{0} all-gather-done(%[[START]]) CHECK: } CHECK: ENTRY %main (a: s32[2]) -> s32[4] { CHECK: %[[A:.+]] = s32[2]{0} parameter(0) CHECK: ROOT %[[CALL:.+]] = s32[4]{0} call(%[[A]]), CHECK: to_apply=%command_buffer CHECK: })"; RunAndFilecheckHloRewrite(hlo, CommandBufferScheduling(device_desc()), expected, [](HloModule* module) { EXPECT_TRUE(module->has_schedule()); TF_CHECK_OK(module->schedule().Verify()); }); } TEST_F(CommandBufferSchedulingTest, ReduceScatterStartFollowedByDone) { const char* hlo = R"( HloModule TestModule, is_scheduled=true %add (p0: s32[], p1: s32[]) -> s32[] { %p0 = s32[] parameter(0) %p1 = s32[] parameter(1) ROOT %add = s32[] add(s32[] %p0, s32[] %p1) } ENTRY %main (a: s32[4]) -> s32[2] { %a = s32[4] parameter(0) %start = ((s32[4]{0}), s32[2]{0}) reduce-scatter-start(%a), channel_id=555, replica_groups={{0,1}}, 
dimensions={0}, to_apply=add, backend_config={"collective_backend_config": {"is_sync":true,"no_parallel_custom_call":false}} ROOT %done = s32[2]{0} reduce-scatter-done(%start) })"; const char* expected = R"( CHECK: %command_buffer ([[P0:.+]]: s32[4]) -> s32[2] { CHECK: %[[P0]] = s32[4]{0} parameter(0) CHECK: %[[START:.+]] = {{.*}} reduce-scatter-start(%[[P0]]) CHECK: ROOT %[[DONE:.+]] = s32[2]{0} reduce-scatter-done(%[[START]]) CHECK: } CHECK: ENTRY %main (a: s32[4]) -> s32[2] { CHECK: %[[A:.+]] = s32[4]{0} parameter(0) CHECK: ROOT %[[CALL:.+]] = s32[2]{0} call(%[[A]]), CHECK: to_apply=%command_buffer CHECK: })"; RunAndFilecheckHloRewrite(hlo, CommandBufferScheduling(device_desc()), expected, [](HloModule* module) { EXPECT_TRUE(module->has_schedule()); TF_CHECK_OK(module->schedule().Verify()); }); } TEST_F(CommandBufferSchedulingTest, AllReduceStartFollowedByBitcast) { const char* hlo = R"( HloModule TestModule, is_scheduled=true %add (p0: s32[4], p1: s32[4]) -> s32[4] { %p0 = s32[4] parameter(0) %p1 = s32[4] parameter(1) ROOT %add = s32[4] add(s32[4] %p0, s32[4] %p1) } ENTRY %main (a: s32[4]) -> s32[4] { %a = s32[4] parameter(0) %start = s32[4]{0} all-reduce-start(s32[4]{0} %a), replica_groups={{0,1}}, to_apply=%add, backend_config={"collective_backend_config": {"is_sync":true,"no_parallel_custom_call":false}} %bitcast = s32[4] bitcast(s32[4]{0} %a) ROOT %done = s32[4]{0} all-reduce-done(s32[4]{0} %start) })"; const char* expected = R"( CHECK: %command_buffer ([[P0:.+]]: s32[4]) -> s32[4] { CHECK: %[[P0]] = s32[4]{0} parameter(0) CHECK: %[[START:.+]] = s32[4]{0} all-reduce-start(%[[P0]]) CHECK: %[[BITCAST:.+]] = s32[4]{0} bitcast(%[[P0]]) CHECK: ROOT %[[DONE:.+]] = s32[4]{0} all-reduce-done(%[[START]]) CHECK: } CHECK: ENTRY %main (a: s32[4]) -> s32[4] { CHECK: %[[A:.+]] = s32[4]{0} parameter(0) CHECK: ROOT %[[CALL:.+]] = s32[4]{0} call(%[[A]]), CHECK: to_apply=%command_buffer CHECK: })"; RunAndFilecheckHloRewrite(hlo, CommandBufferScheduling(device_desc()), expected, [](HloModule* module) { EXPECT_TRUE(module->has_schedule()); TF_CHECK_OK(module->schedule().Verify()); }); } TEST_F(CommandBufferSchedulingTest, AllReduceStartFollowedAllReduceStart) { const char* hlo = R"( HloModule TestModule, is_scheduled=true %add (p0: s32[4], p1: s32[4]) -> s32[4] { %p0 = s32[4] parameter(0) %p1 = s32[4] parameter(1) ROOT %add = s32[4] add(s32[4] %p0, s32[4] %p1) } ENTRY %main (a: s32[4]) -> s32[4] { %a = s32[4] parameter(0) %start1 = s32[4]{0} all-reduce-start(s32[4]{0} %a), replica_groups={{0,1}}, to_apply=%add, backend_config={"collective_backend_config": {"is_sync":true,"no_parallel_custom_call":false}} %start2 = s32[4]{0} all-reduce-start(s32[4]{0} %a), replica_groups={{0,1}}, to_apply=%add, backend_config={"collective_backend_config": {"is_sync":true,"no_parallel_custom_call":false}} %done1 = s32[4]{0} all-reduce-done(s32[4]{0} %start1) ROOT %done2 = s32[4]{0} all-reduce-done(s32[4]{0} %start2) })"; const char* expected = R"( CHECK: %command_buffer ([[P0:.+]]: s32[4]) -> s32[4] { CHECK: %[[P0]] = s32[4]{0} parameter(0) CHECK: %[[START1:.+]] = s32[4]{0} all-reduce-start(%[[P0]]) CHECK: %[[START2:.+]] = s32[4]{0} all-reduce-start(%[[P0]]) CHECK: %[[DONE1:.+]] = s32[4]{0} all-reduce-done(%[[START1]]) CHECK: ROOT %[[DONE2:.+]] = s32[4]{0} all-reduce-done(%[[START2]]) CHECK: } CHECK: ENTRY %main (a: s32[4]) -> s32[4] { CHECK: %[[A:.+]] = s32[4]{0} parameter(0) CHECK: ROOT %[[CALL:.+]] = s32[4]{0} call(%[[A]]), CHECK: to_apply=%command_buffer CHECK: })"; RunAndFilecheckHloRewrite(hlo, 
CommandBufferScheduling(device_desc()), expected, [](HloModule* module) { EXPECT_TRUE(module->has_schedule()); TF_CHECK_OK(module->schedule().Verify()); }); } TEST_F(CommandBufferSchedulingTest, DoNotCaptureUnmatchedAsyncDone) { const char* hlo = R"( HloModule TestModule, is_scheduled=true %fused_computation(param_0: s32[], param_1: s32[]) -> s32[] { %p0 = s32[] parameter(0) %p1 = s32[] parameter(1) ROOT %add = s32[] add(s32[] %p0, s32[] %p1) } %fused_computation.1(param_0: s32[], param_1: s32[]) -> s32[] { %p0 = s32[] parameter(0) %p1 = s32[] parameter(1) ROOT %add = s32[] add(s32[] %p0, s32[] %p1) } %add (p0: s32[4], p1: s32[4]) -> s32[4] { %p0 = s32[4] parameter(0) %p1 = s32[4] parameter(1) ROOT %add = s32[4] add(s32[4] %p0, s32[4] %p1) } ENTRY %main (a: s32[4], b:s32[]) -> s32[] { %a = s32[4] parameter(0) %b = s32[] parameter(1) %start1 = s32[4]{0} all-reduce-start(s32[4]{0} %a), replica_groups={{0,1}}, to_apply=%add, backend_config={"collective_backend_config": {"is_sync":true,"no_parallel_custom_call":false}} %c = s32[] custom-call(), custom_call_target="target" %start2 = s32[4]{0} all-reduce-start(s32[4]{0} %a), replica_groups={{0,1}}, to_apply=%add, backend_config={"collective_backend_config": {"is_sync":true,"no_parallel_custom_call":false}} %done1 = s32[4]{0} all-reduce-done(s32[4]{0} %start1) %done2 = s32[4]{0} all-reduce-done(s32[4]{0} %start2) %fusion = s32[] fusion(s32[] %b, s32[] %c), kind=kLoop, calls=%fused_computation ROOT %fusion.1 = s32[] fusion(s32[] %b, s32[] %c), kind=kLoop, calls=%fused_computation.1 })"; const char* expected = R"( CHECK: %command_buffer ([[P0:.+]]: s32[], [[P1:.+]]: s32[]) -> s32[] { CHECK: %[[P0]] = s32[] parameter(0) CHECK: %[[P1]] = s32[] parameter(1) CHECK: %fusion = s32[] fusion(%[[P0]], %[[P1]]), kind=kLoop, calls=%fused_computation CHECK: ROOT %fusion.1 = s32[] fusion(%[[P0]], %[[P1]]), kind=kLoop, calls=%fused_computation.1 CHECK: } CHECK: ENTRY %main (a: s32[4], b: s32[]) -> s32[] { CHECK: %[[A:.+]] = s32[4]{0} parameter(0) CHECK: %[[B:.+]] = s32[] parameter(1) CHECK: %[[START1:.+]] = s32[4]{0} all-reduce-start(%[[A]]) CHECK: %[[C:.+]] = s32[] custom-call() CHECK: %[[START2:.+]] = s32[4]{0} all-reduce-start(%[[A]]) CHECK: %[[DONE1:.+]] = s32[4]{0} all-reduce-done(%[[START1]]) CHECK: %[[DONE2:.+]] = s32[4]{0} all-reduce-done(%[[START2]]) CHECK: %call = s32[] call(%b, %c), to_apply=%command_buffer CHECK: })"; RunAndFilecheckHloRewrite(hlo, CommandBufferScheduling(device_desc()), expected, [](HloModule* module) { EXPECT_TRUE(module->has_schedule()); TF_CHECK_OK(module->schedule().Verify()); }); } TEST_F(CommandBufferSchedulingTest, CollectCommandBufferSequence) { const char* hlo = R"( HloModule TestModule, is_scheduled=true %fused_computation(param_0: s32[], param_1: s32[]) -> s32[] { %p0 = s32[] parameter(0) %p1 = s32[] parameter(1) ROOT %add = s32[] add(s32[] %p0, s32[] %p1) } %fused_computation.1(param_0: s32[], param_1: s32[]) -> s32[] { %p0 = s32[] parameter(0) %p1 = s32[] parameter(1) ROOT %add = s32[] add(s32[] %p0, s32[] %p1) } %fused_computation.2(param_0: s32[], param_1: s32[]) -> s32[] { %p0 = s32[] parameter(0) %p1 = s32[] parameter(1) ROOT %add = s32[] add(s32[] %p0, s32[] %p1) } %fused_computation.3(param_0: s32[], param_1: s32[]) -> s32[] { %p0 = s32[] parameter(0) %p1 = s32[] parameter(1) ROOT %add = s32[] add(s32[] %p0, s32[] %p1) } ENTRY %main (a: s32[], b: s32[], c: (s32[], s32[])) -> s32[] { %a = s32[] parameter(0) %b = s32[] parameter(1) %c = (s32[], s32[]) parameter(2) %fusion = s32[] fusion(s32[] %a, s32[] %b), 
kind=kLoop, calls=%fused_computation %d = s32[] get-tuple-element((s32[], s32[]) %c), index=0 %fusion.1 = s32[] fusion(s32[] %fusion, s32[] %d), kind=kLoop, calls=%fused_computation.1 %e = s32[] get-tuple-element((s32[], s32[]) %c), index=1 %custom-call = s32[] custom-call(s32[] %fusion.1, s32[] %e), custom_call_target="some target" %fusion.2 = s32[] fusion(s32[] %custom-call, s32[] %a), kind=kLoop, calls=%fused_computation.2 ROOT %fusion.3 = s32[] fusion(s32[] %custom-call, s32[] %fusion.2), kind=kLoop, calls=%fused_computation.3 })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(hlo)); HloInstructionSequence seq; for (HloInstruction* x : module->entry_computation()->instructions()) { seq.push_back(x); } EXPECT_EQ(seq.size(), 10); CommandBufferScheduling::CommandBufferConfig config{ {DebugOptions::FUSION}, {}, device_desc()}; std::vector<HloInstructionSequence> command_buffer_sequences = CommandBufferScheduling::CollectCommandBufferSequences(seq, config); EXPECT_EQ(command_buffer_sequences.size(), 2); std::vector<HloInstruction*> seq_0 = command_buffer_sequences[0].instructions(); EXPECT_EQ(seq_0.size(), 3); EXPECT_EQ(seq_0[0]->opcode(), HloOpcode::kFusion); EXPECT_EQ(seq_0[1]->opcode(), HloOpcode::kGetTupleElement); EXPECT_EQ(seq_0[2]->opcode(), HloOpcode::kFusion); std::vector<HloInstruction*> seq_1 = command_buffer_sequences[1].instructions(); EXPECT_EQ(seq_1.size(), 2); EXPECT_EQ(seq_1[0]->opcode(), HloOpcode::kFusion); EXPECT_EQ(seq_1[1]->opcode(), HloOpcode::kFusion); } TEST_F(CommandBufferSchedulingTest, MoveParametersToFront) { const char* hlo = R"( HloModule TestModule, is_scheduled=true %fused_computation (param_0: s32[], param_1: s32[]) -> s32[] { %p0 = s32[] parameter(0) %p1 = s32[] parameter(1) ROOT %add = s32[] add(s32[] %p0, s32[] %p1) } %fused_computation.1 (param_0: s32[], param_1: s32[]) -> s32[] { %p0 = s32[] parameter(0) %p1 = s32[] parameter(1) ROOT %add = s32[] add(s32[] %p0, s32[] %p1) } ENTRY %main (a: s32[], b: s32[], c: s32[]) -> s32[] { %a = s32[] parameter(0) %b = s32[] parameter(1) %fusion = s32[] fusion(s32[] %a, s32[] %b), kind=kLoop, calls=%fused_computation %c = s32[] parameter(2) ROOT %fusion.1 = s32[] fusion(s32[] %a, s32[] %c), kind=kLoop, calls=%fused_computation.1 })"; const char* expected = R"( TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(hlo)); TF_ASSERT_OK(CommandBufferScheduling::MoveParametersAndConstantsToFront( module->entry_computation())); TF_ASSERT_OK_AND_ASSIGN( bool filecheck_matches, RunFileCheck( module->ToString(HloPrintOptions{}.set_print_operand_shape(false)), expected)); EXPECT_TRUE(filecheck_matches); } TEST_F(CommandBufferSchedulingTest, PrepareCommandBuffer) { const char* hlo = R"( HloModule TestModule, is_scheduled=true %fused_computation(param_0: s32[], param_1: s32[]) -> (s32[], s32[]) { %p0 = s32[] parameter(0) %p1 = s32[] parameter(1) ROOT %tuple.1 = (s32[], s32[]) tuple(s32[] %p0, s32[] %p1) } %fused_computation.1(param_0: s32[], param_1: s32[]) -> s32[] { %p0 = s32[] parameter(0) %p1 = s32[] parameter(1) ROOT %add = s32[] add(s32[] %p0, s32[] %p1) } ENTRY %main (a: s32[], b: s32[]) -> s32[] { %a = s32[] parameter(0) %b = s32[] custom-call(), custom_call_target="target" %fusion = (s32[], s32[]) fusion(s32[] %a, s32[] %b), kind=kLoop, calls=%fused_computation %d = s32[] get-tuple-element((s32[], s32[]) %fusion), index=0 %fusion.1 = s32[] fusion(s32[] %a, s32[] %d), kind=kLoop, calls=%fused_computation.1 ROOT %custom-call = 
s32[] custom-call(s32[] %fusion.1, s32[] %d), custom_call_target="some target" })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnUnverifiedModule(hlo)); EXPECT_EQ(module->entry_computation()->instruction_count(), 6); std::vector<HloInstruction*> instructions; HloInstructionSequence seq; for (HloInstruction* inst : module->entry_computation()->instructions()) { if (inst->opcode() == HloOpcode::kFusion || inst->opcode() == HloOpcode::kGetTupleElement) { seq.push_back(inst); } instructions.push_back(inst); } TF_ASSERT_OK_AND_ASSIGN( CommandBuffer command_buffer, CommandBufferScheduling::PrepareCommandBuffer(seq, module.get())); HloComputation* computation = module->AddComputation( std::move(command_buffer.computation), false); const char* expected = R"( TF_ASSERT_OK_AND_ASSIGN( bool filecheck_matches, RunFileCheck(computation->ToString( HloPrintOptions{}.set_print_operand_shape(false)), expected)); EXPECT_TRUE(filecheck_matches); auto& arguments = command_buffer.arguments; ASSERT_EQ(arguments.size(), 2); EXPECT_EQ(arguments[0], instructions[0]); EXPECT_EQ(arguments[1], instructions[1]); auto& results = command_buffer.results; ASSERT_EQ(results.size(), 2); EXPECT_EQ(results[0], instructions[3]); EXPECT_EQ(results[1], instructions[4]); } TEST_F(CommandBufferSchedulingTest, ForwardControlDependencies) { const char* hlo = R"( HloModule TestModule, is_scheduled=true %fused_computation (param_0: s32[], param_1: s32[]) -> s32[] { %p0 = s32[] parameter(0) %p1 = s32[] parameter(1) ROOT %add = s32[] add(s32[] %p0, s32[] %p1) } %fused_computation.1 (param_0: s32[], param_1: s32[]) -> s32[] { %p0 = s32[] parameter(0) %p1 = s32[] parameter(1) ROOT %add = s32[] add(s32[] %p0, s32[] %p1) } %fused_computation.2 (param_0: s32[], param_1: s32[]) -> s32[] { %p0 = s32[] parameter(0) %p1 = s32[] parameter(1) ROOT %add = s32[] add(s32[] %p0, s32[] %p1) } ENTRY %main (a: s32[], b: s32[]) -> s32[] { %a = s32[] parameter(0) %b = s32[] parameter(1) %custom-call = s32[] custom-call(), custom_call_target="some target" %fusion = s32[] fusion(s32[] %a, s32[] %b), kind=kLoop, calls=%fused_computation, control-predecessors={%custom-call} %fusion.1 = s32[] fusion(s32[] %a, s32[] %b), kind=kLoop, calls=%fused_computation.1, control-predecessors={%fusion} %custom-call.1 = s32[] custom-call(), custom_call_target="some target" %fusion.2 = s32[] fusion(s32[] %a, s32[] %b), kind=kLoop, calls=%fused_computation.2, control-predecessors={%fusion.1} ROOT %custom-call.2 = s32[] custom-call(s32[] %fusion.1, s32[] %fusion.2), custom_call_target="some target" })"; const char* expected = R"( CHECK: %command_buffer ([[P0:.+]]: s32[], [[P1:.+]]: s32[]) -> s32[] { CHECK: %[[P0]] = s32[] parameter(0) CHECK: %[[P1]] = s32[] parameter(1) CHECK: %[[F0:.+]] = s32[] fusion(%[[P0]], %[[P1]]) CHECK: ROOT {{.*}} = s32[] fusion(%[[P0]], %[[P1]]), {{.*}} control-predecessors={%[[F0]]} CHECK: } CHECK: ENTRY %main (a: s32[], b: s32[]) -> s32[] { CHECK: %a = s32[] parameter(0) CHECK: %b = s32[] parameter(1) CHECK: %custom-call = s32[] custom-call(), custom_call_target="some target" CHECK: %call = s32[] call(%a, %b), to_apply=%command_buffer, control-predecessors={%custom-call} CHECK: %custom-call.1 = s32[] custom-call(), custom_call_target="some target" CHECK: %[[F3:.+]] = s32[] fusion(%a, %b), kind=kLoop, calls=%fused_computation.2, control-predecessors={%call} CHECK: ROOT %custom-call.2 = s32[] custom-call(%call, %[[F3]]), custom_call_target="some target" CHECK: })"; RunAndFilecheckHloRewrite(hlo, 
CommandBufferScheduling(device_desc()), expected, [](HloModule* module) { EXPECT_TRUE(module->has_schedule()); TF_CHECK_OK(module->schedule().Verify()); }); } TEST_F(CommandBufferSchedulingTest, ForwardControlDependenciesToParams) { const char* hlo = R"( HloModule TestModule, is_scheduled=true %fused_computation.0 (p0: s32[], p1: s32[]) -> s32[] { %p0 = s32[] parameter(0) %p1 = s32[] parameter(1) ROOT %add = s32[] add(s32[] %p0, s32[] %p1) } %fused_computation.1 (p0: s32[], p1: s32[]) -> s32[] { %p0 = s32[] parameter(0) %p1 = s32[] parameter(1) ROOT %add = s32[] add(s32[] %p0, s32[] %p1) } ENTRY %main (a: s32[], b: s32[]) -> s32[] { %a = s32[] parameter(0) %b = s32[] parameter(1) %custom-call = s32[] custom-call(), custom_call_target="some target" %fusion = s32[] fusion(s32[] %custom-call, s32[] %a), kind=kLoop, calls=%fused_computation.0, control-predecessors={%custom-call} ROOT %fusion.1 = s32[] fusion(s32[] %fusion, s32[] %b), kind=kLoop, calls=%fused_computation.1 })"; const char* expected = R"( CHECK: ENTRY %main (a: s32[], b: s32[]) -> s32[] { CHECK: %a = s32[] parameter(0) CHECK: %b = s32[] parameter(1) CHECK: %[[CUSTOM_CALL:.+]] = s32[] custom-call(), custom_call_target="some target" CHECK: ROOT {{.*}} call(%[[CUSTOM_CALL]], %a, %b), to_apply=%command_buffer, control-predecessors={%[[CUSTOM_CALL]]} CHECK: })"; RunAndFilecheckHloRewrite(hlo, CommandBufferScheduling(device_desc()), expected, [](HloModule* module) { EXPECT_TRUE(module->has_schedule()); TF_CHECK_OK(module->schedule().Verify()); }); } TEST_F(CommandBufferSchedulingTest, WhileNotCommand) { const char* hlo = R"( HloModule TestModule, is_scheduled=true %fused_computation (param_0: f32[1]) -> f32[1] { %param_0 = f32[1]{0} parameter(0) ROOT %copy.5 = f32[1]{0} copy(f32[1]{0} %param_0) } %fused_computation.1 (param_0.1: f32[1], param_1: f32[1]) -> f32[1] { %param_0.1 = f32[1]{0} parameter(0) %param_1 = f32[1]{0} parameter(1) ROOT %add.2 = f32[1]{0} add(f32[1]{0} %param_0.1, f32[1]{0} %param_1) } %fused_computation.2 (param_0.2: f32[1], param_1.1: f32[1]) -> pred[1] { %param_0.2 = f32[1]{0} parameter(0) %param_1.1 = f32[1]{0} parameter(1) ROOT %compare.3 = pred[1]{0} compare(f32[1]{0} %param_0.2, f32[1]{0} %param_1.1), direction=LT } %fused_computation.3 (param_0.1: f32[1], param_1: f32[1]) -> f32[1] { %param_0.1 = f32[1]{0} parameter(0) %param_1 = f32[1]{0} parameter(1) ROOT %add.2 = f32[1]{0} add(f32[1]{0} %param_0.1, f32[1]{0} %param_1) } %body (Arg_.3: f32[1]) -> f32[1] { %constant_4 = f32[1]{0} constant({1}) %Arg_.3 = f32[1]{0} parameter(0) %custom-call = s32[] custom-call(), custom_call_target="some target" %add = f32[1]{0} fusion(f32[1]{0} %Arg_.3, f32[1]{0} %constant_4), kind=kLoop, calls=%fused_computation.1, control-predecessors={%custom-call} ROOT %wrapped_add.1 = f32[1]{0} fusion(f32[1]{0} %add, f32[1]{0} %constant_4), kind=kLoop, calls=%fused_computation.3, control-predecessors={%custom-call} } %cond (Arg_.11: f32[1]) -> pred[] { %constant = f32[1]{0} constant({100}) %Arg_.11 = f32[1]{0} parameter(0) %wrapped_compare.2 = pred[1]{0} fusion(f32[1]{0} %Arg_.11, f32[1]{0} %constant), kind=kLoop, calls=%fused_computation.2 ROOT %bitcast = pred[] bitcast(pred[1]{0} %wrapped_compare.2) } ENTRY %main.18 (Arg_0.1: f32[1]) -> f32[] { %Arg_0.1 = f32[1]{0} parameter(0), sharding={replicated} %wrapped_copy.4 = f32[1]{0} fusion(f32[1]{0} %Arg_0.1), kind=kLoop, calls=%fused_computation %while.16 = f32[1]{0} while(f32[1]{0} %wrapped_copy.4), condition=%cond, body=%body ROOT %bitcast.1 = f32[] bitcast(f32[1]{0} %while.16) })"; 
const char* expected = R"( CHECK: %command_buffer ([[P0:.+]]: f32[1], [[P1:.+]]: f32[1]) -> f32[1] { CHECK: %[[P0]] = f32[1]{0} parameter(0) CHECK: %[[P1]] = f32[1]{0} parameter(1) CHECK: %[[ADD:.*]] = f32[1]{0} fusion(%[[P0]], %[[P1]]), kind=kLoop CHECK: ROOT {{.*}} = f32[1]{0} fusion(%[[ADD]], %[[P1]]), kind=kLoop CHECK: } CHECK: %[[BODY:[a-z_0-9.]+]] ([[P0:.+]]: f32[1]) -> f32[1] { CHECK: %[[C1:.*]] = f32[1]{0} constant({1}) CHECK: %[[P0]] = f32[1]{0} parameter(0) CHECK: %[[CC:.*]] = s32[] custom-call(), custom_call_target="some target" CHECK: ROOT %call = f32[1]{0} call(%[[P0]], %[[C1]]), to_apply=%command_buffer, control-predecessors={%[[CC]]} CHECK: } CHECK: ENTRY %[[MAIN:.+]] ([[ARG0:.+]]: f32[1]) -> f32[] { CHECK: %[[ARG0]] = f32[1]{0} parameter(0) CHECK: %[[COPY:.*]] = f32[1]{0} fusion(%[[ARG0]]), kind=kLoop CHECK: %[[WHILE:.*]] = f32[1]{0} while(%[[COPY]]), condition=%[[COND:[a-z_0-9.]+]], body=%[[BODY]] CHECK: ROOT %[[BC:.+]] = f32[] bitcast(%[[WHILE]]) CHECK: })"; RunAndFilecheckHloRewrite(hlo, CommandBufferScheduling(device_desc()), expected, [](HloModule* module) { EXPECT_TRUE(module->has_schedule()); TF_CHECK_OK(module->schedule().Verify()); }); } TEST_F(CommandBufferSchedulingTest, While) { const char* hlo = R"( HloModule TestModule, is_scheduled=true %fused_computation (param_0: f32[1]) -> f32[1] { %param_0 = f32[1]{0} parameter(0) ROOT %copy.5 = f32[1]{0} copy(f32[1]{0} %param_0) } %fused_computation.1 (param_0.1: f32[1], param_1: f32[1]) -> f32[1] { %param_0.1 = f32[1]{0} parameter(0) %param_1 = f32[1]{0} parameter(1) ROOT %add.2 = f32[1]{0} add(f32[1]{0} %param_0.1, f32[1]{0} %param_1) } %fused_computation.2 (param_0.2: f32[1], param_1.1: f32[1]) -> pred[1] { %param_0.2 = f32[1]{0} parameter(0) %param_1.1 = f32[1]{0} parameter(1) ROOT %compare.3 = pred[1]{0} compare(f32[1]{0} %param_0.2, f32[1]{0} %param_1.1), direction=LT } %body (Arg_.3: f32[1]) -> f32[1] { %constant_4 = f32[1]{0} constant({1}) %Arg_.3 = f32[1]{0} parameter(0) ROOT %wrapped_add.1 = f32[1]{0} fusion(f32[1]{0} %Arg_.3, f32[1]{0} %constant_4), kind=kLoop, calls=%fused_computation.1 } %cond (Arg_.11: f32[1]) -> pred[] { %constant = f32[1]{0} constant({100}) %Arg_.11 = f32[1]{0} parameter(0) %wrapped_compare.2 = pred[1]{0} fusion(f32[1]{0} %Arg_.11, f32[1]{0} %constant), kind=kLoop, calls=%fused_computation.2 ROOT %bitcast = pred[] bitcast(pred[1]{0} %wrapped_compare.2) } ENTRY %main.18 (Arg_0.1: f32[1]) -> f32[] { %Arg_0.1 = f32[1]{0} parameter(0), sharding={replicated} %wrapped_copy.4 = f32[1]{0} fusion(f32[1]{0} %Arg_0.1), kind=kLoop, calls=%fused_computation %while.16 = f32[1]{0} while(f32[1]{0} %wrapped_copy.4), condition=%cond, body=%body ROOT %bitcast.1 = f32[] bitcast(f32[1]{0} %while.16) })"; const char* expected = R"( CHECK: %command_buffer ([[P0:.+]]: f32[1]) -> f32[1] { CHECK: %[[P0]] = f32[1]{0} parameter(0) CHECK: %[[COPY:.*]] = f32[1]{0} fusion(%[[P0]]), kind=kLoop CHECK: ROOT {{.*}} = f32[1]{0} while(%[[COPY]]), condition=%[[COND:[a-z_0-9.]+]], body=%[[BODY:[a-z_0-9.]+]] CHECK: } CHECK: ENTRY %[[MAIN:.+]] ([[ARG0:.+]]: f32[1]) -> f32[] { CHECK: %[[ARG0]] = f32[1]{0} parameter(0) CHECK: %call = f32[1]{0} call(%[[ARG0]]), to_apply=%command_buffer CHECK: ROOT %[[BC:.+]] = f32[] bitcast(%call) CHECK: })"; RunAndFilecheckHloRewrite(hlo, CommandBufferScheduling(device_desc()), expected, [](HloModule* module) { EXPECT_TRUE(module->has_schedule()); TF_CHECK_OK(module->schedule().Verify()); }); } TEST_F(CommandBufferSchedulingTest, Conditional) { const char* hlo = R"( HloModule TestModule, 
is_scheduled=true %fused_computation.1 (param_0.2: s32[5]) -> s32[5] { %param_0.2 = s32[5]{0} parameter(0) ROOT %negate.2 = s32[5]{0} negate(s32[5]{0} %param_0.2) } %region_0.7 (Arg_.8: s32[5]) -> (s32[5]) { %Arg_.8 = s32[5]{0} parameter(0) %wrapped_negate.1 = s32[5]{0} fusion(s32[5]{0} %Arg_.8), kind=kLoop, calls=%fused_computation.1 ROOT %tuple.3 = (s32[5]{0}) tuple(s32[5]{0} %wrapped_negate.1) } %fused_computation.2 (param_0.3: s32[5]) -> s32[5] { %param_0.3 = s32[5]{0} parameter(0) ROOT %not.2 = s32[5]{0} not(s32[5]{0} %param_0.3) } %region_1.10 (Arg_.11: s32[5]) -> (s32[5]) { %Arg_.11 = s32[5]{0} parameter(0) %wrapped_not.1 = s32[5]{0} fusion(s32[5]{0} %Arg_.11), kind=kLoop, calls=%fused_computation.2 ROOT %tuple.4 = (s32[5]{0}) tuple(s32[5]{0} %wrapped_not.1) } %fused_computation.3 (param_0.4: s32[5]) -> s32[5] { %param_0.4 = s32[5]{0} parameter(0) ROOT %multiply.2 = s32[5]{0} multiply(s32[5]{0} %param_0.4, s32[5]{0} %param_0.4) } %region_2.13 (Arg_.14: s32[5]) -> (s32[5]) { %Arg_.14 = s32[5]{0} parameter(0) %wrapped_multiply.1 = s32[5]{0} fusion(s32[5]{0} %Arg_.14), kind=kLoop, calls=%fused_computation.3 ROOT %tuple.5 = (s32[5]{0}) tuple(s32[5]{0} %wrapped_multiply.1) } %fused_computation (param_0.1: s64[]) -> s32[] { %constant_1 = s32[] constant(0) %param_0.1 = s64[] parameter(0) %convert.2 = s32[] convert(s64[] %param_0.1) %constant_0 = s32[] constant(2) ROOT %clamp.2 = s32[] clamp(s32[] %constant_1, s32[] %convert.2, s32[] %constant_0) } ENTRY %main.17 (Arg_0.1: s64[], Arg_1.2: s32[5]) -> s32[5] { %Arg_0.1 = s64[] parameter(0), sharding={replicated} %fusion = s32[] fusion(s64[] %Arg_0.1), kind=kLoop, calls=%fused_computation %Arg_1.2 = s32[5]{0} parameter(1), sharding={replicated} %conditional.16.clone = (s32[5]{0}) conditional(s32[] %fusion, s32[5]{0} %Arg_1.2, s32[5]{0} %Arg_1.2, s32[5]{0} %Arg_1.2), branch_computations={%region_0.7, %region_1.10, %region_2.13} ROOT %get-tuple-element = s32[5]{0} get-tuple-element((s32[5]{0}) %conditional.16.clone), index=0 })"; const char* expected = R"( CHECK: %command_buffer ([[P0:.+]]: s64[], [[P1:.+]]: s32[5]) -> (s32[5]) { CHECK: %[[P0]] = s64[] parameter(0) CHECK: %[[P1]] = s32[5]{0} parameter(1) CHECK: %[[FUSION:.*]] = s32[] fusion(%[[P0]]), kind=kLoop CHECK: ROOT {{.*}} = (s32[5]{0}) conditional(%[[FUSION]], %[[P1]], %[[P1]], %[[P1]]), branch_computations={%[[B1:[a-z_0-9.]+]], %[[B2:[a-z_0-9.]+]], %[[B3:[a-z_0-9.]+]]} CHECK: } CHECK: ENTRY %[[MAIN:.+]] ([[ARG0:.+]]: s64[], [[ARG1:.+]]: s32[5]) -> s32[5] { CHECK: %[[ARG0]] = s64[] parameter(0) CHECK: %[[ARG1]] = s32[5]{0} parameter(1) CHECK: %call = (s32[5]{0}) call(%[[ARG0]], %[[ARG1]]), to_apply=%command_buffer CHECK: ROOT %[[GEP:.+]] = s32[5]{0} get-tuple-element(%call) CHECK: })"; RunAndFilecheckHloRewrite(hlo, CommandBufferScheduling(device_desc()), expected, [](HloModule* module) { EXPECT_TRUE(module->has_schedule()); TF_CHECK_OK(module->schedule().Verify()); }); } TEST_F(CommandBufferSchedulingTest, CuDnnFusionGraphCaptureWorks) { const std::string kHloText = R"( HloModule m, is_scheduled=true fusion0 { p0 = f32[64,64] parameter(0) p1 = f32[64,64] parameter(1) ROOT d = f32[64,64] dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} } fusion1 { p0 = f32[64,64] parameter(0) p1 = f32[64,64] parameter(1) ROOT d = f32[64,64] dot(p0, p1), lhs_contracting_dims={0}, rhs_contracting_dims={1} } fusion_a { p0 = f32[64,64] parameter(0) p1 = f32[64,64] parameter(1) ROOT a = f32[64,64] add(p0, p1) } ENTRY e { p0 = f32[64,64] parameter(0) p1 = f32[64,64] parameter(1) d0 = f32[64,64] 
fusion(p0, p1), kind=kCustom, calls=fusion0, backend_config={"fusion_backend_config": {"kind":"__cudnn$fusion"}} a = f32[64,64] fusion(d0, d0), kind=kLoop, calls=fusion_a ROOT d1 = f32[64,64] fusion(a, p1), kind=kCustom, calls=fusion1, backend_config={"fusion_backend_config": {"kind":"__cudnn$fusion"}} })"; const std::string kExpected = R"( ; CHECK: ENTRY ; CHECK-NEXT: parameter ; CHECK-NEXT: parameter ; CHECK-NEXT: ROOT ; CHECK-SAME: call( ; CHECK-SAME: to_apply=%command_buffer })"; RunAndFilecheckHloRewrite(kHloText, CommandBufferScheduling(device_desc()), kExpected, [](HloModule* module) { EXPECT_TRUE(module->has_schedule()); TF_CHECK_OK(module->schedule().Verify()); }); } TEST_F(CommandBufferSchedulingTest, AsyncCustomCall) { const char* hlo = R"( HloModule m, is_scheduled=true ENTRY %main (a: s32[], b: s32[]) -> f32[2,2] { %p = f32[2,2]{1,0} parameter(0) %start1 = ((f32[2,2], f32[2,2]), (f32[2,2], s8[4]), u32[]) custom-call-start(f32[2,2] %p, f32[2,2] %p), custom_call_target="__cublas$gemm" %start2 = ((f32[2,2], f32[2,2]), (f32[2,2], s8[4]), u32[]) custom-call-start(f32[2,2] %p, f32[2,2] %p), custom_call_target="__cublas$gemm" %done1 = (f32[2,2], s8[4]) custom-call-done(((f32[2,2], f32[2,2]), (f32[2,2], s8[4]), u32[]) %start1) %done2 = (f32[2,2], s8[4]) custom-call-done(((f32[2,2], f32[2,2]), (f32[2,2], s8[4]), u32[]) %start2) %result1 = f32[2,2] get-tuple-element((f32[2,2], s8[4]) %done1), index=0 %result2 = f32[2,2] get-tuple-element((f32[2,2], s8[4]) %done2), index=0 ROOT %sum = f32[2,2] add(f32[2,2] %result1, f32[2,2] %result2) })"; const char* expected = R"( CHECK: %command_buffer ([[P:.+]]: f32[2,2]) -> ((f32[2,2], s8[4]), (f32[2,2], s8[4])) { CHECK: %[[P]] = f32[2,2]{1,0} parameter(0) CHECK: %[[S1:.+]] = ((f32[2,2]{1,0}, f32[2,2]{1,0}), (f32[2,2]{1,0}, s8[4]{0}), u32[]) custom-call-start(%[[P]], %[[P]]), custom_call_target="__cublas$gemm" CHECK: %[[S2:.+]] = ((f32[2,2]{1,0}, f32[2,2]{1,0}), (f32[2,2]{1,0}, s8[4]{0}), u32[]) custom-call-start(%[[P]], %[[P]]), custom_call_target="__cublas$gemm" CHECK: %[[D1:.+]] = (f32[2,2]{1,0}, s8[4]{0}) custom-call-done(%[[S1]]) CHECK: %[[D2:.+]] = (f32[2,2]{1,0}, s8[4]{0}) custom-call-done(%[[S2]]) CHECK: ROOT %[[T:.+]] = ((f32[2,2]{1,0}, s8[4]{0}), (f32[2,2]{1,0}, s8[4]{0})) tuple(%[[D1]], %[[D2]]) CHECK: })"; RunAndFilecheckHloRewrite(hlo, CommandBufferScheduling(device_desc()), expected, [](HloModule* module) { EXPECT_TRUE(module->has_schedule()); TF_CHECK_OK(module->schedule().Verify()); }); } TEST_F(CommandBufferSchedulingTest, AsyncFusion) { const char* hlo = R"( HloModule m, is_scheduled=true add0 { %p0 = s32[] parameter(0) %p1 = s32[] parameter(1) ROOT %add = s32[] add(s32[] %p0, s32[] %p1) } add1 { %p0 = s32[] parameter(0) %p1 = s32[] parameter(1) ROOT %add = s32[] add(s32[] %p0, s32[] %p1) } ENTRY main { %a = s32[] parameter(0) %b = s32[] parameter(1) %start1 = ((s32[], s32[]), s32[], u32[]) fusion-start(%a, %b), kind=kLoop, calls=add0 %start2 = ((s32[], s32[]), s32[], u32[]) fusion-start(%a, %b), kind=kLoop, calls=add1 %done1 = s32[] fusion-done(%start1) %done2 = s32[] fusion-done(%start2) ROOT %tuple = (s32[], s32[]) tuple(%done1, %done2) })"; const char* expected = R"( CHECK: %command_buffer {{.*}} -> (s32[], s32[]) { CHECK: %[[S1:.+]] = ((s32[], s32[]), s32[], u32[]) fusion-start CHECK: %[[S2:.+]] = ((s32[], s32[]), s32[], u32[]) fusion-start CHECK: %[[D1:.+]] = s32[] fusion-done(%[[S1]]) CHECK: %[[D2:.+]] = s32[] fusion-done(%[[S2]]) CHECK: ROOT {{.*}} = (s32[], s32[]) tuple(%[[D1]], %[[D2]]) CHECK: })"; 
RunAndFilecheckHloRewrite(hlo, CommandBufferScheduling(device_desc()), expected, [](HloModule* module) { EXPECT_TRUE(module->has_schedule()); TF_CHECK_OK(module->schedule().Verify()); }); } TEST_F(CommandBufferSchedulingTest, AsyncAlltoAll) { const char* hlo = R"( HloModule m, is_scheduled=true async_computation.1 { param.1 = f32[4,8,128]{2,1,0} parameter(0) ROOT all-to-all.1 = f32[4,8,128]{2,1,0} all-to-all(param.1), channel_id=1, dimensions={1} } ENTRY main { param.0 = f32[4,8,128]{2,1,0} parameter(0) all-to-all-start = ((f32[4,8,128]{2,1,0}), f32[4,8,128]{2,1,0}) async-start(param.0), calls=async_computation.1 ROOT all-to-all-done = f32[4,8,128]{2,1,0} async-done(all-to-all-start) })"; const char* expected = R"( CHECK: %command_buffer ([[P:.+]]: f32[4,8,128]) -> f32[4,8,128] { CHECK: %[[P]] = f32[4,8,128]{2,1,0} parameter(0) CHECK: %[[S1:.+]] = ((f32[4,8,128]{2,1,0}), f32[4,8,128]{2,1,0}) all-to-all-start(%[[P]]), channel_id=1, replica_groups={}, dimensions={1} CHECK: ROOT {{.*}} = f32[4,8,128]{2,1,0} all-to-all-done(%[[S1]]) CHECK: })"; RunAndFilecheckHloRewrite(hlo, CommandBufferScheduling(device_desc()), expected, [](HloModule* module) { EXPECT_TRUE(module->has_schedule()); TF_CHECK_OK(module->schedule().Verify()); }); } TEST_F(CommandBufferSchedulingTest, DynamicSliceFusionDynamicSlicing) { if (backend().platform()->Name() == "Host") { GTEST_SKIP() << "GPU support required for this test"; } const char* hlo = R"( HloModule jit_slice, replica_count=2 add { a = s32[] parameter(0) b = s32[] parameter(1) ROOT add = add(a,b) } ENTRY main.9 { p0 = s32[2,8,32]{2,1,0} parameter(0) p1 = s32[8,32]{1,0} parameter(1) c0 = s32[] constant(0) c1 = s32[] constant(1) slice = s32[1,8,32]{2,1,0} dynamic-slice(p0, c1, c0, c0), dynamic_slice_sizes={1,8,32} input = s32[8,32]{1,0} reshape(slice) rs = s32[4,32] reduce-scatter(input), channel_id=64, replica_groups={{0,1}}, use_global_device_ids=true, dimensions={0}, to_apply=add ROOT dus = s32[8,32] dynamic-update-slice(p1, rs, c0, c0) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, GetOptimizedModule(hlo)); HloModuleConfig config(m->config()); DebugOptions options(config.debug_options()); options.set_xla_gpu_graph_min_graph_size(0); auto check = [&m, this](DebugOptions options) -> absl::Status { auto m_clone = m->Clone(); HloModuleConfig config(m_clone->config()); config.set_debug_options(options); m_clone->set_config(config); TF_ASSIGN_OR_RETURN(auto exec, CreateExecutable(std::move(m_clone), false)); auto gpu_exec = std::unique_ptr<GpuExecutable>( static_cast<GpuExecutable*>(exec.release())); TF_RET_CHECK(llvm::any_of(gpu_exec->GetThunk().thunks(), [](const std::unique_ptr<Thunk>& thunk) { return thunk->kind() == Thunk::kDynamicSlice; })); return absl::OkStatus(); }; options.clear_xla_gpu_enable_command_buffer(); options.add_xla_gpu_enable_command_buffer(DebugOptions::FUSION); options.add_xla_gpu_enable_command_buffer(DebugOptions::COLLECTIVES); TF_ASSERT_OK(check(options)); options.clear_xla_gpu_enable_command_buffer(); TF_ASSERT_OK(check(options)); options.clear_xla_gpu_enable_command_buffer(); options.add_xla_gpu_enable_command_buffer(DebugOptions::COLLECTIVES); TF_ASSERT_OK(check(options)); options.clear_xla_gpu_enable_command_buffer(); options.add_xla_gpu_enable_command_buffer(DebugOptions::FUSION); TF_ASSERT_OK(check(options)); } TEST_F(CommandBufferSchedulingTest, DynamicSliceFusionStaticSlicing) { if (backend().platform()->Name() == "Host" || backend().device_count() < 2) { GTEST_SKIP() << "At least two GPUs required for this test"; } const char* hlo = R"( 
HloModule jit_slice, replica_count=2 add { a = s32[] parameter(0) b = s32[] parameter(1) ROOT add = add(a,b) } ENTRY main.9 { p0 = s32[2,8,32]{2,1,0} parameter(0) p1 = s32[8,32]{1,0} parameter(1) c0 = s32[] constant(0) c1 = s32[] constant(1) slice = s32[1,8,32]{2,1,0} slice(p0), slice={[1:2], [0:8], [0:32]} input = s32[8,32]{1,0} reshape(slice) ROOT rs = s32[4,32] reduce-scatter(input), channel_id=64, replica_groups={{0,1}}, use_global_device_ids=true, dimensions={0}, to_apply=add })"; TF_ASSERT_OK_AND_ASSIGN(auto m, GetOptimizedModule(hlo)); HloModuleConfig config(m->config()); DebugOptions options(config.debug_options()); options.set_xla_gpu_graph_min_graph_size(0); auto get_exec = [&m, this](DebugOptions options) -> absl::StatusOr<std::unique_ptr<GpuExecutable>> { auto m_clone = m->Clone(); HloModuleConfig config(m_clone->config()); config.set_debug_options(options); m_clone->set_config(config); TF_ASSIGN_OR_RETURN(auto exec, CreateExecutable(std::move(m_clone), false)); return std::unique_ptr<GpuExecutable>( static_cast<GpuExecutable*>(exec.release())); }; { options.clear_xla_gpu_enable_command_buffer(); options.add_xla_gpu_enable_command_buffer(DebugOptions::FUSION); options.add_xla_gpu_enable_command_buffer(DebugOptions::COLLECTIVES); TF_ASSERT_OK_AND_ASSIGN(auto gpu_exec, get_exec(options)); Thunk* child = gpu_exec->GetThunk().thunks()[0].get(); ASSERT_EQ(child->kind(), Thunk::kCommandBuffer); } { options.clear_xla_gpu_enable_command_buffer(); TF_ASSERT_OK_AND_ASSIGN(auto gpu_exec, get_exec(options)); Thunk* child = gpu_exec->GetThunk().thunks()[0].get(); ASSERT_NE(child->kind(), Thunk::kCommandBuffer); } { options.clear_xla_gpu_enable_command_buffer(); options.add_xla_gpu_enable_command_buffer(DebugOptions::COLLECTIVES); TF_ASSERT_OK_AND_ASSIGN(auto gpu_exec, get_exec(options)); Thunk* child = gpu_exec->GetThunk().thunks()[0].get(); ASSERT_EQ(child->kind(), Thunk::kCommandBuffer); } { options.clear_xla_gpu_enable_command_buffer(); options.add_xla_gpu_enable_command_buffer(DebugOptions::FUSION); TF_ASSERT_OK_AND_ASSIGN(auto gpu_exec, get_exec(options)); Thunk* child = gpu_exec->GetThunk().thunks()[0].get(); ASSERT_NE(child->kind(), Thunk::kCommandBuffer); } options.clear_xla_gpu_enable_command_buffer(); auto m_ref = m->Clone(); config.set_debug_options(options); m_ref->set_config(config); config.set_debug_options(GetDebugOptionsForTest()); m->set_config(config); ASSERT_TRUE(RunAndCompareTwoModulesReplicated(std::move(m_ref), std::move(m), false, true, std::nullopt)); } TEST_F(CommandBufferSchedulingTest, ReturnFalseWhenNoChange) { const char* hlo = R"( HloModule module, is_scheduled=true ENTRY main { a = s32[8,8] parameter(0) b = s32[8,8] parameter(1) ROOT call = s32[8,8] custom-call(a,b), custom_call_target="__cublas$gemm" } )"; HloModuleConfig config; DebugOptions options = GetDebugOptionsForTest(); options.clear_xla_gpu_enable_command_buffer(); options.add_xla_gpu_enable_command_buffer(DebugOptions::COLLECTIVES); config.set_debug_options(options); TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo, config)); RunAndFilecheckHloRewrite(hlo, CommandBufferScheduling(device_desc()), std::nullopt); } TEST_F(CommandBufferSchedulingTest, ReturnTrueWhenOnlyParamMoved) { const char* hlo = R"( HloModule module, is_scheduled=true ENTRY main { a = s32[8,8] parameter(0) b = s32[8,8] parameter(1) call = s32[8,8] custom-call(a,b), custom_call_target="__cublas$gemm" c = s32[8,8] parameter(2) ROOT call2 = s32[8,8] custom-call(call, c), custom_call_target="__cublas$gemm" } )"; 
HloModuleConfig config; DebugOptions options = GetDebugOptionsForTest(); options.clear_xla_gpu_enable_command_buffer(); options.add_xla_gpu_enable_command_buffer(DebugOptions::COLLECTIVES); config.set_debug_options(options); TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(hlo, config)); RunAndFilecheckHloRewrite(hlo, CommandBufferScheduling(device_desc()), R"( )"); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/command_buffer_scheduling.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/command_buffer_scheduling_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
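The command-buffer tests in the record above repeatedly follow the same configuration step: clear the xla_gpu_enable_command_buffer list in DebugOptions, re-add only the command types under test (FUSION, COLLECTIVES), lower xla_gpu_graph_min_graph_size, and attach the result to an HloModuleConfig before compiling. Below is a minimal sketch of that step, using only the DebugOptions and HloModuleConfig calls that appear in the test source above; the helper name ConfigWithCommandBufferTypes is illustrative and not part of the test file.

#include "xla/service/hlo_module_config.h"
#include "xla/xla.pb.h"  // DebugOptions

namespace xla::gpu {

// Sketch only: restrict command-buffer capture to fusions and collectives,
// mirroring the DebugOptions toggles exercised by the tests above.
HloModuleConfig ConfigWithCommandBufferTypes(DebugOptions base) {
  base.clear_xla_gpu_enable_command_buffer();
  base.add_xla_gpu_enable_command_buffer(DebugOptions::FUSION);
  base.add_xla_gpu_enable_command_buffer(DebugOptions::COLLECTIVES);
  base.set_xla_gpu_graph_min_graph_size(0);  // capture even very small graphs
  HloModuleConfig config;
  config.set_debug_options(base);
  return config;
}

}  // namespace xla::gpu

In the tests, a config built this way is set on a cloned module (m_clone->set_config(config)) or passed to ParseAndReturnVerifiedModule before compiling and inspecting the emitted thunks.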
c5c247b6-696e-48cf-bbe8-95daf7a893d3
cpp
tensorflow/tensorflow
gemm_rewriter
third_party/xla/xla/service/gpu/transforms/gemm_rewriter.cc
third_party/xla/xla/service/gpu/transforms/gemm_rewriter_test.cc
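The code that follows is the GemmRewriter pass, which replaces eligible HLO dot instructions with custom calls targeting kGemmCallTarget, kCublasLtMatmulCallTarget, or kCublasLtMatmulF8CallTarget and defines thin pattern-matcher wrappers (Gemm, CublasLtMatmul, GemmOrCublasLtMatmul, ...) around m::CustomCall for those targets. Below is a minimal sketch of recognizing such a rewritten instruction with the same matcher style; the function name IsRewrittenGemm is illustrative and not part of the pass.

#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/cublas_cudnn.h"  // kGemmCallTarget, kCublasLtMatmulCallTarget
#include "xla/service/pattern_matcher.h"

namespace xla::gpu {
namespace m = match;

// Sketch only: true if `instr` is one of the cuBLAS / cublasLt custom calls
// that the rewriter below emits for a dot.
bool IsRewrittenGemm(HloInstruction *instr) {
  HloInstruction *matched = nullptr;
  return Match(instr, m::CustomCall(&matched, {kGemmCallTarget,
                                               kCublasLtMatmulCallTarget}));
}

}  // namespace xla::gpu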
#include "xla/service/gpu/transforms/gemm_rewriter.h" #include <algorithm> #include <array> #include <cmath> #include <cstddef> #include <cstdint> #include <initializer_list> #include <limits> #include <memory> #include <numeric> #include <optional> #include <tuple> #include <utility> #include <variant> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/hlo/evaluator/hlo_evaluator.h" #include "xla/hlo/ir/dfs_hlo_visitor_with_default.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/layout.h" #include "xla/layout_util.h" #include "xla/literal.h" #include "xla/literal_util.h" #include "xla/permutation_util.h" #include "xla/primitive_util.h" #include "xla/service/algorithm_util.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/cublas_cudnn.h" #include "xla/service/gpu/ir_emission_utils.h" #include "xla/service/gpu/matmul_utils.h" #include "xla/service/hlo_creation_utils.h" #include "xla/service/pattern_matcher.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/status_macros.h" #include "xla/stream_executor/blas.h" #include "xla/stream_executor/device_description.h" #include "xla/stream_executor/gpu/gpu_blas_lt.h" #include "xla/stream_executor/semantic_version.h" #include "xla/tsl/protobuf/dnn.pb.h" #include "xla/types.h" #include "xla/util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/ml_dtypes.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { namespace m = match; absl::Status SetName(HloModule *module, HloInstruction *gemm) { if (IsCublasLtMatmul(*gemm)) { module->SetAndUniquifyInstrName(gemm, "cublas-lt-matmul"); return absl::OkStatus(); } TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config, gemm->backend_config<GpuBackendConfig>()); const GemmBackendConfig &config = gpu_config.gemm_backend_config(); const DotDimensionNumbers &dot_dims = config.dot_dimension_numbers(); bool is_batch_dot = !dot_dims.lhs_batch_dimensions().empty() || !dot_dims.rhs_batch_dimensions().empty(); module->SetAndUniquifyInstrName( gemm, is_batch_dot ? 
"cublas-batch-gemm" : "cublas-gemm"); return absl::OkStatus(); } bool SupportsEpilogueFusion(PrimitiveType type) { switch (type) { case F8E4M3FN: case F8E5M2: case F16: case BF16: case F32: case F64: return true; default: return false; } } bool IsF8Type(const HloInstruction *instr) { return primitive_util::IsF8Type(instr->shape().element_type()); } Shape PadShapeToMultipleOf16(const Shape old_shape, const absl::Span<const int64_t> batch_dims) { Shape padded_shape = old_shape; for (int i = 0; i < old_shape.rank(); ++i) { if (!absl::c_linear_search(batch_dims, i)) { int64_t padded_dimension = RoundUpTo<int64_t>(old_shape.dimensions(i), 16); padded_shape.set_dimensions(i, padded_dimension); } } return padded_shape; } HloInstruction *PadOperandToTargetShape(const Shape &target, HloInstruction *x) { if (ShapeUtil::Equal(target, x->shape()) || !ShapeUtil::SameElementType(x->shape(), target)) { return x; } PaddingConfig padding_config; for (int i = 0; i < x->shape().rank(); ++i) { auto dimension = padding_config.add_dimensions(); dimension->set_edge_padding_low(0); dimension->set_edge_padding_high(target.dimensions(i) - x->shape().dimensions(i)); dimension->set_interior_padding(0); } HloInstruction *zero = x->AddInstruction(HloInstruction::CreateConstant( LiteralUtil::Zero(x->shape().element_type()))); return x->AddInstruction( HloInstruction::CreatePad(target, x, zero, padding_config)); } HloInstruction *PadOperandToMultipleOf16(absl::Span<const int64_t> batch_dims, HloInstruction *x) { Shape padded_shape = PadShapeToMultipleOf16(x->shape(), batch_dims); return PadOperandToTargetShape(padded_shape, x); } absl::StatusOr<HloInstruction *> InvertAndConvertScalar(HloInstruction *scalar, bool invert) { DCHECK(ShapeUtil::IsScalar(scalar->shape())); if (invert) { Literal one_literal = LiteralUtil::One(scalar->shape().element_type()); HloInstruction *one = scalar->parent()->AddInstruction( HloInstruction::CreateConstant(one_literal.Clone())); TF_ASSIGN_OR_RETURN(scalar, MakeBinaryHlo(HloOpcode::kDivide, one, scalar, &scalar->metadata())); } if (scalar->shape().element_type() != F32) { scalar = MakeConvertToHlo(scalar, F32, &scalar->metadata()); } return scalar; } using InstrPath = std::vector<std::pair<HloInstruction *, int>>; std::optional<InstrPath> FindF8SubgraphRecursive( HloInstruction *instr, absl::flat_hash_set<int> &visited_instrs) { if (!visited_instrs.emplace(instr->unique_id()).second) { return std::nullopt; } if (IsF8Type(instr)) { return InstrPath{{instr, -1}}; } if (instr->operand_count() == 1 || instr->opcode() == HloOpcode::kDivide || instr->opcode() == HloOpcode::kDynamicSlice || instr->opcode() == HloOpcode::kPad) { std::optional<InstrPath> subgraph = FindF8SubgraphRecursive(instr->mutable_operand(0), visited_instrs); if (subgraph) { subgraph->emplace_back(std::make_pair(instr, 0)); } return subgraph; } else if (instr->opcode() == HloOpcode::kMultiply || instr->opcode() == HloOpcode::kSelect) { for (int k = 0; k < 2; ++k) { int operand_idx = k + (instr->opcode() == HloOpcode::kSelect); std::optional<InstrPath> subgraph = FindF8SubgraphRecursive( instr->mutable_operand(operand_idx), visited_instrs); if (subgraph) { subgraph->emplace_back(std::make_pair(instr, operand_idx)); return subgraph; } } } return std::nullopt; } struct MatchedFp8Param { HloInstruction *fp8_input = nullptr; HloInstruction *scale = nullptr; bool mult_scale = false; InstrPath commutative_ops; }; std::optional<MatchedFp8Param> MatchFp8Param(HloInstruction *instr) { absl::flat_hash_set<int> visited_instrs; 
std::optional<InstrPath> maybe_subgraph = FindF8SubgraphRecursive(instr, visited_instrs); if (!maybe_subgraph) { return std::nullopt; } InstrPath &subgraph = maybe_subgraph.value(); MatchedFp8Param param; if (subgraph.size() == 1) { CHECK(IsF8Type(subgraph[0].first)); param.fp8_input = subgraph[0].first; return param; } int num_dequant_ops; if (subgraph.size() > 2 && Match(subgraph[2].first, m::MultiplyAnyOrder(m::Convert(m::Op(&param.fp8_input)), m::Broadcast(m::Op(&param.scale))))) { param.mult_scale = true; num_dequant_ops = 2; } else if (subgraph.size() > 2 && Match(subgraph[2].first, m::Divide(m::Convert(m::Op(&param.fp8_input)), m::Broadcast(m::Op(&param.scale))))) { param.mult_scale = false; num_dequant_ops = 2; } else if (subgraph.size() > 1 && Match(subgraph[1].first, m::Convert(m::Op(&param.fp8_input)))) { param.scale = nullptr; num_dequant_ops = 1; } else { VLOG(1) << "Possible intended FP8 GEMM operating on " << instr->ToShortString() << " not rewritten into FP8 Custom Call."; return std::nullopt; } auto preserves_element_type = [](const HloInstruction *instr) -> bool { return ShapeUtil::SameElementType(instr->shape(), instr->operand(0)->shape()); }; auto use_spmd_partitioning = [](const HloInstruction *instr) -> bool { return instr->GetModule()->config().use_spmd_partitioning(); }; int start = 1 + num_dequant_ops; for (int i = start; i < subgraph.size(); ++i) { if (!Match( subgraph[i].first, m::AnyOf<HloInstruction>( m::Bitcast().WithPredicate(preserves_element_type), m::Broadcast(), m::Copy(), m::DynamicSlice(), m::Pad(), m::Reshape(), m::Select(), m::Slice(), m::Transpose(), m::AllGather().WithPredicate(use_spmd_partitioning), m::AllToAll().WithPredicate(use_spmd_partitioning), m::CollectivePermute().WithPredicate(use_spmd_partitioning)))) { VLOG(1) << "Possible intended FP8 GEMM operating on " << instr->ToShortString() << " not rewritten into FP8 Custom Call."; return std::nullopt; } if (Match(subgraph[i].first, m::Select()) && !Match(subgraph[i].first->operand(subgraph[i].second == 2 ? 1 : 2), m::Broadcast(m::ConstantScalar(0)))) { VLOG(1) << "Possible intended FP8 GEMM operating on " << instr->ToShortString() << " not rewritten into FP8 Custom Call. 
Select requires a zero " "operand to be exchanged with dequantization."; return std::nullopt; } } param.commutative_ops = {subgraph.begin() + start, subgraph.end()}; return param; } HloInstruction *TransposeMatrix(HloInstruction *instr, int64_t contracting_dim, absl::Span<const int64_t> batch_dims) { auto input_shape = instr->shape(); std::vector<int64_t> permutation(input_shape.dimensions_size(), -1); for (int64_t batch_dim : batch_dims) { permutation[batch_dim] = batch_dim; } int non_contracting_dim; for (int i = 0; i < input_shape.dimensions_size(); ++i) { if (permutation[i] == -1 && contracting_dim != i) { non_contracting_dim = i; } } if (Layout::Equal()(input_shape.layout(), LayoutUtil::GetDefaultLayoutForShape(input_shape))) { permutation[non_contracting_dim] = contracting_dim; permutation[contracting_dim] = non_contracting_dim; Shape new_shape = ShapeUtil::PermuteDimensions(permutation, input_shape); *new_shape.mutable_layout() = input_shape.layout(); return instr->AddInstruction( HloInstruction::CreateTranspose(new_shape, instr, permutation)); } Shape normalized_input_shape = ShapeUtil::MakeShapeWithDescendingLayoutAndSamePhysicalLayout( input_shape); auto a0 = MakeBitcastHlo(instr, normalized_input_shape); std::vector<int64_t> layout_permuation( input_shape.layout().minor_to_major().begin(), input_shape.layout().minor_to_major().end()); absl::c_reverse(layout_permuation); auto inv_perm = InversePermutation(layout_permuation); int new_contracting_dim = inv_perm[contracting_dim]; int new_non_contracting_dim = inv_perm[non_contracting_dim]; absl::c_iota(permutation, 0); std::swap(permutation[new_contracting_dim], permutation[new_non_contracting_dim]); Shape transpose_shape = ShapeUtil::PermuteDimensions(permutation, a0->shape()); *transpose_shape.mutable_layout() = a0->shape().layout(); HloInstruction *normalized_transpose = instr->AddInstruction( HloInstruction::CreateTranspose(transpose_shape, a0, permutation)); Shape final_shape = ShapeUtil::PermuteDimensions(inv_perm, transpose_shape); *final_shape.mutable_layout() = input_shape.layout(); return MakeBitcastHlo(normalized_transpose, final_shape); } HloInstruction *MaybeConstantFoldBias(HloInstruction *bias) { constexpr int kMaxMaterializeBiasBytes = 8 * 1024 * 1024; auto is_nonscalar = [](const HloInstruction *instr) { return !ShapeUtil::IsEffectiveScalar(instr->shape()); }; auto broadcast_of_nonscalar = m::Broadcast(m::Constant().WithPredicate(is_nonscalar)); if (ShapeUtil::ByteSizeOf(bias->shape()) <= kMaxMaterializeBiasBytes && (Match(bias, broadcast_of_nonscalar) || Match(bias, m::Reshape(broadcast_of_nonscalar)) || Match(bias, m::Transpose(broadcast_of_nonscalar)) || Match(bias, m::Bitcast(broadcast_of_nonscalar)))) { HloEvaluator evaluator(0); Literal result; if (evaluator.TryEvaluate( bias, &result, true)) { return bias->parent()->AddInstruction( HloInstruction::CreateConstant(std::move(result))); } } return bias; } auto Gemm(HloInstruction **instr) { return m::CustomCall(instr, {kGemmCallTarget}); } auto CublasLtMatmul(HloInstruction **instr) { return m::CustomCall(instr, {kCublasLtMatmulCallTarget}); } auto CublasLtMatmulF8(HloInstruction **instr) { return m::CustomCall(instr, {kCublasLtMatmulF8CallTarget}); } auto CublasLtMatmulMaybeF8(HloInstruction **instr) { return m::CustomCall( instr, {kCublasLtMatmulCallTarget, kCublasLtMatmulF8CallTarget}); } auto GemmOrCublasLtMatmul(HloInstruction **instr) { return m::CustomCall(instr, {kGemmCallTarget, kCublasLtMatmulCallTarget}); } auto 
GemmOrCublasLtMatmulMaybeF8(HloInstruction **instr) { return m::CustomCall(instr, {kGemmCallTarget, kCublasLtMatmulCallTarget, kCublasLtMatmulF8CallTarget}); } auto BcastConstScalar(HloInstruction **instr, double value) { return m::Broadcast(instr, m::ConstantScalar(value)); } auto BcastConstScalar(double value) { return BcastConstScalar(nullptr, value); } auto BcastConstScalarNear(double value) { return m::Broadcast(m::ConstantScalar().WithPredicate( [expected = value](const HloInstruction *instr) { std::optional<double> actual = xla::Cast<const HloConstantInstruction>(instr) ->literal() .GetAsDouble({}); if (!actual.has_value()) return false; double epsilon; switch (instr->shape().element_type()) { case F16: epsilon = 128 * std::numeric_limits<Eigen::half>::epsilon(); break; case BF16: epsilon = 128 * std::numeric_limits<bfloat16>::epsilon(); break; case F32: epsilon = 128 * std::numeric_limits<float>::epsilon(); break; case F64: epsilon = 128 * std::numeric_limits<double>::epsilon(); break; default: return false; } return abs(*actual - expected) < (abs(*actual + expected) * epsilon); })); } template <typename Pattern> auto OptionalSlice(HloInstruction **optional_slice, Pattern pattern) { return m::AnyOf<HloInstruction>(m::Slice(optional_slice, pattern), std::move(pattern)); } template <typename Pattern> auto OptionalConvert(HloInstruction **optional_convert, Pattern pattern) { return m::AnyOf<HloInstruction>(m::Convert(optional_convert, pattern), std::move(pattern)); } template <typename Pattern> auto OptionalBitcast(HloInstruction **optional_bitcast, Pattern pattern) { return m::AnyOf<HloInstruction>(m::Bitcast(optional_bitcast, pattern), std::move(pattern)); } class GemmRewriterVisitor : public DfsHloRewriteVisitor { public: explicit GemmRewriterVisitor(const se::GpuComputeCapability &gpu_version, se::SemanticVersion toolkit_version, const GemmRewriterOptions options) : gpu_version_(gpu_version), toolkit_version_(toolkit_version), options_(options) {} absl::Status HandleDot(HloInstruction *instr) override { if (!IsMatrixMultiplication(*instr) && !IsMatrixVectorMultiplication(*instr)) { return absl::OkStatus(); } if (Cast<HloDotInstruction>(instr)->sparse_operands()) { return absl::OkStatus(); } int64_t gemm_rewrite_size_threshold = instr->GetModule() ->config() .debug_options() .xla_gpu_gemm_rewrite_size_threshold(); TF_ASSIGN_OR_RETURN(bool is_matmul_tiny, IsMatrixMultiplicationTooSmallForRewriting( *instr, gemm_rewrite_size_threshold)); if (is_matmul_tiny && IsDotSupportedByClassicalEmitters(*instr)) { return absl::OkStatus(); } CHECK(!instr->IsRank2Transpose()); if (instr->operand(0)->IsRank2Transpose() || instr->operand(1)->IsRank2Transpose()) { return absl::OkStatus(); } TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_backend_config, instr->backend_config<GpuBackendConfig>()); GemmBackendConfig &gemm_backend_config = *gpu_backend_config.mutable_gemm_backend_config(); gemm_backend_config.set_alpha_real(1.0); gemm_backend_config.set_alpha_imag(0.0); gemm_backend_config.set_beta(0.0); *gemm_backend_config.mutable_dot_dimension_numbers() = instr->dot_dimension_numbers(); *gemm_backend_config.mutable_precision_config() = instr->precision_config(); HloInstruction *lhs = instr->mutable_operand(0); HloInstruction *rhs = instr->mutable_operand(1); auto attributes = instr->frontend_attributes().map(); gemm_backend_config.set_grad_x(attributes["grad_x"] == "true"); gemm_backend_config.set_grad_y(attributes["grad_y"] == "true"); int64_t lhs_batch_dims_size = 
instr->dot_dimension_numbers().lhs_batch_dimensions_size(); bool is_lhs_vector = lhs->shape().dimensions_size() == lhs_batch_dims_size + 1; bool is_rhs_vector = rhs->shape().dimensions_size() == lhs_batch_dims_size + 1; int64_t lhs_stride = is_lhs_vector ? lhs->shape().dimensions(lhs_batch_dims_size) : lhs->shape().dimensions(lhs_batch_dims_size) * lhs->shape().dimensions(lhs_batch_dims_size + 1); int64_t rhs_stride = is_rhs_vector ? rhs->shape().dimensions(lhs_batch_dims_size) : rhs->shape().dimensions(lhs_batch_dims_size) * rhs->shape().dimensions(lhs_batch_dims_size + 1); gemm_backend_config.set_lhs_stride(lhs_stride); gemm_backend_config.set_rhs_stride(rhs_stride); switch (options_.dtype) { case GemmRewriterOptions::DType::kFp8Only: { TF_ASSIGN_OR_RETURN( bool supported_by_cublaslt, GemmIsSupportedByCublasLt(*instr, gemm_backend_config)); std::optional<MatchedFp8Param> a, b; if (supported_by_cublaslt && instr->opcode() == HloOpcode::kDot && (a = MatchFp8Param( const_cast<HloInstruction *>(instr->operand(0)))) && (b = MatchFp8Param( const_cast<HloInstruction *>(instr->operand(1))))) { if (IsRocm(gpu_version_) && toolkit_version_ < stream_executor::SemanticVersion{6, 2, 0} && instr->shape().element_type() != F16 && instr->shape().element_type() != F32) { TF_ASSIGN_OR_RETURN( instr, TurnF8DotWithUnsupportedOutputTypeIntoF32(instr)); } TF_ASSIGN_OR_RETURN(bool created_call, CreateF8CustomCall(instr, gpu_backend_config, a.value(), b.value())); if (created_call) { return absl::OkStatus(); } } if (IsF8Type(instr->operand(0))) { TF_ASSIGN_OR_RETURN(instr, TurnF8DotIntoF16Dot(instr)); } break; } case GemmRewriterOptions::DType::kNonFp8Only: { TF_ASSIGN_OR_RETURN( absl::string_view gemm_custom_call_target, GetNonFp8GemmCustomCallTarget(*instr, gemm_backend_config)); const Shape &output_shape = instr->shape(); HloInstruction *gemm_call = instr->AddInstruction(HloInstruction::CreateCustomCall( output_shape, {instr->mutable_operand(0), instr->mutable_operand(1)}, gemm_custom_call_target)); TF_RETURN_IF_ERROR(gemm_call->set_backend_config(gpu_backend_config)); TF_RETURN_IF_ERROR(ReplaceInstruction(instr, gemm_call)); } break; }; return absl::OkStatus(); } absl::Status HandleMultiply(HloInstruction *instr) override { HloInstruction *alpha, *existing_gemm; if (Match(instr, m::MultiplyAnyOrder( GemmOrCublasLtMatmulMaybeF8(&existing_gemm).WithOneUser(), m::Broadcast(m::ConstantScalar(&alpha)).WithOneUser()))) { TF_ASSIGN_OR_RETURN(auto gpu_config, existing_gemm->backend_config<GpuBackendConfig>()); GemmBackendConfig &config = *gpu_config.mutable_gemm_backend_config(); if (existing_gemm->shape().element_type() == S32) { return absl::OkStatus(); } if (config.beta() == 0.0 && existing_gemm->user_count() == 1) { complex128 prev_alpha = {config.alpha_real(), config.alpha_imag()}; complex128 new_alpha = *alpha->literal().GetAsComplex128({}) * prev_alpha; config.set_alpha_real(new_alpha.real()); config.set_alpha_imag(new_alpha.imag()); TF_RETURN_IF_ERROR(existing_gemm->set_backend_config(gpu_config)); return ReplaceInstruction(instr, existing_gemm); } } HloInstruction *d_scale; if (Match(instr, m::MultiplyAnyOrder( CublasLtMatmulF8(&existing_gemm).WithOneUser(), m::Broadcast(m::Op(&d_scale)).WithOneUser()))) { return F8ScaleD(instr, existing_gemm, d_scale); } HloInstruction *cdf, *slice_or_bitcast = nullptr; if (Match(instr, m::MultiplyAnyOrder( m::AnyOf<HloInstruction>( m::Slice(&slice_or_bitcast, CublasLtMatmulMaybeF8(&existing_gemm)), m::Bitcast(&slice_or_bitcast, CublasLtMatmulMaybeF8(&existing_gemm)), 
CublasLtMatmulMaybeF8(&existing_gemm)), m::Op(&cdf).WithOneUser())) && Match(cdf, m::MultiplyAnyOrder( BcastConstScalar(0.5), m::AddAnyOrder( BcastConstScalar(1.0), m::Tanh( m::MultiplyAnyOrder( BcastConstScalarNear(sqrt(M_2_PI)), m::AddAnyOrder( m::Op().Is(slice_or_bitcast ? slice_or_bitcast : existing_gemm), m::MultiplyAnyOrder( BcastConstScalarNear(0.044715), m::MultiplyAnyOrder( m::Op().Is(slice_or_bitcast ? slice_or_bitcast : existing_gemm), m::MultiplyAnyOrder( m::Op().Is(slice_or_bitcast ? slice_or_bitcast : existing_gemm), m::Op().Is(slice_or_bitcast ? slice_or_bitcast : existing_gemm)) .WithOneUser()) .WithOneUser()) .WithOneUser()) .WithOneUser()) .WithOneUser()) .WithOneUser())))) { return FuseGeluActivation(instr, existing_gemm, slice_or_bitcast); } return absl::OkStatus(); } absl::Status HandleDivide(HloInstruction *instr) override { HloInstruction *existing_gemm, *d_scale; if (Match(instr, m::Divide(CublasLtMatmulF8(&existing_gemm).WithOneUser(), m::Broadcast(m::Op(&d_scale)).WithOneUser()))) { return F8ScaleD(instr, existing_gemm, d_scale); } return absl::OkStatus(); } absl::Status HandleAdd(HloInstruction *instr) override { if (options_.bias_mode == GemmRewriterOptions::BiasMode::kNoBias) { return absl::OkStatus(); } HloInstruction *bias, *existing_gemm = nullptr; HloInstruction *optional_slice = nullptr; HloInstruction *optional_convert = nullptr; HloInstruction *optional_bitcast = nullptr; if (Match(instr, m::AddAnyOrder( OptionalBitcast( &optional_bitcast, OptionalSlice( &optional_slice, CublasLtMatmulMaybeF8(&existing_gemm).WithOneUser()) .WithOneUser()) .WithOneUser(), m::Broadcast(&bias, OptionalConvert(&optional_convert, m::Op()))))) { TF_ASSIGN_OR_RETURN( bool was_fused, FuseVectorBiasAdd(instr, bias, existing_gemm, optional_slice, optional_convert, optional_bitcast)); if (was_fused) { return absl::OkStatus(); } } if (Match( instr, m::AddAnyOrder( m::Bitcast(CublasLtMatmulMaybeF8(&existing_gemm).WithOneUser()) .WithOneUser(), m::Broadcast(&bias, m::Op()).WithOneUser()))) { TF_ASSIGN_OR_RETURN( HloInstruction * new_add, MakeBinaryHlo(HloOpcode::kAdd, existing_gemm, MakeBitcastHlo(bias, existing_gemm->shape()))); TF_RETURN_IF_ERROR( ReplaceInstruction(instr, MakeBitcastHlo(new_add, instr->shape()))); instr = new_add; } auto is_not_broadcast = [](const HloInstruction *instr) { return instr->opcode() != HloOpcode::kBroadcast; }; if (Match(instr, m::AddAnyOrder( m::Bitcast( GemmOrCublasLtMatmulMaybeF8(&existing_gemm).WithOneUser()) .WithOneUser(), m::Op(&bias).WithPredicate(is_not_broadcast)))) { HloInstruction *new_bitcast = MakeBitcastHlo(bias, existing_gemm->shape(), &bias->metadata()); TF_ASSIGN_OR_RETURN(HloInstruction * new_add, MakeBinaryHlo(HloOpcode::kAdd, existing_gemm, new_bitcast, &bias->metadata())); TF_RETURN_IF_ERROR( ReplaceInstruction(instr, MakeBitcastHlo(new_add, instr->shape()))); instr = new_add; } if (Match(instr, m::AddAnyOrder( m::AnyOf<HloInstruction>( GemmOrCublasLtMatmul(&existing_gemm).WithOneUser(), m::Convert( GemmOrCublasLtMatmul(&existing_gemm).WithOneUser()) .WithOneUser()), m::Op(&bias).WithPredicate(is_not_broadcast)))) { TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_backend_config, existing_gemm->backend_config<GpuBackendConfig>()); const GemmBackendConfig &gemm_backend_config = gpu_backend_config.gemm_backend_config(); TF_ASSIGN_OR_RETURN( bool types_are_supported, IsLegacyCublasMatmul(*existing_gemm) ? 
TypesAreSupportedByLegacyCublas(*existing_gemm, gemm_backend_config, instr) : TypesAreSupportedByCublasLt(*existing_gemm, gemm_backend_config, instr)); bool has_no_consumer = instr->shape().element_type() == existing_gemm->shape().element_type() || instr->user_count() == 0 || (instr->user_count() == 1 && instr->users()[0]->opcode() == HloOpcode::kTuple && instr->users()[0]->user_count() == 0); if (types_are_supported && has_no_consumer) { return FuseMatrixBiasAdd(instr, bias, existing_gemm); } } HloInstruction *optional_bitcast_matrix = nullptr; HloInstruction *optional_slice_matrix = nullptr; if (Match(instr, m::AddAnyOrder( OptionalBitcast( &optional_bitcast_matrix, OptionalSlice(&optional_slice_matrix, GemmOrCublasLtMatmulMaybeF8(&existing_gemm) .WithOneUser())) .WithOneUser(), m::Op(&bias).WithPredicate(is_not_broadcast)))) { if (!IsF8Type(bias)) { return FuseMatrixBiasAdd(instr, bias, existing_gemm, optional_bitcast_matrix, optional_slice_matrix); } } return absl::OkStatus(); } absl::Status HandleMaximum(HloInstruction *instr) override { HloInstruction *existing_gemm, *zeros; HloInstruction *optional_slice_or_bitcast = nullptr; if (Match(instr, m::MaximumAnyOrder( m::AnyOf<HloInstruction>( m::Slice( &optional_slice_or_bitcast, CublasLtMatmulMaybeF8(&existing_gemm).WithOneUser()), m::Bitcast( &optional_slice_or_bitcast, CublasLtMatmulMaybeF8(&existing_gemm).WithOneUser()), CublasLtMatmulMaybeF8(&existing_gemm)) .WithOneUser(), m::Broadcast(&zeros, m::ConstantScalar(0))))) { TF_RETURN_IF_ERROR(FuseReluActivation(instr, zeros, existing_gemm, optional_slice_or_bitcast)); } return absl::OkStatus(); } absl::Status HandleConvert(HloInstruction *instr) override { HloInstruction *clamp_lower, *clamp_upper, *existing_gemm, *d_scale = nullptr, *binary = nullptr; if (Match(instr, m::Convert( m::Clamp( m::Broadcast(m::ConstantScalar(&clamp_lower)), m::AnyOf<HloInstruction>( CublasLtMatmulF8(&existing_gemm), m::Divide(&binary, CublasLtMatmulF8(&existing_gemm), m::Broadcast(m::Op(&d_scale))), m::MultiplyAnyOrder(&binary, CublasLtMatmulF8(&existing_gemm), m::Broadcast(m::Op(&d_scale)))), m::Broadcast(m::ConstantScalar(&clamp_upper))) .WithOneUser()))) { return F8ConvertD( instr, existing_gemm, d_scale, clamp_lower, clamp_upper, (binary && binary->opcode() == HloOpcode::kMultiply)); } return absl::OkStatus(); } static bool IsCuda(const se::GpuComputeCapability &gpu_version) { return std::holds_alternative<se::CudaComputeCapability>(gpu_version); } static absl::StatusOr<se::CudaComputeCapability> GetCudaComputeCapability( const se::GpuComputeCapability &gpu_version) { auto *cuda_cc = std::get_if<se::CudaComputeCapability>(&gpu_version); if (cuda_cc == nullptr) { return absl::InvalidArgumentError("Compute Capability is not CUDA."); } return *cuda_cc; } static bool IsRocm(const se::GpuComputeCapability &gpu_version) { return std::holds_alternative<se::RocmComputeCapability>(gpu_version); } static absl::StatusOr<se::RocmComputeCapability> GetRocmComputeCapability( const se::GpuComputeCapability &gpu_version) { auto rocm_cc = std::get_if<se::RocmComputeCapability>(&gpu_version); if (rocm_cc == nullptr) { return absl::InvalidArgumentError("Compute Capability is not ROCm."); } return *rocm_cc; } absl::StatusOr<bool> CreateF8CustomCall(HloInstruction *instr, GpuBackendConfig &gpu_backend_config, MatchedFp8Param a, MatchedFp8Param b) { GemmBackendConfig &gemm_backend_config = *gpu_backend_config.mutable_gemm_backend_config(); if (IsCuda(gpu_version_)) { TF_ASSIGN_OR_RETURN(auto cuda_compute_capability, 
GetCudaComputeCapability(gpu_version_)); if (!cuda_compute_capability.IsAtLeast(8, 9)) { VLOG(1) << "FP8 Custom Calls require Ada, Hopper, or later " "architectures. Got: " << cuda_compute_capability.ToString() << " and toolkit version: " << toolkit_version_; return false; } if (toolkit_version_ < stream_executor::SemanticVersion{12, 0, 0}) { VLOG(1) << "FP8 Custom Calls require CUDA 12.0 or newer."; return false; } } if (IsRocm(gpu_version_)) { TF_ASSIGN_OR_RETURN(auto rocm_compute_capability, GetRocmComputeCapability(gpu_version_)); if (!rocm_compute_capability.has_fp8_support()) { VLOG(1) << "FP8 Custom Calls require MI300, or later architectures."; return false; } if (toolkit_version_ < stream_executor::SemanticVersion{6, 0, 0}) { VLOG(1) << "FP8 Custom Calls require ROCm 6.0 or newer."; return false; } } PrimitiveType a_type = a.fp8_input->shape().element_type(); PrimitiveType b_type = b.fp8_input->shape().element_type(); if (IsCuda(gpu_version_)) { if (a_type == F8E5M2 && b_type == F8E5M2) { VLOG(1) << "Failed to rewrite " << instr->ToShortString() << " into FP8 Custom Call. The element type of one of the operands " "must be F8E4M3FN."; return false; } if ((a_type != F8E5M2 && a_type != F8E4M3FN) || (b_type != F8E5M2 && b_type != F8E4M3FN)) { VLOG(1) << "Failed to rewrite " << instr->ToShortString() << " into FP8 Custom Call. The input types must be F8E5M2 or " "F8E4M3FN, but got " << PrimitiveType_Name(a_type) << " and " << PrimitiveType_Name(b_type); return false; } } if (IsRocm(gpu_version_)) { if (a_type == F8E5M2FNUZ && b_type == F8E5M2FNUZ) { VLOG(1) << "Failed to rewrite " << instr->ToShortString() << " into FP8 Custom Call. The element type of one of the operands " "must be F8E4M3FNUZ."; return false; } if ((a_type != F8E5M2FNUZ && a_type != F8E4M3FNUZ) || (b_type != F8E5M2FNUZ && b_type != F8E4M3FNUZ)) { VLOG(1) << "Failed to rewrite " << instr->ToShortString() << " into FP8 Custom Call. The input types must be F8E5M2FNUZ or " "F8E4M3FNUZ, but got " << PrimitiveType_Name(a_type) << " and " << PrimitiveType_Name(b_type); return false; } } absl::Span<const int64_t> a_batch_dims = gemm_backend_config.dot_dimension_numbers().lhs_batch_dimensions(); absl::Span<const int64_t> b_batch_dims = gemm_backend_config.dot_dimension_numbers().rhs_batch_dimensions(); const size_t num_batch_dims = a_batch_dims.size(); std::array<bool, 2> mult_scale{a.mult_scale, b.mult_scale}; std::array<HloInstruction *, 2> scales{a.scale, b.scale}, inv_scales, scales_f32; HloInstruction *one_constant = nullptr; auto one = [&one_constant, instr]() -> HloInstruction * { if (!one_constant) { one_constant = instr->AddInstruction( HloInstruction::CreateConstant(LiteralUtil::One(F32))); } return one_constant; }; for (int i = 0; i < scales.size(); ++i) { if (scales[i]) { if (!ShapeUtil::IsScalar(scales[i]->shape())) { VLOG(1) << "Failed to rewrite " << instr->ToShortString() << " into FP8 Custom Call. The scaling factors must be " "scalars."; return false; } if (!mult_scale[i]) { inv_scales[i] = instr->AddInstruction(HloInstruction::CreateBinary( scales[i]->shape(), HloOpcode::kDivide, one(), scales[i])); } scales_f32[i] = mult_scale[i] ? 
scales[i] : inv_scales[i]; if (scales_f32[i]->shape().element_type() != F32) { scales_f32[i] = instr->AddInstruction(HloInstruction::CreateConvert( ShapeUtil::MakeScalarShape(F32), scales_f32[i])); } } else { scales_f32[i] = one(); } } PrimitiveType d_type = instr->shape().element_type(); bool supported_d_type = (d_type == BF16 || d_type == F16 || d_type == F32); if (IsCuda(gpu_version_) && (d_type == F8E4M3FN || d_type == F8E5M2)) { supported_d_type = true; } if (IsRocm(gpu_version_) && toolkit_version_ >= stream_executor::SemanticVersion{6, 2, 0} && (d_type == F8E4M3FNUZ || d_type == F8E5M2FNUZ)) { supported_d_type = true; } if (!supported_d_type) { VLOG(1) << "Failed to rewrite " << instr->ToShortString() << " into FP8 Custom Call. Output element type must be " << (IsCuda(gpu_version_) ? "F8E4M3FN, F8E5M2, BF16, F16 or F32. " : toolkit_version_ >= stream_executor::SemanticVersion{6, 2, 0} ? "F8E4M3FNUZ, F8E5M2FNUZ, BF16, F16 or F32. " : "BF16, F16 or F32. ") << "Actual element type is " << PrimitiveType_Name(d_type); return false; } absl::Span<const int64_t> a_contracting_dims = gemm_backend_config.dot_dimension_numbers() .lhs_contracting_dimensions(); absl::Span<const int64_t> b_contracting_dims = gemm_backend_config.dot_dimension_numbers() .rhs_contracting_dimensions(); if (a_contracting_dims.size() != 1 || b_contracting_dims.size() != 1) { VLOG(1) << "Failed to rewrite " << instr->ToShortString() << " into FP8 Custom Call. A and B must have one contracting " "dimension."; return false; } for (const MatchedFp8Param &param : {a, b}) { const HloInstruction *input = param.commutative_ops.empty() ? param.fp8_input : param.commutative_ops.back().first; if (input->shape().rank() != num_batch_dims + 2) { VLOG(1) << "Failed to rewrite " << instr->ToShortString() << "into FP8 Custom Call. Inputs must have exactly one " "contracting and one non-contracting dimension."; return false; } } auto shift_ops = [&instr](HloInstruction *&x, InstrPath &x_ops) -> void { for (std::pair<HloInstruction *, int> op : x_ops) { std::vector<HloInstruction *> operands = {x}; if (op.first->opcode() == HloOpcode::kDynamicSlice) { for (int i = 1; i < op.first->operand_count(); ++i) { operands.emplace_back(op.first->mutable_operand(i)); } } if (op.first->opcode() == HloOpcode::kPad) { HloInstruction *convert = instr->AddInstruction(HloInstruction::CreateConvert( ShapeUtil::ChangeElementType(op.first->operand(1)->shape(), x->shape().element_type()), op.first->mutable_operand(1))); operands.emplace_back(convert); } if (op.first->opcode() == HloOpcode::kSelect) { operands.emplace(operands.begin(), op.first->mutable_operand(0)); int operand_idx = op.second == 2 ? 
1 : 2; HloInstruction *convert = instr->AddInstruction(HloInstruction::CreateConvert( ShapeUtil::ChangeElementType( op.first->operand(operand_idx)->shape(), x->shape().element_type()), op.first->mutable_operand(operand_idx))); operands.emplace(operands.begin() + operand_idx, convert); } x = instr->AddInstruction(op.first->CloneWithNewOperands( ShapeUtil::MakeShapeWithDenseLayout( x->shape().element_type(), op.first->shape().dimensions(), op.first->shape().layout().minor_to_major()), operands)); } return; }; shift_ops(a.fp8_input, a.commutative_ops); shift_ops(b.fp8_input, b.commutative_ops); TF_ASSIGN_OR_RETURN(GemmConfig gemm_config, GemmConfig::For(instr, gemm_backend_config)); DotDimensionNumbers *dim_nums = gemm_backend_config.mutable_dot_dimension_numbers(); if (gemm_config.lhs_layout.order == MatrixLayout::Order::kColumnMajor) { CHECK(a_contracting_dims[0] == num_batch_dims || a_contracting_dims[0] == num_batch_dims + 1); if (a_contracting_dims[0] == num_batch_dims) { dim_nums->set_lhs_contracting_dimensions(0, num_batch_dims + 1); } else { dim_nums->set_lhs_contracting_dimensions(0, num_batch_dims); } a.fp8_input = TransposeMatrix(a.fp8_input, a_contracting_dims[0], a_batch_dims); } if (gemm_config.rhs_layout.order == MatrixLayout::Order::kRowMajor) { CHECK(b_contracting_dims[0] == num_batch_dims || b_contracting_dims[0] == num_batch_dims + 1); if (b_contracting_dims[0] == num_batch_dims) { dim_nums->set_rhs_contracting_dimensions(0, num_batch_dims + 1); } else { dim_nums->set_rhs_contracting_dimensions(0, num_batch_dims); } b.fp8_input = TransposeMatrix(b.fp8_input, b_contracting_dims[0], b_batch_dims); } a.fp8_input = PadOperandToMultipleOf16(a_batch_dims, a.fp8_input); b.fp8_input = PadOperandToMultipleOf16(b_batch_dims, b.fp8_input); std::vector<int64_t> out_batch_dims(num_batch_dims); std::iota(out_batch_dims.begin(), out_batch_dims.end(), 0); Shape new_output_shape = PadShapeToMultipleOf16(instr->shape(), out_batch_dims); std::vector<HloInstruction *> operands_list = { a.fp8_input, b.fp8_input, scales_f32[0], scales_f32[1]}; HloInstruction *new_custom_call = instr->AddInstruction(HloInstruction::CreateCustomCall( ShapeUtil::MakeShapeWithDenseLayout( instr->shape().element_type(), new_output_shape.dimensions(), instr->shape().layout().minor_to_major()), operands_list, kCublasLtMatmulF8CallTarget)); TF_RETURN_IF_ERROR(new_custom_call->set_backend_config(gpu_backend_config)); TF_RETURN_IF_ERROR(SetName(instr->GetModule(), new_custom_call)); HloInstruction *slice = nullptr; if (new_output_shape.dimensions() != instr->shape().dimensions()) { std::vector<int64_t> start_indices(instr->shape().rank(), 0); std::vector<int64_t> strides(instr->shape().rank(), 1); slice = instr->AddInstruction(HloInstruction::CreateSlice( instr->shape(), new_custom_call, start_indices, instr->shape().dimensions(), strides)); } TF_RETURN_IF_ERROR( ReplaceInstruction(instr, slice ? slice : new_custom_call)); VLOG(1) << instr->ToString() << " rewritten into FP8 Custom Call."; return true; } absl::Status F8ScaleD(HloInstruction *instr, HloInstruction *existing_gemm, HloInstruction *d_scale) { if (!ShapeUtil::IsScalar(d_scale->shape())) { return absl::OkStatus(); } if (!existing_gemm->operand(2)->IsConstant() || existing_gemm->operand(2)->literal().GetAsDouble({}) != 1.) 
{ return absl::OkStatus(); } TF_ASSIGN_OR_RETURN(auto gpu_backend_config, existing_gemm->backend_config<GpuBackendConfig>()); const GemmBackendConfig &config = gpu_backend_config.gemm_backend_config(); if ((config.epilogue() != GemmBackendConfig::DEFAULT && config.epilogue() != GemmBackendConfig::RELU) || config.beta() != 0.) { return absl::OkStatus(); } TF_ASSIGN_OR_RETURN( d_scale, InvertAndConvertScalar(d_scale, instr->opcode() == HloOpcode::kDivide)); TF_RETURN_IF_ERROR(existing_gemm->ReplaceOperandWith(2, d_scale)); TF_RETURN_IF_ERROR(ReplaceInstruction(instr, existing_gemm)); VLOG(1) << "Scaling of FP8 GEMM fused into Custom Call."; return absl::OkStatus(); } absl::Status F8ConvertD(HloInstruction *instr, HloInstruction *existing_gemm, HloInstruction *d_scale, HloInstruction *clamp_lower, HloInstruction *clamp_upper, bool mult_scale = false) { if (instr->shape().element_type() == F8E4M3FN) { if (!clamp_lower->literal().IsAllFloat(static_cast<float>( std::numeric_limits<tsl::float8_e4m3fn>::lowest())) || !clamp_upper->literal().IsAllFloat(static_cast<float>( std::numeric_limits<tsl::float8_e4m3fn>::max()))) { return absl::OkStatus(); } } else if (instr->shape().element_type() == F8E5M2) { if (!clamp_lower->literal().IsAllFloat(static_cast<float>( std::numeric_limits<tsl::float8_e5m2>::lowest())) || !clamp_upper->literal().IsAllFloat(static_cast<float>( std::numeric_limits<tsl::float8_e5m2>::max()))) { return absl::OkStatus(); } } else { return absl::OkStatus(); } if (d_scale && !ShapeUtil::IsScalar(d_scale->shape())) { return absl::OkStatus(); } const std::vector<HloInstruction *> gemm_users = existing_gemm->users(); HloInstruction *reduce_damax = nullptr; if (gemm_users.size() == 2) { TF_ASSIGN_OR_RETURN(auto gpu_config, existing_gemm->backend_config<GpuBackendConfig>()); const GemmBackendConfig &config = gpu_config.gemm_backend_config(); for (int i = 0; i < gemm_users.size(); ++i) { HloInstruction *maybe_reduce = nullptr; if (gemm_users[i]->opcode() == HloOpcode::kAbs) { if (gemm_users[i]->users().size() != 1) continue; maybe_reduce = gemm_users[i]->users()[0]; } else { if (config.epilogue() != GemmBackendConfig::BIAS_RELU && config.epilogue() != GemmBackendConfig::RELU) continue; maybe_reduce = gemm_users[i]; } if (maybe_reduce->opcode() == HloOpcode::kReduce && maybe_reduce->operands().size() == 2 && maybe_reduce->operand(1)->opcode() == HloOpcode::kConstant && ShapeUtil::IsScalar(maybe_reduce->operand(1)->shape())) { HloInstruction *reduce = maybe_reduce; HloComputation *reduce_comp = reduce->to_apply(); HloInstruction *reduce_comp_root = reduce_comp->root_instruction(); if (reduce->operand(1)->literal().GetAsDouble({}) <= 0. 
&& reduce_comp_root->opcode() == HloOpcode::kMaximum && reduce_comp_root->operand(0)->opcode() == HloOpcode::kParameter && reduce_comp_root->operand(1)->opcode() == HloOpcode::kParameter) { reduce_damax = reduce; } } } if (!reduce_damax) { return absl::OkStatus(); } } else if (gemm_users.size() > 2) { return absl::OkStatus(); } TF_ASSIGN_OR_RETURN(auto gpu_backend_config, existing_gemm->backend_config<GpuBackendConfig>()); const GemmBackendConfig &gemm_backend_config = gpu_backend_config.gemm_backend_config(); if (gemm_backend_config.beta() != 0.0) { if (existing_gemm->operand(2)->shape().element_type() != BF16 && existing_gemm->operand(2)->shape().element_type() != F16) { VLOG(1) << "The scaling and conversion of the result of " << existing_gemm->ToShortString() << " is not fused into the FP8 Custom Call because it " "conflicts with the existing fusion of the addition of a " "matrix bias with element type other than BF16 or F16."; return absl::OkStatus(); } else { xla::Cast<HloCustomCallInstruction>(existing_gemm) ->set_output_to_operand_aliasing({}); } } if (d_scale) { TF_ASSIGN_OR_RETURN(d_scale, InvertAndConvertScalar(d_scale, !mult_scale)); } else { d_scale = instr->AddInstruction( HloInstruction::CreateConstant(LiteralUtil::One(F32))); } existing_gemm->AppendOperand(d_scale); if (reduce_damax) { return F8AddDAmax(instr, existing_gemm, reduce_damax); } std::unique_ptr<HloInstruction> new_gemm = existing_gemm->CloneWithNewShape(instr->shape()); TF_RETURN_IF_ERROR(ReplaceWithNewInstruction(instr, std::move(new_gemm))); VLOG(1) << "Conversion" << (reduce_damax ? " and amax calculation" : "") << " fused into FP8 GEMM."; return absl::OkStatus(); } absl::Status F8AddDAmax(HloInstruction *instr, HloInstruction *existing_gemm, HloInstruction *reduce_damax) { Shape damax_shape = ShapeUtil::MakeScalarShape(F32); Shape tuple_shape = ShapeUtil::MakeTupleShape({instr->shape(), damax_shape}); HloInstruction *gemm_and_damax = instr->AddInstruction(existing_gemm->CloneWithNewShape(tuple_shape)); TF_ASSIGN_OR_RETURN(auto gpu_config, gemm_and_damax->backend_config<GpuBackendConfig>()); GemmBackendConfig &config = *gpu_config.mutable_gemm_backend_config(); config.set_damax_output(true); TF_RETURN_IF_ERROR(gemm_and_damax->set_backend_config(gpu_config)); HloInstruction *d = instr->AddInstruction(HloInstruction::CreateGetTupleElement( instr->shape(), gemm_and_damax, 0)); HloInstruction *damax = instr->AddInstruction( HloInstruction::CreateGetTupleElement(damax_shape, gemm_and_damax, 1)); HloInstruction *damax_converted = instr->AddInstruction( HloInstruction::CreateConvert(reduce_damax->shape(), damax)); TF_RETURN_IF_ERROR(ReplaceInstruction(reduce_damax, damax_converted)); TF_RETURN_IF_ERROR(ReplaceInstruction(instr, d)); return absl::OkStatus(); } absl::Status FuseMatrixBiasAdd(HloInstruction *instr, HloInstruction *bias, const HloInstruction *gemm, HloInstruction *bitcast = nullptr, HloInstruction *slice = nullptr) { TF_RET_CHECK(Shape::Equal().IgnoreElementType()(bias->shape(), bitcast ? bitcast->shape() : slice ? 
slice->shape() : gemm->shape())); if (gemm->shape().element_type() == S32) { return absl::OkStatus(); } if (slice) { int slice_op_dim = slice->operand(0)->shape().rank(); if (slice->slice_starts() != std::vector<int64_t>(slice_op_dim, 0) || slice->slice_strides() != std::vector<int64_t>(slice_op_dim, 1)) { return absl::OkStatus(); } } bool can_overwrite_bias = [bias]() { if (bias->user_count() > 1) { return false; } if (bias->opcode() != HloOpcode::kParameter) { return true; } if (!bias->parent()->IsEntryComputation()) { return false; } const auto &in_out_alias_config = bias->GetModule()->input_output_alias_config(); return in_out_alias_config.ParameterHasAlias(bias->parameter_number(), {}); }(); bool want_to_fuse_bias = IsCublasLtMatmulF8(*gemm) || IsCublasLtMatmul(*gemm) || can_overwrite_bias; auto gpu_config = gemm->backend_config<GpuBackendConfig>().value(); GemmBackendConfig &config = *gpu_config.mutable_gemm_backend_config(); bool supported_epilogue = ((config.epilogue() == GemmBackendConfig::DEFAULT) || (config.epilogue() == GemmBackendConfig::BIAS)); if ((config.beta() != 0) || !want_to_fuse_bias || (gemm->user_count() != 1) || !supported_epilogue) { return absl::OkStatus(); } config.set_beta(1.0); std::vector<HloInstruction *> operands(gemm->operands().begin(), gemm->operands().end()); HloInstruction *maybe_constant_folded_bias = MaybeConstantFoldBias(bias); if (bitcast) { maybe_constant_folded_bias = instr->AddInstruction(HloInstruction::CreateBitcast( slice->shape(), maybe_constant_folded_bias)); } maybe_constant_folded_bias = PadOperandToTargetShape(gemm->shape(), maybe_constant_folded_bias); operands.insert(operands.begin() + 2, maybe_constant_folded_bias); std::unique_ptr<HloInstruction> fused_op = gemm->CloneWithNewOperands(gemm->shape(), operands); fused_op->mutable_shape()->set_element_type(bias->shape().element_type()); TF_RETURN_IF_ERROR(fused_op->set_backend_config(gpu_config)); if (IsLegacyCublasMatmul(*fused_op) || can_overwrite_bias) { xla::Cast<HloCustomCallInstruction>(fused_op.get()) ->set_output_to_operand_aliasing({{{}, {2, {}}}}); } TF_RETURN_IF_ERROR(SetName(instr->GetModule(), fused_op.get())); if (slice) { fused_op = slice->CloneWithNewOperands( slice->shape(), {slice->parent()->AddInstruction(std::move(fused_op))}); } if (bitcast) { fused_op = bitcast->CloneWithNewOperands( bitcast->shape(), {bitcast->parent()->AddInstruction(std::move(fused_op))}); } return ReplaceWithNewInstruction(instr, std::move(fused_op)); } absl::StatusOr<bool> FuseVectorBiasAdd(HloInstruction *instr, HloInstruction *broadcast, HloInstruction *gemm, HloInstruction *slice = nullptr, HloInstruction *convert = nullptr, HloInstruction *bitcast = nullptr) { if (!bitcast) { TF_RET_CHECK(ShapeUtil::Compatible( broadcast->shape(), (slice ? 
slice->shape() : gemm->shape()))); } if (!SupportsEpilogueFusion(gemm->shape().element_type())) { return false; } HloInstruction *bias = broadcast->mutable_operand(0); TF_ASSIGN_OR_RETURN(auto gpu_config, gemm->backend_config<GpuBackendConfig>()); GemmBackendConfig &config = *gpu_config.mutable_gemm_backend_config(); const DotDimensionNumbers &dot_dims = config.dot_dimension_numbers(); size_t num_col_dims = gemm->operand(1)->shape().rank() - dot_dims.rhs_batch_dimensions_size() - dot_dims.rhs_contracting_dimensions_size(); if ((gemm->user_count() != 1) || (config.epilogue() != GemmBackendConfig::DEFAULT) || (bias->shape().rank() != num_col_dims)) { return false; } absl::Span<const int64_t> broadcast_dims = broadcast->dimensions(); for (size_t i = 0; i < num_col_dims; ++i) { int64_t dim = (bitcast ? bitcast : gemm)->shape().layout().minor_to_major(i); auto it = absl::c_find(broadcast_dims, dim); if (it == broadcast_dims.end()) { return false; } int64_t vector_dim = it - broadcast_dims.begin(); if (bias->shape().layout().minor_to_major(i) != vector_dim) { return false; } } std::vector<HloInstruction *> operands(gemm->operands().begin(), gemm->operands().end()); if (gemm->custom_call_target() == kCublasLtMatmulF8CallTarget && config.beta() != 0.0) { return true; } if (gemm->custom_call_target() == kCublasLtMatmulF8CallTarget && bias->shape().element_type() == F32) { if (convert == nullptr) { return false; } HloInstruction *bias_f16_or_bf16 = convert->mutable_operand(0); auto compatible_bias_type = [](const PrimitiveType bias_type, const PrimitiveType output_type) { if (bias_type == BF16) { return output_type == F8E4M3FN || output_type == F8E5M2 || output_type == F32 || output_type == BF16; } else if (bias_type == F16) { return output_type == F16 || output_type == F8E4M3FN || output_type == F8E5M2; } return false; }; if (compatible_bias_type(bias_f16_or_bf16->shape().element_type(), gemm->shape().element_type())) { bias = bias_f16_or_bf16; } else { VLOG(1) << "Epilogue fusion of FP32 vector bias into FP8 GEMM is " "currently not supported. See the cublasLT support matrix."; return false; } } if (gemm->custom_call_target() == kCublasLtMatmulF8CallTarget && bitcast) { bias = PadOperandToMultipleOf16( config.dot_dimension_numbers().rhs_batch_dimensions(), bias); } operands.push_back(bias); config.set_epilogue(GemmBackendConfig::BIAS); std::unique_ptr<HloInstruction> result = gemm->CloneWithNewOperands(gemm->shape(), operands); TF_RETURN_IF_ERROR(result->set_backend_config(gpu_config)); TF_RETURN_IF_ERROR(SetName(result->GetModule(), result.get())); if (slice) { result = slice->CloneWithNewOperands( slice->shape(), {slice->parent()->AddInstruction(std::move(result))}); } if (bitcast) { result = bitcast->CloneWithNewOperands( bitcast->shape(), {bitcast->parent()->AddInstruction(std::move(result))}); } TF_RETURN_IF_ERROR(ReplaceWithNewInstruction(instr, std::move(result))); return true; } absl::Status FuseReluActivation(HloInstruction *instr, HloInstruction *broadcast, HloInstruction *gemm, HloInstruction *slice_or_bitcast = nullptr) { TF_RET_CHECK(ShapeUtil::Compatible( broadcast->shape(), (slice_or_bitcast ? 
slice_or_bitcast->shape() : gemm->shape()))); if (!SupportsEpilogueFusion(gemm->shape().element_type())) { return absl::OkStatus(); } if (gemm->user_count() != 1) { return absl::OkStatus(); } TF_ASSIGN_OR_RETURN(auto gpu_config, gemm->backend_config<GpuBackendConfig>()); GemmBackendConfig &config = *gpu_config.mutable_gemm_backend_config(); if (config.epilogue() == GemmBackendConfig::DEFAULT) { config.set_epilogue(GemmBackendConfig::RELU); } else if (config.epilogue() == GemmBackendConfig::BIAS) { config.set_epilogue(GemmBackendConfig::BIAS_RELU); } else { return absl::OkStatus(); } std::unique_ptr<HloInstruction> result = gemm->Clone(); TF_RETURN_IF_ERROR(result->set_backend_config(gpu_config)); TF_RETURN_IF_ERROR(SetName(result->GetModule(), result.get())); if (slice_or_bitcast) { result = slice_or_bitcast->CloneWithNewOperands( slice_or_bitcast->shape(), {slice_or_bitcast->parent()->AddInstruction(std::move(result))}); } return ReplaceWithNewInstruction(instr, std::move(result)); } absl::Status FuseGeluActivation(HloInstruction *multiply, HloInstruction *gemm, HloInstruction *slice_or_bitcast = nullptr) { if (!SupportsEpilogueFusion(gemm->shape().element_type())) { return absl::OkStatus(); } if (IsCuda(gpu_version_) && toolkit_version_ < stream_executor::SemanticVersion{12, 4, 0} && IsCublasLtMatmulF8(*gemm)) { return absl::OkStatus(); } bool has_aux = gemm->user_count() > 4; TF_ASSIGN_OR_RETURN(auto gpu_config, gemm->backend_config<GpuBackendConfig>()); GemmBackendConfig &config = *gpu_config.mutable_gemm_backend_config(); if (config.epilogue() == GemmBackendConfig::DEFAULT) { config.set_epilogue(has_aux ? GemmBackendConfig::GELU_AUX : GemmBackendConfig::GELU); } else if (config.epilogue() == GemmBackendConfig::BIAS) { config.set_epilogue(has_aux ? GemmBackendConfig::BIAS_GELU_AUX : GemmBackendConfig::BIAS_GELU); } else { return absl::OkStatus(); } std::unique_ptr<HloInstruction> output = gemm->CloneWithNewShape( has_aux ? 
ShapeUtil::MakeTupleShape({gemm->shape(), gemm->shape()}) : gemm->shape()); TF_RETURN_IF_ERROR(output->set_backend_config(gpu_config)); TF_RETURN_IF_ERROR(SetName(multiply->GetModule(), output.get())); if (slice_or_bitcast) { output = slice_or_bitcast->CloneWithNewOperands( slice_or_bitcast->shape(), {gemm->parent()->AddInstruction(std::move(output))}); } if (has_aux) { HloInstruction *tuple_output = gemm->parent()->AddInstruction(std::move(output)); TF_RETURN_IF_ERROR(ReplaceWithNewInstruction( gemm, HloInstruction::CreateGetTupleElement(tuple_output, 1))); output = HloInstruction::CreateGetTupleElement(tuple_output, 0); } return ReplaceWithNewInstruction(multiply, std::move(output)); } private: se::GpuComputeCapability gpu_version_; stream_executor::SemanticVersion toolkit_version_; GemmRewriterOptions options_; absl::StatusOr<absl::string_view> GetNonFp8GemmCustomCallTarget( const HloInstruction &instr, const GemmBackendConfig &gemm_backend_config) const { if (!instr.GetModule() ->config() .debug_options() .xla_gpu_enable_cublaslt()) { return absl::string_view(kGemmCallTarget); } const HloInstruction *lhs = instr.operand(0); const HloInstruction *rhs = instr.operand(1); if (lhs->shape().element_type() == S8 || rhs->shape().element_type() == S8) { return absl::string_view(kGemmCallTarget); } TF_ASSIGN_OR_RETURN(bool gemm_is_supported_by_cublas_lt, GemmIsSupportedByCublasLt(instr, gemm_backend_config)); if (gemm_is_supported_by_cublas_lt) { return absl::string_view(kCublasLtMatmulCallTarget); } return absl::string_view(kGemmCallTarget); } absl::StatusOr<bool> TypesAreSupportedByLegacyCublas( const HloInstruction &instr, const GemmBackendConfig &gemm_backend_config, const HloInstruction *bias = nullptr) const { const PrimitiveType a_dtype = instr.operand(0)->shape().element_type(); const PrimitiveType b_dtype = instr.operand(1)->shape().element_type(); const PrimitiveType output_type = bias ? 
bias->shape().element_type() : instr.shape().element_type(); const std::array<PrimitiveType, 12> supported_type = { PrimitiveType::S8, PrimitiveType::F16, PrimitiveType::BF16, PrimitiveType::F32, PrimitiveType::S32, PrimitiveType::F64, PrimitiveType::C64, PrimitiveType::C128}; if (!absl::c_linear_search(supported_type, output_type)) return false; TF_ASSIGN_OR_RETURN(const se::blas::DataType output_dtype, se::gpu::AsBlasDataType(output_type)); TF_ASSIGN_OR_RETURN( const se::blas::ComputationType compute_type, se::gpu::GetBlasComputationType( instr.precision_config().algorithm(), a_dtype, output_type, stream_executor::blas::kDefaultComputePrecision)); se::blas::DataType scale_type = se::gpu::GetScaleType(output_dtype, compute_type); using se::blas::ComputationType; using se::blas::DataType; const std::array< std::tuple<ComputationType, DataType , PrimitiveType , PrimitiveType , DataType >, 32> supported_type_combinations = {{ {ComputationType::kF16, DataType::kHalf, PrimitiveType::F16, PrimitiveType::F16, DataType::kHalf}, {ComputationType::kI32, DataType::kInt32, PrimitiveType::S8, PrimitiveType::S8, DataType::kInt32}, {ComputationType::kF32, DataType::kFloat, PrimitiveType::BF16, PrimitiveType::BF16, DataType::kBF16}, {ComputationType::kF32, DataType::kFloat, PrimitiveType::F16, PrimitiveType::F16, DataType::kHalf}, {ComputationType::kF32, DataType::kFloat, PrimitiveType::S8, PrimitiveType::S8, DataType::kFloat}, {ComputationType::kF32, DataType::kFloat, PrimitiveType::BF16, PrimitiveType::BF16, DataType::kFloat}, {ComputationType::kF32, DataType::kFloat, PrimitiveType::F16, PrimitiveType::F16, DataType::kFloat}, {ComputationType::kF32, DataType::kFloat, PrimitiveType::F32, PrimitiveType::F32, DataType::kFloat}, {ComputationType::kF32, DataType::kComplexFloat, PrimitiveType::C64, PrimitiveType::C64, DataType::kComplexFloat}, {ComputationType::kF16AsF32, DataType::kFloat, PrimitiveType::F32, PrimitiveType::F32, DataType::kFloat}, {ComputationType::kF16AsF32, DataType::kComplexFloat, PrimitiveType::C64, PrimitiveType::C64, DataType::kComplexFloat}, {ComputationType::kBF16AsF32, DataType::kFloat, PrimitiveType::F32, PrimitiveType::F32, DataType::kFloat}, {ComputationType::kBF16AsF32, DataType::kComplexFloat, PrimitiveType::C64, PrimitiveType::C64, DataType::kComplexFloat}, {ComputationType::kTF32AsF32, DataType::kFloat, PrimitiveType::F32, PrimitiveType::F32, DataType::kFloat}, {ComputationType::kTF32AsF32, DataType::kComplexFloat, PrimitiveType::C64, PrimitiveType::C64, DataType::kComplexFloat}, {ComputationType::kF64, DataType::kDouble, PrimitiveType::F64, PrimitiveType::F64, DataType::kDouble}, {ComputationType::kF64, DataType::kComplexDouble, PrimitiveType::C128, PrimitiveType::C128, DataType::kComplexDouble}, }}; return absl::c_linear_search( supported_type_combinations, std::make_tuple(compute_type, scale_type, a_dtype, b_dtype, output_dtype)); } absl::StatusOr<bool> TypesAreSupportedByCublasLt( const HloInstruction &instr, const GemmBackendConfig &backend_config, const HloInstruction *bias = nullptr) const { const PrimitiveType a_dtype = instr.operand(0)->shape().element_type(); const PrimitiveType b_dtype = instr.operand(1)->shape().element_type(); const PrimitiveType output_type = bias ? 
bias->shape().element_type() : instr.shape().element_type(); const std::array<PrimitiveType, 12> supported_type = { PrimitiveType::F8E5M2FNUZ, PrimitiveType::F8E4M3FNUZ, PrimitiveType::F8E5M2, PrimitiveType::F8E4M3FN, PrimitiveType::S8, PrimitiveType::F16, PrimitiveType::BF16, PrimitiveType::F32, PrimitiveType::S32, PrimitiveType::F64, PrimitiveType::C64, PrimitiveType::C128}; if (!absl::c_linear_search(supported_type, output_type)) return false; TF_ASSIGN_OR_RETURN(const se::blas::DataType output_dtype, se::gpu::AsBlasDataType(output_type)); const int max_precision = *absl::c_max_element( backend_config.precision_config().operand_precision()); const PrecisionConfig::Algorithm algorithm = backend_config.precision_config().algorithm(); if (!algorithm_util::IsSupportedByCublasOrCublasLt(algorithm, gpu_version_)) return false; TF_ASSIGN_OR_RETURN( const se::blas::ComputationType compute_type, se::gpu::GetBlasComputationType( algorithm, a_dtype, instr.shape().element_type(), max_precision)); se::blas::DataType scale_type = se::gpu::GetScaleType(output_dtype, compute_type); using se::blas::ComputationType; using se::blas::DataType; using TypeCombinations = std::initializer_list<std::tuple< ComputationType, DataType , PrimitiveType , PrimitiveType , DataType >>; const TypeCombinations supported_cublas_type_combinations = { {ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E4M3FN, PrimitiveType::F8E4M3FN, DataType::kBF16}, {ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E4M3FN, PrimitiveType::F8E4M3FN, DataType::kF8E4M3FN}, {ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E4M3FN, PrimitiveType::F8E4M3FN, DataType::kHalf}, {ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E4M3FN, PrimitiveType::F8E4M3FN, DataType::kFloat}, {ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E4M3FN, PrimitiveType::F8E5M2, DataType::kBF16}, {ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E4M3FN, PrimitiveType::F8E5M2, DataType::kF8E4M3FN}, {ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E4M3FN, PrimitiveType::F8E5M2, DataType::kF8E5M2}, {ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E4M3FN, PrimitiveType::F8E5M2, DataType::kHalf}, {ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E4M3FN, PrimitiveType::F8E5M2, DataType::kFloat}, {ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E5M2, PrimitiveType::F8E4M3FN, DataType::kBF16}, {ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E5M2, PrimitiveType::F8E4M3FN, DataType::kF8E4M3FN}, {ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E5M2, PrimitiveType::F8E4M3FN, DataType::kF8E5M2}, {ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E5M2, PrimitiveType::F8E4M3FN, DataType::kHalf}, {ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E5M2, PrimitiveType::F8E4M3FN, DataType::kFloat}, {ComputationType::kF32, DataType::kComplexFloat, PrimitiveType::C64, PrimitiveType::C64, DataType::kComplexFloat}, {ComputationType::kF16AsF32, DataType::kFloat, PrimitiveType::F32, PrimitiveType::F32, DataType::kFloat}, {ComputationType::kF16AsF32, DataType::kComplexFloat, PrimitiveType::C64, PrimitiveType::C64, DataType::kComplexFloat}, {ComputationType::kBF16AsF32, DataType::kFloat, PrimitiveType::F32, PrimitiveType::F32, DataType::kFloat}, {ComputationType::kBF16AsF32, DataType::kComplexFloat, PrimitiveType::C64, PrimitiveType::C64, DataType::kComplexFloat}, {ComputationType::kTF32AsF32, DataType::kFloat, PrimitiveType::F32, PrimitiveType::F32, DataType::kFloat}, 
{ComputationType::kTF32AsF32, DataType::kComplexFloat, PrimitiveType::C64, PrimitiveType::C64, DataType::kComplexFloat}, {ComputationType::kF64, DataType::kDouble, PrimitiveType::F64, PrimitiveType::F64, DataType::kDouble}, {ComputationType::kF64, DataType::kComplexDouble, PrimitiveType::C128, PrimitiveType::C128, DataType::kComplexDouble}, }; if (IsCuda(gpu_version_) && absl::c_linear_search(supported_cublas_type_combinations, std::tuple{compute_type, scale_type, a_dtype, b_dtype, output_dtype})) { return true; } const TypeCombinations supported_hipblas_type_combinations = { {ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E4M3FNUZ, PrimitiveType::F8E4M3FNUZ, DataType::kBF16}, {ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E4M3FNUZ, PrimitiveType::F8E4M3FNUZ, DataType::kF8E4M3FNUZ}, {ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E4M3FNUZ, PrimitiveType::F8E4M3FNUZ, DataType::kHalf}, {ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E4M3FNUZ, PrimitiveType::F8E4M3FNUZ, DataType::kFloat}, {ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E4M3FNUZ, PrimitiveType::F8E5M2FNUZ, DataType::kBF16}, {ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E4M3FNUZ, PrimitiveType::F8E5M2FNUZ, DataType::kF8E4M3FNUZ}, {ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E4M3FNUZ, PrimitiveType::F8E5M2FNUZ, DataType::kF8E5M2FNUZ}, {ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E4M3FNUZ, PrimitiveType::F8E5M2FNUZ, DataType::kHalf}, {ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E4M3FNUZ, PrimitiveType::F8E5M2FNUZ, DataType::kFloat}, {ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E5M2FNUZ, PrimitiveType::F8E4M3FNUZ, DataType::kBF16}, {ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E5M2FNUZ, PrimitiveType::F8E4M3FNUZ, DataType::kF8E4M3FNUZ}, {ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E5M2FNUZ, PrimitiveType::F8E4M3FNUZ, DataType::kF8E5M2FNUZ}, {ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E5M2FNUZ, PrimitiveType::F8E4M3FNUZ, DataType::kHalf}, {ComputationType::kF32, DataType::kFloat, PrimitiveType::F8E5M2FNUZ, PrimitiveType::F8E4M3FNUZ, DataType::kFloat}, }; if (IsRocm(gpu_version_) && absl::c_linear_search(supported_hipblas_type_combinations, std::tuple{compute_type, scale_type, a_dtype, b_dtype, output_dtype})) { return true; } const TypeCombinations supported_type_combinations = { {ComputationType::kF16, DataType::kHalf, PrimitiveType::F16, PrimitiveType::F16, DataType::kHalf}, {ComputationType::kI32, DataType::kInt32, PrimitiveType::S8, PrimitiveType::S8, DataType::kInt32}, {ComputationType::kI32, DataType::kFloat, PrimitiveType::S8, PrimitiveType::S8, DataType::kInt8}, {ComputationType::kF32, DataType::kFloat, PrimitiveType::BF16, PrimitiveType::BF16, DataType::kBF16}, {ComputationType::kF32, DataType::kFloat, PrimitiveType::F16, PrimitiveType::F16, DataType::kHalf}, {ComputationType::kF32, DataType::kFloat, PrimitiveType::S8, PrimitiveType::S8, DataType::kFloat}, {ComputationType::kF32, DataType::kFloat, PrimitiveType::BF16, PrimitiveType::BF16, DataType::kFloat}, {ComputationType::kF32, DataType::kFloat, PrimitiveType::F16, PrimitiveType::F16, DataType::kFloat}, {ComputationType::kF32, DataType::kFloat, PrimitiveType::F32, PrimitiveType::F32, DataType::kFloat}, }; return absl::c_linear_search( supported_type_combinations, std::make_tuple(compute_type, scale_type, a_dtype, b_dtype, output_dtype)); } absl::StatusOr<bool> GemmIsSupportedByCublasLt( const 
HloInstruction &instr, const GemmBackendConfig &gemm_backend_config) const { const HloInstruction *lhs = instr.operand(0); const Shape &output_shape = instr.shape(); TF_ASSIGN_OR_RETURN( bool types_are_supported_by_cublas_lt, TypesAreSupportedByCublasLt(instr, gemm_backend_config)); if (!types_are_supported_by_cublas_lt) { return false; } constexpr int64_t kMaxBatchCount = 65535; const auto &batch_dimensions = gemm_backend_config.dot_dimension_numbers().lhs_batch_dimensions(); int batch_count = (batch_dimensions.empty() ? 0 : 1); for (auto batch_dimension : batch_dimensions) { batch_count *= lhs->shape().dimensions(batch_dimension); } if (batch_count > kMaxBatchCount) { return false; } if (auto isrocm = std::get_if<se::RocmComputeCapability>(&gpu_version_); isrocm) { if (!isrocm->has_hipblaslt()) { return false; } } constexpr int kMaxDimensionSize{4194240}; if (output_shape.element_type() != C64) { return true; } if (std::holds_alternative<se::CudaComputeCapability>(gpu_version_)) { if (std::get<se::CudaComputeCapability>(gpu_version_).IsAtLeastAmpere()) { return true; } } TF_ASSIGN_OR_RETURN(GemmConfig gemm_config, GemmConfig::For(&instr, gemm_backend_config)); return gemm_config.rhs_layout.num_cols <= kMaxDimensionSize; } absl::StatusOr<HloInstruction *> TurnF8DotWithUnsupportedOutputTypeIntoF32( HloInstruction *instr) { Shape output_f32_shape = instr->shape(); output_f32_shape.set_element_type(F32); HloInstruction *f32_dot = instr->AddInstruction(instr->CloneWithNewShape(output_f32_shape)); HloInstruction *convert = instr->AddInstruction( HloInstruction::CreateConvert(instr->shape(), f32_dot)); TF_RETURN_IF_ERROR(ReplaceInstruction(instr, convert)); return f32_dot; } absl::StatusOr<HloInstruction *> TurnF8DotIntoF16Dot(HloInstruction *instr) { DCHECK(IsF8Type(instr->operand(0))); DCHECK(IsF8Type(instr->operand(1))); PrimitiveType conv_type = instr->shape().element_type() == BF16 ? 
BF16 : F16; for (int i = 0; i < 2; ++i) { Shape operand_f16_shape = instr->operand(i)->shape(); operand_f16_shape.set_element_type(conv_type); HloInstruction *convert = instr->AddInstruction(HloInstruction::CreateConvert( operand_f16_shape, instr->mutable_operand(i))); TF_RETURN_IF_ERROR(instr->ReplaceOperandWith(i, convert)); } if (IsF8Type(instr)) { Shape output_f16_shape = instr->shape(); output_f16_shape.set_element_type(F16); HloInstruction *f16_dot = instr->AddInstruction(instr->CloneWithNewShape(output_f16_shape)); HloInstruction *convert_to_f8 = instr->AddInstruction( HloInstruction::CreateConvert(instr->shape(), f16_dot)); TF_RETURN_IF_ERROR(ReplaceInstruction(instr, convert_to_f8)); return f16_dot; } else { return instr; } } }; class GemmWorkspaceRewriteVisitor : public DfsHloRewriteVisitor { public: explicit GemmWorkspaceRewriteVisitor( const se::GpuComputeCapability &gpu_version) : gpu_version_(gpu_version) {} absl::Status HandleCustomCall(HloInstruction *instr) override { bool has_aux_output = false; if (instr->custom_call_target() == kCublasLtMatmulCallTarget || instr->custom_call_target() == kCublasLtMatmulF8CallTarget) { TF_ASSIGN_OR_RETURN(const auto gpu_config, instr->backend_config<xla::gpu::GpuBackendConfig>()); const xla::gpu::GemmBackendConfig &config = gpu_config.gemm_backend_config(); xla::gpu::GemmBackendConfig_Epilogue epilogue = config.epilogue(); TF_ASSIGN_OR_RETURN( has_aux_output, xla::gpu::gpublas_lt::EpilogueHasAuxiliaryOutput(epilogue)); if (!((instr->shape().IsTuple() && instr->shape().tuple_shapes_size() == has_aux_output + config.damax_output() + 1) || instr->shape().IsArray())) { return absl::OkStatus(); } } else if (instr->custom_call_target() != kGemmCallTarget || !instr->shape().IsArray()) { return absl::OkStatus(); } auto *cuda_cc = std::get_if<se::CudaComputeCapability>(&gpu_version_); int64_t workspace = cuda_cc == nullptr ? GemmConfig::kDefaultWorkspace : cuda_cc->IsAtLeastHopper() ? GemmConfig::kHopperWorkspace : GemmConfig::kDefaultWorkspace; if (instr->custom_call_target() == kGemmCallTarget) { int64_t operands_byte_size = 0; for (auto &operand : instr->operands()) { operands_byte_size += ShapeUtil::ByteSizeOf(operand->shape()); } workspace = std::min(workspace, operands_byte_size); } std::vector<Shape> output_shapes = instr->shape().IsArray() ? 
        std::vector<Shape>{instr->shape()}
        : instr->shape().tuple_shapes();
    output_shapes.emplace_back(ShapeUtil::MakeShape(S8, {workspace}));
    Shape output_shape = ShapeUtil::MakeTupleShape(output_shapes);
    HloInstruction *new_call = instr->AddInstruction(
        instr->CloneWithNewOperands(output_shape, instr->operands()));
    auto *custom_call = xla::Cast<HloCustomCallInstruction>(new_call);
    if (!custom_call->output_to_operand_aliasing().empty()) {
      custom_call->set_output_to_operand_aliasing({{{0}, {2, {}}}});
    }
    if (instr->shape().IsTuple()) {
      for (auto user : instr->users()) {
        auto user_get_tuple =
            dynamic_cast<HloGetTupleElementInstruction *>(user);
        TF_RET_CHECK(user_get_tuple);
        HloInstruction *get_output =
            instr->AddInstruction(HloInstruction::CreateGetTupleElement(
                new_call, user_get_tuple->tuple_index()));
        TF_RETURN_IF_ERROR(ReplaceInstruction(user_get_tuple, get_output));
      }
      return absl::OkStatus();
    } else {
      HloInstruction *get_output = instr->AddInstruction(
          HloInstruction::CreateGetTupleElement(new_call, 0));
      return ReplaceInstruction(instr, get_output);
    }
  }

 private:
  se::GpuComputeCapability gpu_version_;
};

absl::StatusOr<bool> RunOnComputation(HloComputation *computation,
                                      se::GpuComputeCapability gpu_version,
                                      se::SemanticVersion toolkit_version,
                                      GemmRewriterOptions options) {
  GemmRewriterVisitor visitor(gpu_version, toolkit_version, options);
  TF_RETURN_IF_ERROR(computation->Accept(&visitor));
  GemmWorkspaceRewriteVisitor workspace_visitor(gpu_version);
  TF_RETURN_IF_ERROR(computation->Accept(&workspace_visitor));
  return visitor.changed();
}

}  // namespace

GemmRewriter::GemmRewriter(se::GpuComputeCapability gpu_version,
                           se::SemanticVersion toolkit_version,
                           GemmRewriterOptions options)
    : gpu_version_(gpu_version),
      toolkit_version_(toolkit_version),
      options_(options) {}

absl::StatusOr<bool> GemmRewriter::Run(
    HloModule *module,
    const absl::flat_hash_set<absl::string_view> &execution_threads) {
  bool changed = false;
  for (HloComputation *computation :
       module->MakeNonfusionComputations(execution_threads)) {
    TF_ASSIGN_OR_RETURN(bool result,
                        RunOnComputation(computation, gpu_version_,
                                         toolkit_version_, options_));
    changed |= result;
  }
  return changed;
}

}  // namespace gpu
}  // namespace xla
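// A minimal, self-contained sketch of driving the pass directly, mirroring
// what the tests below do through RunHloPass. It is illustrative only:
// RunGemmRewriterExample is a hypothetical helper name, and the Ampere /
// CUDA 12.4 target values are example settings borrowed from the tests.
#include "absl/container/flat_hash_set.h"
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/transforms/gemm_rewriter.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/semantic_version.h"

absl::StatusOr<bool> RunGemmRewriterExample(xla::HloModule *module) {
  // Target an Ampere GPU built against a CUDA 12.4 toolkit, as in the tests.
  xla::gpu::GemmRewriter rewriter(
      stream_executor::CudaComputeCapability{
          stream_executor::CudaComputeCapability::AMPERE, 0},
      stream_executor::SemanticVersion{12, 4, 0});
  // Returns true if at least one dot was rewritten into a cuBLAS custom call.
  return rewriter.Run(module, /*execution_threads=*/{});
}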
#include "xla/service/gpu/transforms/gemm_rewriter.h" #include <array> #include <cstdint> #include <functional> #include <memory> #include <optional> #include <string> #include <tuple> #include <utility> #include <variant> #include <vector> #include "absl/container/flat_hash_map.h" #include "absl/status/statusor.h" #include "absl/strings/str_replace.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/error_spec.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/pass/hlo_pass_interface.h" #include "xla/service/buffer_assignment.h" #include "xla/service/executable.h" #include "xla/service/gpu/gpu_executable.h" #include "xla/service/gpu/tests/gpu_codegen_test.h" #include "xla/service/hlo_module_config.h" #include "xla/service/pattern_matcher.h" #include "xla/service/pattern_matcher_gmock.h" #include "xla/stream_executor/device_description.h" #include "xla/stream_executor/device_memory_allocator.h" #include "xla/stream_executor/semantic_version.h" #include "xla/stream_executor/stream_executor_memory_allocator.h" #include "xla/test.h" #include "xla/tests/filecheck.h" #include "xla/tests/verified_hlo_module.h" #include "xla/tsl/lib/core/status_test_util.h" #include "xla/xla.pb.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { namespace m = ::xla::match; class GemmRewriteTest : public GpuCodegenTest { const auto& device_desc() const { return backend().default_stream_executor()->GetDeviceDescription(); } protected: const se::GpuComputeCapability& Capability() const { return device_desc().gpu_compute_capability(); } stream_executor::SemanticVersion GetToolkitVersion() const { return backend() .default_stream_executor() ->GetDeviceDescription() .runtime_version(); } bool IsCuda() const { return std::holds_alternative<se::CudaComputeCapability>(Capability()); } bool IsRocm() const { return std::holds_alternative<se::RocmComputeCapability>(Capability()); } se::GpuComputeCapability CudaHopperOrRocmMI300() { if (IsCuda()) { return se::CudaComputeCapability::Hopper(); } else { return se::RocmComputeCapability{"gfx942"}; } } DebugOptions GetDebugOptionsForTest() override { DebugOptions debug_options = GpuCodegenTest::GetDebugOptionsForTest(); debug_options.set_xla_gpu_enable_triton_gemm(false); debug_options.set_xla_gpu_gemm_rewrite_size_threshold(0); return debug_options; } bool SkipGpuBlasLtTest() { return !IsCuda() && !std::get<se::RocmComputeCapability>(Capability()).has_hipblaslt() && GetDebugOptionsForTest().xla_gpu_enable_cublaslt(); } bool HasFp8Support() const { if (IsCuda()) { return std::get<se::CudaComputeCapability>(Capability()).IsAtLeast(8, 9); } return std::get<se::RocmComputeCapability>(Capability()).has_fp8_support(); } bool HasCudaComputeCapability(const se::CudaComputeCapability& cc) const { return IsCuda() && std::get<se::CudaComputeCapability>(Capability()).IsAtLeast(cc); } }; TEST_F(GemmRewriteTest, CheckCustomCallTarget) { if (SkipGpuBlasLtTest()) { GTEST_SKIP() << "BlasLt is not supported on this GPU architecture"; } const char* hlo_text = R"( HloModule SimpleGemm ENTRY AddDotsFunc { x = f32[2,3] parameter(0) y = f32[3,4] parameter(1) ROOT dot_a = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; DebugOptions debug_options = GetDebugOptionsForTest(); if (debug_options.xla_gpu_enable_cublaslt()) { MatchOptimizedHlo(hlo_text, R"(; CHECK: custom_call_target="__cublas$lt$matmul")"); } else { 
MatchOptimizedHlo(hlo_text, R"(; CHECK: custom_call_target="__cublas$gemm")"); } } TEST_F(GemmRewriteTest, TestBatchedAutotuning) { if (HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) { GTEST_SKIP() << "There is no autotuning starting with the Nvidia Ampere generation"; } const char* hlo_text = R"( HloModule ComplexDotMultipleNonContracting ENTRY %test { %lhs = f32[7,17,10,13]{3,2,1,0} parameter(0) %rhs = f32[7,9,10,13,6]{4,3,2,1,0} parameter(1) ROOT %dot = f32[10,7,17,9,6]{4,3,2,1,0} dot(%lhs, %rhs), lhs_batch_dims={2,0}, rhs_batch_dims={2,0}, lhs_contracting_dims={3}, rhs_contracting_dims={3} } )"; MatchOptimizedHlo(hlo_text, R"( ; CHECK: selected_algorithm )"); } TEST_F(GemmRewriteTest, SimpleRewriteDeterministic) { if (SkipGpuBlasLtTest()) { GTEST_SKIP() << "BlasLt is not supported on this GPU architecture"; } const char* hlo_text = R"( HloModule SimpleGemm ENTRY AddDotsFunc { x = f32[128,128] parameter(0) y = f32[128,128] parameter(1) ROOT dot_a = f32[128,128] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; ErrorSpec error_spec = [&] { DebugOptions debug_options = GetDebugOptionsForTest(); if (debug_options.xla_gpu_enable_cublaslt()) { return ErrorSpec{1e-3, 1e-3}; } else { return ErrorSpec{1e-3, 1e-3}; } }(); auto get_module = [&]() { HloModuleConfig config; DebugOptions debug_options = GetDebugOptionsForTest(); debug_options.set_xla_gpu_exclude_nondeterministic_ops(true); config.set_debug_options(debug_options); return ParseAndReturnVerifiedModule(hlo_text, config); }; se::StreamExecutorMemoryAllocator allocator( backend().default_stream_executor()); TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> optimized_module, backend().compiler()->RunHloPasses( *get_module(), backend().default_stream_executor(), &allocator)); absl::StatusOr<bool> filecheck_result = RunFileCheck(optimized_module->ToString(), R"( ; CHECK: custom_call_target="__cublas${{(lt\$matmul|gemm)}}" )"); TF_ASSERT_OK(filecheck_result.status()); EXPECT_TRUE(filecheck_result.value()); EXPECT_TRUE(RunAndCompare(*get_module(), error_spec)); } TEST_F(GemmRewriteTest, BF16GemmCodeGen) { const char* hlo_text = R"( HloModule bf16codegendgemm ENTRY bf16gemm { %parameter.1 = bf16[3]{0} parameter(0) %parameter.2 = bf16[3]{0} parameter(1) ROOT %dot.3 = bf16[] dot(bf16[3]{0} %parameter.1, bf16[3]{0} %parameter.2), lhs_contracting_dims={0}, rhs_contracting_dims={0}, operand_precision={highest,highest} } )"; if (HasCudaComputeCapability(se::CudaComputeCapability::Hopper())) { MatchOptimizedHlo(hlo_text, R"( ; CHECK: [[P0:%[^ ]+]] = bf16[3]{0} parameter(0) ; CHECK: [[P1:%[^ ]+]] = bf16[3]{0} parameter(1) ; CHECK: [[INSTR_2:%[^ ]+]] = bf16[3]{0} multiply([[P0]], [[P1]]) ; CHECK: [[INSTR_3:%[^ ]+]] = f32[3]{0} convert([[INSTR_2]]) ; CHECK: [[INSTR_4:%[^ ]+]] = f32[] constant(0) ; CHECK: [[INSTR_5:%[^ ]+]] = f32[] reduce([[INSTR_3]], [[INSTR_4]]), dimensions={0}, to_apply=[[INSTR_6:%[^ ]+]] ; CHECK: ROOT [[INSTR_7:%[^ ]+]] = bf16[] convert([[INSTR_5]]) )"); } else { MatchOptimizedHlo(hlo_text, R"( ; CHECK: [[P1:%[^ ]+]] = bf16[3]{0} parameter(1) ; CHECK: [[INSTR_1:%[^ ]+]] = f32[3]{0} convert([[P1]]) ; CHECK: [[P0:%[^ ]+]] = bf16[3]{0} parameter(0) ; CHECK: [[INSTR_3:%[^ ]+]] = f32[3]{0} convert([[P0]]) ; CHECK: [[INSTR_4:%[^ ]+]] = f32[3]{0} multiply([[INSTR_1]], [[INSTR_3]]) ; CHECK: [[INSTR_5:%[^ ]+]] = f32[] constant(0) ; CHECK: [[INSTR_6:%[^ ]+]] = f32[] reduce([[INSTR_4]], [[INSTR_5]]), dimensions={0}, to_apply=[[INSTR_7:%[^ ]+]] ; CHECK: ROOT [[INSTR_8:%[^ ]+]] = bf16[] convert([[INSTR_6]]) )"); 
} EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-4, 1e-4})); } TEST_F(GemmRewriteTest, BF16Transpose) { const char* hlo_text = R"( HloModule broadcast ENTRY broadcast { p = bf16[9] parameter(0) ROOT out = bf16[1,9] broadcast(p), dimensions={1} } )"; MatchOptimizedHlo(hlo_text, R"( ; CHECK: bf16[1,9]{1,0} bitcast ; CHECK: bf16[1,9]{1,0} copy )"); EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); } class ParameterizedGemmRewriteTest : public GemmRewriteTest, public ::testing::WithParamInterface<bool> { public: ParameterizedGemmRewriteTest() { const bool kUsingCublasLt = GetParam(); replacements_[kCustomCallTargetPlaceholder] = kUsingCublasLt ? "__cublas$lt$matmul" : "__cublas$gemm"; } DebugOptions GetDebugOptionsForTest() override { DebugOptions debug_options = GemmRewriteTest::GetDebugOptionsForTest(); debug_options.set_xla_gpu_enable_cublaslt(GetParam()); debug_options.set_xla_gpu_enable_triton_gemm(false); return debug_options; } void MatchOptimizedHlo(absl::string_view hlo, const absl::string_view pattern, bool print_operand_shape = false) { GemmRewriteTest::MatchOptimizedHlo( hlo, absl::StrReplaceAll(pattern, replacements_), print_operand_shape); } absl::string_view CustomCallTarget() { return replacements_[kCustomCallTargetPlaceholder]; } protected: void SetUp() override { if (SkipGpuBlasLtTest()) { GTEST_SKIP() << "BlasLt is not supported on this GPU architecture"; } } protected: absl::flat_hash_map<absl::string_view, absl::string_view> replacements_; private: static constexpr const char* kCustomCallTargetPlaceholder{ "<<CUBLAS_CUSTOM_CALL_TARGET_PLACEHOLDER>>"}; }; TEST_P(ParameterizedGemmRewriteTest, Simple) { const char* hlo_text = R"( HloModule test ENTRY test { x = f32[2,3] parameter(0) y = f32[3,4] parameter(1) ROOT dot_a = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3], {{.*}}: f32[3,4]) -> f32[2,4] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1) ; CHECK-NEXT: [[GEMM:%[^ ]+]] = {{.*}} custom-call([[P0]], [[P1]]), ; CHECK: custom_call_target="<<CUBLAS_CUSTOM_CALL_TARGET_PLACEHOLDER>>", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_P(ParameterizedGemmRewriteTest, SimpleRewrite) { const char* hlo_text = R"( HloModule SimpleGemm ENTRY AddDotsFunc { x = f32[2,3] parameter(0) y = f32[3,4] parameter(1) ROOT dot_a = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[2,3], {{.*}}: f32[3,4]) -> f32[2,4] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1) ; CHECK-NEXT: [[GEMM:%[^ ]+]] = {{.*}} custom-call([[P0]], [[P1]]), ; CHECK: custom_call_target="<<CUBLAS_CUSTOM_CALL_TARGET_PLACEHOLDER>>", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; 
CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_P(ParameterizedGemmRewriteTest, MultipleContractingDims) { const char* hlo_text = R"( HloModule MultipleContractingCheckGemm ENTRY AddDotsFunc { x = f32[3,4,2] parameter(0) y = f32[3,4,5] parameter(1) ROOT dot_a = f32[2,5] dot(x, y), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1}, operand_precision={highest,highest} } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-NOT: copy ; ; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[3,4,2], {{.*}}: f32[3,4,5]) -> f32[2,5] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[3,4,2]{2,1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f32[3,4,5]{2,1,0} parameter(1) ; CHECK-DAG: [[BITCAST0:%[^ ]+]] = f32[2,12]{0,1} bitcast([[P0]]) ; CHECK-DAG: [[BITCAST1:%[^ ]+]] = f32[12,5]{1,0} bitcast([[P1]]) ; CHECK-NEXT: [[GEMM:%[^ ]+]] = {{.*}} custom-call([[BITCAST0]], [[BITCAST1]]), ; CHECK: custom_call_target="<<CUBLAS_CUSTOM_CALL_TARGET_PLACEHOLDER>>", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["HIGHEST","HIGHEST"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_P(ParameterizedGemmRewriteTest, ArgTransposeFoldCheck) { const char* hlo_text = R"( HloModule ArgTransposeFoldGemm ENTRY AddDotsFunc { x = f32[3,2] parameter(0) y = f32[3,4] parameter(1) x_transposed = f32[2,3] transpose(x), dimensions={1, 0} ROOT dot_a = f32[2,4] dot(x_transposed, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[3,2], {{.*}}: f32[3,4]) -> f32[2,4] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[3,2]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1) ; CHECK-NEXT: [[GEMM:%[^ ]+]] = {{.*}} custom-call([[P0]], [[P1]]), ; CHECK: custom_call_target="<<CUBLAS_CUSTOM_CALL_TARGET_PLACEHOLDER>>", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["0"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_P(ParameterizedGemmRewriteTest, BatchedArgRowColTransposeFoldCheck) { const char* hlo_text = R"( HloModule BatchedArgRowColTransposeFoldGemm ENTRY AddDotsFunc { x = f32[5,3,2] parameter(0) y = f32[5,3,4] parameter(1) x_transposed = f32[5,2,3] transpose(x), dimensions={0, 2, 1} ROOT dot_a = f32[5,2,4] dot(x_transposed, y), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0} } )"; 
EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[5,3,2], {{.*}}: f32[5,3,4]) -> f32[5,2,4] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[5,3,2]{2,1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[5,3,4]{2,1,0} parameter(1) ; CHECK-NEXT: [[GEMM:%[^ ]+]] = {{.*}} custom-call([[P0]], [[P1]]), ; CHECK: custom_call_target="<<CUBLAS_CUSTOM_CALL_TARGET_PLACEHOLDER>>", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":["0"] ; CHECK-DAG: "rhs_batch_dimensions":["0"] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_P(ParameterizedGemmRewriteTest, BatchRowTransposeFoldCheck) { const char* hlo_text = R"( HloModule BatchRowTransposeFoldCheck ENTRY AddDotsFunc { x = f32[2,5,3] parameter(0) y = f32[5,3,4] parameter(1) x_transposed = f32[5,2,3] transpose(x), dimensions={1, 0, 2} ROOT dot_a = f32[5,2,4] dot(x_transposed, y), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0} } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{2.5e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[2,5,3], {{.*}}: f32[5,3,4]) -> f32[5,2,4] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,5,3]{2,1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[5,3,4]{2,1,0} parameter(1) ; CHECK-NEXT: [[GEMM:%[^ ]+]] = {{.*}} custom-call([[P0]], [[P1]]), ; CHECK: custom_call_target="<<CUBLAS_CUSTOM_CALL_TARGET_PLACEHOLDER>>", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["2"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":["1"] ; CHECK-DAG: "rhs_batch_dimensions":["0"] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_P(ParameterizedGemmRewriteTest, BatchFromMinorDimTransposeIsNotFolded) { const char* hlo_text = R"( HloModule BatchFromMinorDimTransposeDoesntFold ENTRY AddDotsFunc { x = f32[3,2,5] parameter(0) y = f32[5,3,4] parameter(1) x_transposed = f32[5,2,3] transpose(x), dimensions={2, 1, 0} ROOT dot_a = f32[5,2,4] dot(x_transposed, y), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0} } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{2.5e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[3,2,5], {{.*}}: f32[5,3,4]) -> f32[5,2,4] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[3,2,5]{2,1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f32[5,3,4]{2,1,0} parameter(1) ; CHECK-DAG: [[FUSION:%[^ ]+]] = f32[5,2,3]{2,1,0} transpose([[P0]]) ; CHECK-NEXT: [[GEMM:%[^ ]+]] = {{.*}} custom-call([[FUSION]], [[P1]]), ; CHECK: custom_call_target="<<CUBLAS_CUSTOM_CALL_TARGET_PLACEHOLDER>>", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["2"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":["0"] ; CHECK-DAG: 
"rhs_batch_dimensions":["0"] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_P(ParameterizedGemmRewriteTest, LargeBatch) { const char* hlo_text = R"( HloModule BatchedArgRowColTransposeFoldGemm ENTRY AddDotsFunc { x = f32[20000,4,3,2] parameter(0) y = f32[20000,4,3,4] parameter(1) ROOT dot_a = f32[20000,4,2,4] dot(x, y), lhs_contracting_dims={2}, rhs_contracting_dims={2}, lhs_batch_dims={0,1}, rhs_batch_dims={0,1} } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[20000,4,3,2], {{.*}}: f32[20000,4,3,4]) -> f32[20000,4,2,4] { ; CHECK: [[P0:%[^ ]+]] = f32[20000,4,3,2]{3,2,1,0} parameter(0) ; CHECK: [[BC0:%[^ ]+]] = f32[80000,3,2]{2,1,0} bitcast([[P0]]) ; CHECK: [[P1:%[^ ]+]] = f32[20000,4,3,4]{3,2,1,0} parameter(1) ; CHECK: [[BC1:%[^ ]+]] = f32[80000,3,4]{2,1,0} bitcast([[P1]]) ; CHECK: [[GEMM:%[^ ]+]] = (f32[80000,2,4]{2,1,0}, s8[{{[0-9]+}}]{0}) custom-call([[BC0]], [[BC1]]), ; CHECK: custom_call_target="__cublas$gemm", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":["0"] ; CHECK-DAG: "rhs_batch_dimensions":["0"] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK: } ; CHECK: [[OUT:%[^ ]+]] = f32[80000,2,4]{2,1,0} get-tuple-element([[GEMM]]), index=0 ; CHECK: ROOT {{[^ ]+}} = f32[20000,4,2,4]{3,2,1,0} bitcast([[OUT]]) )"); } TEST_P(ParameterizedGemmRewriteTest, InstrTransposeFoldCheck) { const char* hlo_text = R"( HloModule InstrTransposeFoldGemm ENTRY AddDotsFunc { x = f32[2,3] parameter(0) y = f32[3,4] parameter(1) dot_a = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT out = f32[4,2] transpose(dot_a), dimensions={1, 0} } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[2,3], {{.*}}: f32[3,4]) -> f32[4,2] { ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1) ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0) ; CHECK-NEXT: [[GEMM:%[^ ]+]] = {{.*}} custom-call([[P1]], [[P0]]), ; CHECK: custom_call_target="<<CUBLAS_CUSTOM_CALL_TARGET_PLACEHOLDER>>", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["0"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_P(ParameterizedGemmRewriteTest, BatchedInstrLayoutTransposed) { const char* hlo_text = R"( HloModule BatchedInstrLayoutCheck ENTRY AddDotsFunc { x = f32[5,2,3] parameter(0) y = f32[5,3,4] parameter(1) dot_a = f32[5,2,4] dot(x, y), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0} ROOT out = f32[2,5,4] transpose(dot_a), dimensions={1, 0, 2} } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{2.5e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %AddDotsFunc 
({{.*}}: f32[5,2,3], {{.*}}: f32[5,3,4]) -> f32[2,5,4] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[5,2,3]{2,1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[5,3,4]{2,1,0} parameter(1) ; CHECK-NEXT: [[GEMM:%[^ ]+]] = {{.*}} custom-call([[P0]], [[P1]]), ; CHECK: custom_call_target="<<CUBLAS_CUSTOM_CALL_TARGET_PLACEHOLDER>>", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["2"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":["0"] ; CHECK-DAG: "rhs_batch_dimensions":["0"] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } ; CHECK: ROOT [[OUT:%[^ ]+]] = f32[2,5,4]{2,1,0} bitcast )"); } TEST_P(ParameterizedGemmRewriteTest, BatchedInstrLayoutBatchNotInMinorDim) { const char* hlo_text = R"( HloModule BatchedInstrLayoutBatchNotInMinorDim ENTRY AddDotsFunc { x = f32[5,2,3] parameter(0) y = f32[5,3,4] parameter(1) dot_a = f32[5,2,4] dot(x, y), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0} ROOT out = f32[2,4,5] transpose(dot_a), dimensions={1, 2, 0} } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{2.5e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[5,2,3], {{.*}}: f32[5,3,4]) -> f32[2,4,5] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[5,2,3]{2,1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[5,3,4]{2,1,0} parameter(1) ; CHECK-NEXT: [[GEMM:%[^ ]+]] = {{.*}} custom-call([[P0]], [[P1]]), ; CHECK: custom_call_target="<<CUBLAS_CUSTOM_CALL_TARGET_PLACEHOLDER>>", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["2"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":["0"] ; CHECK-DAG: "rhs_batch_dimensions":["0"] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } ; CHECK: ROOT [[OUT:%[^ ]+]] = f32[2,4,5]{2,1,0} [[OP:[^ ]+]] )"); } TEST_P(ParameterizedGemmRewriteTest, AlphaSimpleRewrite) { const char* hlo_text = R"( HloModule AlphaSimpleRewrite ENTRY AddDotsFunc { x = f32[2,2] parameter(0) y = f32[2,2] parameter(1) k = f32[] constant(3.0) k_broadcast = f32[2, 2] broadcast(k), dimensions={} dot_a = f32[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}, operand_precision={highest,highest} ROOT dot_a_multiplied = f32[2, 2] multiply(dot_a, k_broadcast) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[2,2], {{.*}}: f32[2,2]) -> f32[2,2] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,2]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[2,2]{1,0} parameter(1) ; CHECK-NEXT: [[GEMM:%[^ ]+]] = {{.*}} custom-call([[P0]], [[P1]]), ; CHECK: custom_call_target="<<CUBLAS_CUSTOM_CALL_TARGET_PLACEHOLDER>>", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":3 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: 
"operand_precision":["HIGHEST","HIGHEST"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_P(ParameterizedGemmRewriteTest, F64C64_CublasLtSupportTest) { { const char* hlo_text = R"( HloModule F64_rewrite ENTRY AddDotsFunc { x = f64[2,2] parameter(0) y = f64[2,2] parameter(1) k = f64[] constant(3.0) k_broadcast = f64[2, 2] broadcast(k), dimensions={} dot_a = f64[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT dot_a_multiplied = f64[2, 2] multiply(dot_a, k_broadcast) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-4, 1e-5})); } { const char* hlo_text = R"( HloModule C64_rewrite ENTRY AddDotsFunc { x = c64[2,2] parameter(0) y = c64[2,2] parameter(1) k = c64[] constant((3.0, 3.0)) k_broadcast = c64[2, 2] broadcast(k), dimensions={} dot_a = c64[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT dot_a_multiplied = c64[2, 2] multiply(dot_a, k_broadcast) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-4, 1e-5})); } } TEST_P(ParameterizedGemmRewriteTest, ComplexAlphaSimpleRewrite) { if (!IsCuda() && GetDebugOptionsForTest().xla_gpu_enable_cublaslt()) { GTEST_SKIP() << "TODO: Unsupported C64 gpublas-lt datatype on ROCM"; } const char* hlo_text = R"( HloModule ComplexAlphaSimpleRewrite ENTRY AddDotsFunc { x = c64[2,2] parameter(0) y = c64[2,2] parameter(1) k = c64[] constant((3.0, 3.0)) k_broadcast = c64[2, 2] broadcast(k), dimensions={} dot_a = c64[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT dot_a_multiplied = c64[2, 2] multiply(dot_a, k_broadcast) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-4, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: c64[2,2], {{.*}}: c64[2,2]) -> c64[2,2] { ; CHECK-NEXT: [[P0:%[^ ]+]] = c64[2,2]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = c64[2,2]{1,0} parameter(1) ; CHECK-NEXT: [[GEMM:%[^ ]+]] = {{.*}} custom-call([[P0]], [[P1]]), ; CHECK: custom_call_target="<<CUBLAS_CUSTOM_CALL_TARGET_PLACEHOLDER>>", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":3 ; CHECK-DAG: "alpha_imag":3 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_P(ParameterizedGemmRewriteTest, AlphaMultipleUsersNoRewrite) { const char* hlo_text = R"( HloModule AlphaMultipleUsersNoRewrite ENTRY AddDotsFunc { x = f32[2,2] parameter(0) y = f32[2,2] parameter(1) k = f32[] constant(3.0) k_broadcast = f32[2, 2] broadcast(k), dimensions={} dot_a = f32[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}, operand_precision={highest,highest} dot_a_multiplied = f32[2, 2] multiply(dot_a, k_broadcast) ROOT out = f32[2,2] add(dot_a_multiplied, dot_a) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK: {{[^ ]+}} = {{.*}} custom-call({{[^,]+}}, {{[^)]+}}), ; CHECK: custom_call_target="<<CUBLAS_CUSTOM_CALL_TARGET_PLACEHOLDER>>", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: 
"rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["HIGHEST","HIGHEST"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_P(ParameterizedGemmRewriteTest, AlphaVectorNoRewrite) { const char* hlo_text = R"( HloModule AlphaVectorNoRewrite ENTRY AddDotsFunc { x = f32[2,2] parameter(0) y = f32[2,2] parameter(1) alpha = f32[2] constant({1, 2}) alpha_broadcast = f32[2,2] broadcast(alpha), dimensions={1} dot = f32[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT dot_a_multiplied = f32[2, 2] multiply(dot, alpha_broadcast) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[2,2], {{.*}}: f32[2,2]) -> f32[2,2] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,2]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[2,2]{1,0} parameter(1) ; CHECK-NEXT: [[GEMM:%[^ ]+]] = {{.*}} custom-call([[P0]], [[P1]]), ; CHECK: custom_call_target="<<CUBLAS_CUSTOM_CALL_TARGET_PLACEHOLDER>>", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_P(ParameterizedGemmRewriteTest, BF16Gemm) { const char* hlo_text = R"( HloModule bf16gemm ENTRY bf16gemm { %parameter.1 = bf16[12,4]{1,0} parameter(0) %parameter.2 = bf16[4,8]{1,0} parameter(1) ROOT %dot.8 = bf16[12,8] dot(bf16[12,4] %parameter.1, bf16[4,8] %parameter.2), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); if (!IsCuda() || HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) { MatchOptimizedHlo(hlo_text, R"( ; CHECK: {{.*}} custom-call(bf16[16,8]{1,0} {{.*}}, bf16[8,8]{1,0} {{.*}}), custom_call_target="<<CUBLAS_CUSTOM_CALL_TARGET_PLACEHOLDER>>" )", true); } else { GTEST_SKIP() << "Pre-Ampere casts up bf16 to fp32"; } } TEST_P(ParameterizedGemmRewriteTest, BF16GemmStrided) { const char* hlo_text = R"( HloModule bf16gemm ENTRY bf16gemm { %parameter.1 = bf16[3,3,4] parameter(0) %parameter.2 = bf16[3,3,2] parameter(1) ROOT %dot.3 = bf16[3,4,2]{2,1,0} dot(bf16[3,3,4]{2,1,0} %parameter.1, bf16[3,3,2]{2,1,0} %parameter.2), lhs_batch_dims={0}, lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1}, operand_precision={highest,highest} } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); if (!IsCuda() || HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) { MatchOptimizedHlo(hlo_text, R"( ; CHECK: {{.*}} custom-call(bf16[3,8,8]{2,1,0} {{.*}}, bf16[3,8,8]{2,1,0} {{.*}}), custom_call_target="<<CUBLAS_CUSTOM_CALL_TARGET_PLACEHOLDER>>" )", true); } else { GTEST_SKIP() << "Pre-Ampere casts up bf16 to fp32"; } } TEST_P(ParameterizedGemmRewriteTest, Int8Gemm) { const char* hlo_text = R"( HloModule int8gemm ENTRY int8gemm { %parameter.1 = s8[12,4]{1,0} parameter(0) %parameter.2 = s8[4,8]{1,0} parameter(1) ROOT %dot.8 = s32[12,8] dot(s8[12,4] %parameter.1, s8[4,8] %parameter.2), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); if (!IsCuda() || 
HasCudaComputeCapability(se::CudaComputeCapability::Volta())) { MatchOptimizedHlo(hlo_text, R"( ; CHECK: {{.*}} custom-call(s8[12,4]{1,0} [[A:%[^ ]+]], s8[4,8]{0,1} [[B:%[^ ]+]]), custom_call_target="__cublas$gemm" )", true); } else { MatchOptimizedHlo(hlo_text, R"( ; CHECK: {{.*}} dot(s32[12,4]{1,0} [[A:%[^ ]+]], s32[4,8]{1,0} [[B:%[^ ]+]]), lhs_contracting_dims={1}, rhs_contracting_dims={0} )", true); } } TEST_F(GemmRewriteTest, Int8GemmRankGreaterThanTwo) { if (!IsCuda()) { GTEST_SKIP() << "DoBlasGemmWithAlgorithm is not yet implemented on ROCm"; } const char* hlo_text = R"( HloModule int8gemm ENTRY main.4 { Arg_0.1 = s8[1,8,2]{2,1,0} parameter(0) Arg_1.2 = s8[2,4]{1,0} parameter(1) ROOT dot.3 = s32[1,8,4]{2,1,0} dot(Arg_0.1, Arg_1.2), lhs_contracting_dims={2}, rhs_contracting_dims={0} } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); if (!IsCuda() || HasCudaComputeCapability(se::CudaComputeCapability::Volta())) { MatchOptimizedHlo(hlo_text, R"( ; CHECK: [[GEMM:%[^ ]+]] = (s32[8,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call(s8[8,4]{1,0} %{{.*}}, s8[4,4]{0,1} %{{.*}}), custom_call_target="__cublas$gemm", )", true); } } TEST_P(ParameterizedGemmRewriteTest, Int8GemmNoAlphaRewrite) { const char* hlo_text = R"( HloModule int8gemm ENTRY int8gemm { %parameter.1 = s8[12,4]{1,0} parameter(0) %parameter.2 = s8[4,8]{1,0} parameter(1) k = s32[] constant(2) k_broadcast = s32[12,8] broadcast(k), dimensions={} %dot.8 = s32[12,8] dot(s8[12,4] %parameter.1, s8[4,8] %parameter.2), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT dot_multiplied = s32[12,8] multiply(%dot.8, k_broadcast) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); if (!IsCuda() || HasCudaComputeCapability(se::CudaComputeCapability::Volta())) { MatchOptimizedHlo(hlo_text, R"( ; CHECK: {{.*}} custom-call(s8[12,4]{1,0} [[A:%[^ ]+]], s8[4,8]{0,1} [[B:%[^ ]+]]), ; CHECK: custom_call_target="__cublas$gemm", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 )", true); } else { MatchOptimizedHlo(hlo_text, R"( ; CHECK: {{.*}} dot(s32[12,4]{1,0} [[A:%[^ ]+]], s32[4,8]{1,0} [[B:%[^ ]+]]), lhs_contracting_dims={1}, rhs_contracting_dims={0} )", true); } } TEST_P(ParameterizedGemmRewriteTest, Int8GemmNoBetaRewrite) { const char* hlo_text = R"( HloModule int8gemm ENTRY int8gemm { %parameter.1 = s8[12,4]{1,0} parameter(0) %parameter.2 = s8[4,8]{1,0} parameter(1) bias = s32[12,8] parameter(2) %dot.8 = s32[12,8] dot(s8[12,4] %parameter.1, s8[4,8] %parameter.2), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT out = s32[12,8] add(%dot.8, bias) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); if (!IsCuda() || HasCudaComputeCapability(se::CudaComputeCapability::Volta())) { MatchOptimizedHlo(hlo_text, R"( ; CHECK: {{.*}} custom-call(s8[12,4]{1,0} [[A:%[^ ]+]], s8[4,8]{0,1} [[B:%[^ ]+]]), ; CHECK: custom_call_target="__cublas$gemm", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 )", true); } else { MatchOptimizedHlo(hlo_text, R"( ; CHECK: {{.*}} dot(s32[12,4]{1,0} [[A:%[^ ]+]], s32[4,8]{1,0} [[B:%[^ ]+]]), lhs_contracting_dims={1}, rhs_contracting_dims={0} )", true); } } TEST_P(ParameterizedGemmRewriteTest, Int8GemmNotMultipleOfFour) { if (!IsCuda()) { GTEST_SKIP() << "DoBlasGemmWithAlgorithm is not yet implemented on ROCm"; } const char* hlo_text = R"( HloModule int8gemm ENTRY int8gemm { %parameter.1 = s8[13,4]{1,0} parameter(0) %parameter.2 = s8[4,9]{1,0} parameter(1) ROOT %dot.9 = s32[13,9] dot(s8[13,4] 
%parameter.1, s8[4,9] %parameter.2), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); if (!IsCuda() || HasCudaComputeCapability(se::CudaComputeCapability::Volta())) { MatchOptimizedHlo(hlo_text, R"( ; CHECK: {{.*}} custom-call(s8[16,4]{1,0} [[A:%[^ ]+]], s8[4,12]{0,1} [[B:%[^ ]+]]), custom_call_target="__cublas$gemm" )", true); } else { MatchOptimizedHlo(hlo_text, R"( ; CHECK: {{.*}} dot(s32[13,4]{1,0} [[A:%[^ ]+]], s32[4,9]{1,0} [[B:%[^ ]+]]), lhs_contracting_dims={1}, rhs_contracting_dims={0} )", true); } } TEST_P(ParameterizedGemmRewriteTest, GemmTypeCombinationCheck) { if (!IsCuda()) { GTEST_SKIP() << "DoBlasGemmWithAlgorithm is not yet implemented on ROCm"; } std::vector<std::tuple<absl::string_view, absl::string_view, bool>> type_combinations = {{"s8", "s8", true}, {"s32", "s32", true}, {"bf16", "bf16", true}, {"f16", "f16", true}, {"f32", "f32", true}, {"f64", "f64", true}, {"c64", "c64", true}, {"c128", "c128", true}, {"s8", "s32", true}, {"s8", "f32", true}, {"f16", "f32", true}, {"bf16", "f32", true}}; if (!IsCuda() || HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) { std::vector<std::tuple<absl::string_view, absl::string_view, bool>> more_type_combinations = { {"s8", "bf16", false}, {"s8", "f16", false}, {"s8", "f64", false}, {"s8", "c64", false}, {"s8", "c128", false}, {"s32", "f32", false}, {"s32", "f64", false}, {"s32", "c64", false}, {"s32", "c128", false}, {"f16", "bf16", false}, {"f16", "f64", false}, {"f16", "c64", false}, {"f16", "c128", false}, {"bf16", "f16", false}, {"bf16", "f64", false}, {"bf16", "c64", false}, {"bf16", "c128", false}, {"f32", "f64", false}, {"f32", "c64", false}, {"f32", "c128", false}, {"f64", "c64", false}, {"f64", "c128", false}, }; type_combinations.insert(type_combinations.end(), more_type_combinations.begin(), more_type_combinations.end()); } for (const auto& type_combination : type_combinations) { absl::flat_hash_map<absl::string_view, absl::string_view> replacements; replacements["<<ABType>>"] = std::get<0>(type_combination); replacements["<<DType>>"] = std::get<1>(type_combination); const char* hlo_template = R"( HloModule type_combo ENTRY type_combo { %parameter.1 = <<ABType>>[4,4]{1,0} parameter(0) %parameter.2 = <<ABType>>[4,4]{1,0} parameter(1) ROOT %dot = <<DType>>[4,4] dot(%parameter.1, %parameter.2), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; const auto hlo_text = absl::StrReplaceAll(hlo_template, replacements); if (std::get<2>(type_combination)) { EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3})); } else { EXPECT_FALSE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3})); } } } TEST_P(ParameterizedGemmRewriteTest, UpcastingBf16ToF64) { const char* hlo_text = R"( HloModule test ENTRY test { Arg_0.1 = bf16[4,3]{1,0} parameter(0) Arg_1.2 = bf16[3,6]{1,0} parameter(1) ROOT dot.3 = f64[4,6]{1,0} dot(Arg_0.1, Arg_1.2), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text)); GemmRewriter pass(Capability(), GetToolkitVersion()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_TRUE(changed); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch(m::GetTupleElement(m::CustomCall({"__cublas$gemm"}), 0))); } TEST_P(ParameterizedGemmRewriteTest, UpcastingC64ToC128) { const char* hlo_text = R"( HloModule test ENTRY test { Arg_0.1 = c64[4,3]{1,0} parameter(0) Arg_1.2 = c64[3,6]{1,0} 
parameter(1) ROOT dot.3 = c128[4,6]{1,0} dot(Arg_0.1, Arg_1.2), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text)); GemmRewriter pass(Capability(), GetToolkitVersion()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_TRUE(changed); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch(m::GetTupleElement(m::CustomCall({"__cublas$gemm"}), 0))); } TEST_P(ParameterizedGemmRewriteTest, UpcastingF16ToF32) { const char* hlo_text = R"( HloModule test ENTRY test { Arg_0.1 = f16[4,3]{1,0} parameter(0) Arg_1.2 = f16[3,6]{1,0} parameter(1) ROOT dot.3 = f32[4,6]{1,0} dot(Arg_0.1, Arg_1.2), lhs_contracting_dims={1}, rhs_contracting_dims={0}, operand_precision={highest, highest} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text)); GemmRewriter pass(Capability(), GetToolkitVersion()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_TRUE(changed); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch(m::GetTupleElement(m::CustomCall({CustomCallTarget()}), 0))); } TEST_P(ParameterizedGemmRewriteTest, UpcastingF16ToF64) { const char* hlo_text = R"( HloModule test ENTRY test { Arg_0.1 = f16[4,3]{1,0} parameter(0) Arg_1.2 = f16[3,6]{1,0} parameter(1) ROOT dot.3 = f64[4,6]{1,0} dot(Arg_0.1, Arg_1.2), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text)); GemmRewriter pass(Capability(), GetToolkitVersion()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_TRUE(changed); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch(m::GetTupleElement(m::CustomCall({"__cublas$gemm"}), 0))); } TEST_P(ParameterizedGemmRewriteTest, UpcastingF32ToF64) { const char* hlo_text = R"( HloModule test ENTRY test { Arg_0.1 = f32[4,3]{1,0} parameter(0) Arg_1.2 = f32[3,6]{1,0} parameter(1) ROOT dot.3 = f64[4,6]{1,0} dot(Arg_0.1, Arg_1.2), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text)); GemmRewriter pass(Capability(), GetToolkitVersion()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_TRUE(changed); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch(m::GetTupleElement(m::CustomCall({"__cublas$gemm"}), 0))); } TEST_P(ParameterizedGemmRewriteTest, DoNotUpconvertOutput) { const char* hlo_text = R"( HloModule test ENTRY main { param_0 = f16[240,88]{1,0} parameter(0) param_1 = f16[88,4]{1,0} parameter(1) dot = f16[240,4]{1,0} dot(param_0, param_1), lhs_contracting_dims={1}, rhs_contracting_dims={0}, operand_precision={highest,highest} constant_255 = f16[] constant(255) broadcast = f16[240,4]{1,0} broadcast(constant_255), dimensions={} multiply = f16[240,4]{1,0} multiply(dot, broadcast) ROOT result = f32[240,4]{1,0} convert(multiply) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text)); GemmRewriter pass(Capability(), GetToolkitVersion()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Convert( m::GetTupleElement(m::CustomCall({CustomCallTarget()}), 0)))); } 
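// Reading guide for the FileCheck patterns in this suite (a sketch of the
// common rewritten form, not an exact dump): for a plain f32[2,3] x f32[3,4]
// dot, the optimized HLO produced by the rewriter looks roughly like
//
//   %gemm = (f32[2,4]{1,0}, s8[WORKSPACE]{0}) custom-call(%x, %y),
//       custom_call_target="__cublas$gemm",   // or "__cublas$lt$matmul"
//       backend_config={"alpha_real":1, "beta":0, "dot_dimension_numbers":{...}}
//   ROOT %out = f32[2,4]{1,0} get-tuple-element(%gemm), index=0
//
// where WORKSPACE is the target-dependent scratch size appended by
// GemmWorkspaceRewriteVisitor.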
TEST_P(ParameterizedGemmRewriteTest, UnsupportedMixTypeGemm) { const char* hlo_text = R"( HloModule test ENTRY main { param_0 = f32[240,88]{1,0} parameter(0) param_1 = f32[88,4]{1,0} parameter(1) dot = f32[240,4]{1,0} dot(param_0, param_1), lhs_contracting_dims={1}, rhs_contracting_dims={0}, operand_precision={highest,highest} constant_255 = f32[] constant(255) broadcast = f32[240,4]{1,0} broadcast(constant_255), dimensions={} multiply = f32[240,4]{1,0} multiply(dot, broadcast) ROOT result = u8[240,4]{1,0} convert(multiply) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text)); GemmRewriter pass(Capability(), GetToolkitVersion()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Convert( m::GetTupleElement(m::CustomCall({CustomCallTarget()}), 0)))); } TEST_P(ParameterizedGemmRewriteTest, CheckIsGemmAliasedBeforeFusion) { const char* hlo_text = R"( HloModule test ENTRY main { Arg_0.1 = f16[8,16]{1,0} parameter(0) Arg_1.2 = f16[16,32]{1,0} parameter(1) dot.8 = f16[8,32]{1,0} dot(Arg_0.1, Arg_1.2), lhs_contracting_dims={1}, rhs_contracting_dims={0} Arg_2.3 = f16[8,32]{1,0} parameter(2) constant.5 = f16[] constant(1) broadcast.6 = f16[8,32]{1,0} broadcast(constant.5), dimensions={} add.7 = f16[8,32]{1,0} add(Arg_2.3, broadcast.6) add.9 = f16[8,32]{1,0} add(dot.8, add.7) convert.10 = f32[8,32]{1,0} convert(add.9) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text)); GemmRewriter pass(Capability(), GetToolkitVersion()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_TRUE(changed); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Convert( m::GetTupleElement(m::CustomCall({CustomCallTarget()}), 0)))); } INSTANTIATE_TEST_SUITE_P(CublasTestsBothLegacyAndLt, ParameterizedGemmRewriteTest, ::testing::Bool()); class LegacyCublasGemmRewriteTest : public GemmRewriteTest { public: DebugOptions GetDebugOptionsForTest() override { DebugOptions debug_options = GemmRewriteTest::GetDebugOptionsForTest(); debug_options.set_xla_gpu_enable_triton_gemm(false); debug_options.set_xla_gpu_enable_cublaslt(false); return debug_options; } }; TEST_F(LegacyCublasGemmRewriteTest, MatrixVectorMultiplication) { const char* hlo_text = R"( HloModule m ENTRY e { p0 = f32[2048] parameter(0) p1 = f32[2048, 16384] parameter(1) ROOT d = f32[16384] dot(p0, p1), lhs_contracting_dims={0}, rhs_contracting_dims={0} })"; RunAndFilecheckHloRewrite( hlo_text, GemmRewriter( se::CudaComputeCapability{se::CudaComputeCapability::AMPERE, 0}, stream_executor::SemanticVersion{12, 4, 0}), R"( ; CHECK: %[[P0:.+]] = f32[2048]{0} parameter(0) ; CHECK: %[[P1:.+]] = f32[2048,16384]{1,0} parameter(1) ; CHECK: %[[CUSTOM_CALL:.+]] = (f32[16384]{0}, s8[4194304]{0}) custom-call(%[[P0]], %[[P1]]), custom_call_target="__cublas$gemm" )"); } TEST_F(LegacyCublasGemmRewriteTest, MatrixVectorMultiplicationWithBatch) { const char* hlo_text = R"( HloModule m ENTRY e { p0 = f32[10, 10, 2048] parameter(0) p1 = f32[10, 10, 2048, 16384] parameter(1) ROOT d = f32[10, 10, 16384] dot(p0, p1), lhs_batch_dims={0, 1}, rhs_batch_dims={0, 1}, lhs_contracting_dims={2}, rhs_contracting_dims={2} })"; RunAndFilecheckHloRewrite( hlo_text, GemmRewriter( se::CudaComputeCapability{se::CudaComputeCapability::AMPERE, 0}, stream_executor::SemanticVersion{12, 4, 0}), R"( ; CHECK: %[[P0:.+]] = 
f32[10,10,2048]{2,1,0} parameter(0) ; CHECK: %[[P1:.+]] = f32[10,10,2048,16384]{3,2,1,0} parameter(1) ; CHECK: %[[CUSTOM_CALL:.+]] = (f32[10,10,16384]{2,1,0}, s8[4194304]{0}) custom-call(%[[P0]], %[[P1]]), custom_call_target="__cublas$gemm" )"); } TEST_F(LegacyCublasGemmRewriteTest, SparseDotNotSupported) { const char* hlo_text = R"( HloModule test ENTRY main { lhs = f16[5,16] parameter(0) rhs = f16[32,10] parameter(1) meta = u16[5,2] parameter(2) ROOT dot = f32[5,10] dot(lhs, rhs, meta), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sparsity=L.1@2:4 })"; auto hlo_pass = GemmRewriter( se::CudaComputeCapability{se::CudaComputeCapability::AMPERE, 0}, stream_executor::SemanticVersion{12, 4, 0}); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_text)); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunHloPass(&hlo_pass, module.get())); EXPECT_FALSE(changed); } TEST_F(LegacyCublasGemmRewriteTest, AlphaBetaRewrite) { const char* hlo_text = R"( HloModule NonZeroAlphaBeta ENTRY AddDotsFunc { x = f32[2,2] parameter(0) y = f32[2,2] parameter(1) param_2 = f32[2,2] parameter(2) bias = f32[2,2] negate(param_2) k = f32[] constant(3.0) k_broadcast = f32[2, 2] broadcast(k), dimensions={} dot_a = f32[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}, operand_precision={highest,highest} dot_a_multiplied = f32[2, 2] multiply(dot_a, k_broadcast) ROOT out = f32[2,2] add(dot_a_multiplied, bias) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[2,2], {{.*}}: f32[2,2], {{.*}}: f32[2,2]) -> f32[2,2] { ; CHECK-DAG: [[X:%[^ ]+]] = f32[2,2]{1,0} parameter(0) ; CHECK-DAG: [[Y:%[^ ]+]] = f32[2,2]{1,0} parameter(1) ; CHECK: [[O:%[^ ]+]] = (f32[2,2]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[X]], [[Y]], {{[^,)]+}}), ; CHECK: custom_call_target="__cublas$gemm", ; CHECK: output_to_operand_aliasing={ ; CHECK-SAME: {0}: (2, {}) ; CHECK-SAME: } ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":3 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":1 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["HIGHEST","HIGHEST"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } ; CHECK: ROOT [[OUT:%[^ ]+]] = f32[2,2]{1,0} get-tuple-element([[O]]), index=0 )"); } TEST_F(LegacyCublasGemmRewriteTest, BiasMultipleUsersNoOverwrite) { const char* hlo_text = R"( HloModule BiasMultipleUsersNoOverwrite ENTRY AddDotsFunc { x = f32[2,2] parameter(0) y = f32[2,2] parameter(1) bias = f32[2,2] parameter(2) k = f32[] constant(3.0) k_broadcast = f32[2, 2] broadcast(k), dimensions={} dot_a = f32[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}, operand_precision={highest,highest} dot_a_multiplied = f32[2, 2] multiply(dot_a, k_broadcast) biased_out = f32[2,2] add(dot_a_multiplied, bias) ROOT out = f32[2,2] add(biased_out, bias) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[2,2], {{.*}}: f32[2,2], {{.*}}: f32[2,2]) -> f32[2,2] { ; CHECK-DAG: [[P0:%[^ ]+]] = f32[2,2]{1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f32[2,2]{1,0} parameter(1) ; CHECK-NEXT: [[CUSTOM_CALL:%[^ ]+]] = (f32[2,2]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]]), ; CHECK: 
custom_call_target="__cublas$gemm", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":3 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["HIGHEST","HIGHEST"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_F(LegacyCublasGemmRewriteTest, BiasParameterNoOverwrite) { const char* hlo_text = R"( HloModule BiasParameterNoOverwrite ENTRY AddDotsFunc { x = f32[2,2] parameter(0) y = f32[2,2] parameter(1) bias = f32[2,2] parameter(2) dot_a = f32[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT out = f32[2,2] add(dot_a, bias) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[2,2], {{.*}}: f32[2,2], {{.*}}: f32[2,2]) -> f32[2,2] { ; CHECK-DAG: [[P0:%[^ ]+]] = f32[2,2]{1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f32[2,2]{1,0} parameter(1) ; CHECK-NEXT: [[GEMM:%[^ ]+]] = (f32[2,2]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]]), ; CHECK: custom_call_target="__cublas$gemm", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_F(LegacyCublasGemmRewriteTest, BiasTupleParameterOverwrite) { const char* hlo_text = R"( HloModule BiasTupleParameterOverwrite ENTRY AddDotsFunc { x = f32[2,2] parameter(0) y = f32[2,2] parameter(1) param_2 = (f32[2,2], f32[3,3]) parameter(2) bias = f32[2,2] get-tuple-element(param_2), index=0 dot_a = f32[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT out = f32[2,2] add(dot_a, bias) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[2,2], {{.*}}: f32[2,2], {{.*}}: (f32[2,2], f32[3,3])) -> f32[2,2] { ; CHECK-DAG: [[P0:%[^ ]+]] = f32[2,2]{1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f32[2,2]{1,0} parameter(1) ; CHECK-DAG: [[P2:%[^ ]+]] = (f32[2,2]{1,0}, f32[3,3]{1,0}) parameter(2) ; CHECK-DAG: [[BIAS:%[^ ]+]] = f32[2,2]{1,0} get-tuple-element([[P2]]), index=0 ; CHECK-DAG: [[BIAS_COPY:%[^ ]+]] = f32[2,2]{1,0} copy([[BIAS]]) ; CHECK-NEXT: [[GEMM:%[^ ]+]] = (f32[2,2]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[BIAS_COPY]]), ; CHECK: custom_call_target="__cublas$gemm", ; CHECK: output_to_operand_aliasing={ ; CHECK-SAME: {0}: (2, {}) ; CHECK-SAME: } ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":1 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_F(LegacyCublasGemmRewriteTest, AliasedBiasOverwrite) { 
const char* hlo_text = R"( HloModule AliasedBiasOverwrite, input_output_alias={ {}: (2, {}, must-alias) } ENTRY AddDotsFunc { x = f32[2,2] parameter(0) y = f32[2,2] parameter(1) bias = f32[2,2] parameter(2) k = f32[] constant(3.0) k_broadcast = f32[2, 2] broadcast(k), dimensions={} dot_a = f32[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}, operand_precision={highest,highest} dot_a_multiplied = f32[2, 2] multiply(dot_a, k_broadcast) ROOT out = f32[2,2] add(dot_a_multiplied, bias) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[2,2], {{.*}}: f32[2,2], {{.*}}: f32[2,2]) -> f32[2,2] { ; CHECK-DAG: [[X:%[^ ]+]] = f32[2,2]{1,0} parameter(0) ; CHECK-DAG: [[Y:%[^ ]+]] = f32[2,2]{1,0} parameter(1) ; CHECK-DAG: [[BIAS:%[^ ]+]] = f32[2,2]{1,0} parameter(2) ; CHECK: [[GEMM:%[^ ]+]] = (f32[2,2]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[X]], [[Y]], [[BIAS]]), ; CHECK: custom_call_target="__cublas$gemm", ; CHECK: output_to_operand_aliasing={ ; CHECK-SAME: {0}: (2, {}) ; CHECK-SAME: } ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":3 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":1 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["HIGHEST","HIGHEST"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_F(LegacyCublasGemmRewriteTest, LargerBiasMultipleUsersNoRewrite) { const char* hlo_text = R"( HloModule LargerBiasMultipleUsersNoRewrite ENTRY AddDotsFunc { x = f32[1024,1024] parameter(0) y = f32[1024,1024] parameter(1) bias = f32[1024,1024] parameter(2) dot_a = f32[1024,1024] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} biased_out = f32[1024,1024] add(dot_a, bias) ROOT out = f32[1024,1024] add(biased_out, bias) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[1024,1024], {{.*}}: f32[1024,1024], {{.*}}: f32[1024,1024]) -> f32[1024,1024] { ; CHECK-DAG: [[P0:%[^ ]+]] = f32[1024,1024]{1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f32[1024,1024]{1,0} parameter(1) ; CHECK-NEXT: [[GEMM:%[^ ]+]] = (f32[1024,1024]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]]), ; CHECK: custom_call_target="__cublas$gemm", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_F(LegacyCublasGemmRewriteTest, BF16GemmWithBias) { const char* hlo_text = R"( HloModule BF16GemmWithBias ENTRY BF16GemmWithBias { x = bf16[8,8]{1,0} parameter(0) y = bf16[8,8]{1,0} parameter(1) dot.5 = bf16[8,8]{1,0} dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} param_2 = bf16[8,8]{1,0} parameter(2) bias = bf16[8,8]{1,0} negate(param_2) ROOT add.6 = bf16[8,8]{1,0} add(dot.5, bias) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{2e-3, 2e-3})); if (IsCuda() && 
!HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) { GTEST_SKIP() << "Pre-Ampere casts up bf16 to fp32"; } MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %BF16GemmWithBias ({{.*}}: bf16[8,8], {{.*}}: bf16[8,8], {{.*}}: bf16[8,8]) -> bf16[8,8] { ; CHECK-DAG: [[X:%[^ ]+]] = bf16[8,8]{1,0} parameter(0) ; CHECK-DAG: [[Y:%[^ ]+]] = bf16[8,8]{1,0} parameter(1) ; CHECK: [[GEMM:%[^ ]+]] = (bf16[8,8]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[X]], [[Y]], {{[^,)]+}}), ; CHECK: custom_call_target="__cublas$gemm", ; CHECK: output_to_operand_aliasing={ ; CHECK-SAME: {0}: (2, {}) ; CHECK-SAME: } ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":1 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_F(LegacyCublasGemmRewriteTest, MatrixBias) { const char* hlo_text = R"( HloModule test ENTRY test { x = f32[2,3] parameter(0) y = f32[3,4] parameter(1) param_2 = f32[2,4] parameter(2) bias = f32[2,4] negate(param_2) dot_a = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT out = f32[2,4] add(dot_a, bias) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3], {{.*}}: f32[3,4], {{.*}}: f32[2,4]) -> f32[2,4] { ; CHECK-DAG: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1) ; CHECK: [[GEMM:%[^ ]+]] = (f32[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], {{[^,)]+}}), ; CHECK: custom_call_target="__cublas$gemm", ; CHECK: output_to_operand_aliasing={ ; CHECK-SAME: {0}: (2, {}) ; CHECK-SAME: } ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":1 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_F(LegacyCublasGemmRewriteTest, MatrixBiasWhereBiasIsNotAParameter) { const char* hlo_text = R"( HloModule test ENTRY test { w = f32[2,3] parameter(0) x = f32[3,4] parameter(1) first_dot = f32[2,4] dot(w, x), lhs_contracting_dims={1}, rhs_contracting_dims={0} y = f32[2,3] parameter(2) z = f32[3,4] parameter(3) second_dot = f32[2,4] dot(y, z), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT out = f32[2,4] add(second_dot, first_dot) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3], {{.*}}: f32[3,4], {{.*}}: f32[2,3], {{.*}}: f32[3,4]) -> f32[2,4] { ; CHECK-DAG: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1) ; CHECK-DAG: [[P2:%[^ ]+]] = f32[2,3]{1,0} parameter(2) ; CHECK-DAG: [[P3:%[^ ]+]] = f32[3,4]{1,0} parameter(3) ; CHECK-NEXT: [[FIRST_GEMM:%[^ ]+]] = (f32[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]]), ; CHECK: custom_call_target="__cublas$gemm", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: 
"alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } ; CHECK: [[FIRST_GEMM_OUT:%[^ ]+]] = f32[2,4]{1,0} get-tuple-element([[FIRST_GEMM]]), index=0 ; CHECK-NEXT: [[SECOND_GEMM:%[^ ]+]] = (f32[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P2]], [[P3]], [[FIRST_GEMM_OUT]]), ; CHECK: custom_call_target="__cublas$gemm", ; CHECK: output_to_operand_aliasing={ ; CHECK-SAME: {0}: (2, {}) ; CHECK-SAME: } ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":1 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_F(LegacyCublasGemmRewriteTest, MatrixBiasMixType) { std::vector<std::tuple<absl::string_view, absl::string_view>> type_combinations = { {"f16", "f32"}, {"bf16", "f32"}, }; const char* hlo_text_template = R"( HloModule test ENTRY test { x = <<ABType>>[16,32] parameter(0) y = <<ABType>>[32,16] parameter(1) z = <<DType>>[16,16] parameter(2) dot_a = <<ABType>>[16,16] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} bias = <<DType>>[16,16] negate(z) convert = <<DType>>[16,16] convert(dot_a) ROOT out = <<DType>>[16,16] add(convert, bias) } )"; for (const auto& type_combination : type_combinations) { absl::flat_hash_map<absl::string_view, absl::string_view> replacements; replacements["<<ABType>>"] = std::get<0>(type_combination); replacements["<<DType>>"] = std::get<1>(type_combination); const auto hlo_text = absl::StrReplaceAll(hlo_text_template, replacements); EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3})); if (std::get<0>(type_combination) == "bf16" && IsCuda() && !HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) { continue; } TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> optimized_module, GetOptimizedModule(hlo_text)); EXPECT_THAT(optimized_module->entry_computation()->root_instruction(), GmockMatch(m::GetTupleElement( m::CustomCall(m::Parameter(0), m::Parameter(1), m::Negate(m::Parameter(2))), 0))); } } TEST_F(LegacyCublasGemmRewriteTest, MatrixBiasMixTypeBatched) { std::vector<std::tuple<absl::string_view, absl::string_view>> type_combinations = { {"f16", "f32"}, {"bf16", "f32"}, }; const char* hlo_text_template = R"( HloModule test ENTRY test { x = <<ABType>>[4,16,32] parameter(0) y = <<ABType>>[4,32,16] parameter(1) z = <<DType>>[4,16,16] parameter(2) dot_a = <<ABType>>[4,16,16] dot(x, y), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0} bias = <<DType>>[4,16,16] negate(z) convert = <<DType>>[4,16,16] convert(dot_a) ROOT out = <<DType>>[4,16,16] add(convert, bias) })"; for (const auto& type_combination : type_combinations) { absl::flat_hash_map<absl::string_view, absl::string_view> replacements; replacements["<<ABType>>"] = std::get<0>(type_combination); replacements["<<DType>>"] = std::get<1>(type_combination); const auto hlo_text = absl::StrReplaceAll(hlo_text_template, 
replacements); EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3})); if (std::get<0>(type_combination) == "bf16" && IsCuda() && !HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) { continue; } TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> optimized_module, GetOptimizedModule(hlo_text)); EXPECT_THAT(optimized_module->entry_computation()->root_instruction(), GmockMatch(m::GetTupleElement( m::CustomCall(m::Parameter(0), m::Parameter(1), m::Negate(m::Parameter(2))), 0))); } } TEST_F(LegacyCublasGemmRewriteTest, MatrixBiasMixTypeNotSupported) { if (IsCuda() && !HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) { GTEST_SKIP() << "Pre-Ampere rewrites to cutlass_gemm_with_upcast instead of cublas."; } const char* hlo_text = R"( HloModule test ENTRY test { x = bf16[16,32] parameter(0) y = bf16[32,16] parameter(1) z = f64[16,16] parameter(2) dot_a = bf16[16,16] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} bias = f64[16,16] negate(z) convert = f64[16,16] convert(dot_a) ROOT out = f64[16,16] add(convert, bias) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3})); TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> optimized_module, GetOptimizedModule(hlo_text)); MatchOptimizedHlo(hlo_text, R"( ; CHECK: %[[custom_call:.*]] = {{.*}} custom-call{{.*}}__cublas$gemm ; CHECK: %[[gte:.*]] = {{.*}} get-tuple-element{{.*}}%[[custom_call]] ; CHECK: ROOT {{.*}} fusion({{.*}}%[[gte]] )"); } TEST_F(LegacyCublasGemmRewriteTest, MatrixBiasMixTypeAddWithMoreConsumers) { const char* hlo_text = R"( HloModule test ENTRY test { x = bf16[16,32] parameter(0) y = bf16[32,16] parameter(1) z = f32[16,16] parameter(2) dot_a = bf16[16,16] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} bias = f32[16,16] negate(z) convert = f32[16,16] convert(dot_a) add_bias = f32[16,16] add(convert, bias) ROOT out = f32[16,16] negate(add_bias) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3})); if (IsCuda() && !HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) { GTEST_SKIP() << "Pre-Ampere casts up bf16 to fp32"; } TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> optimized_module, GetOptimizedModule(hlo_text)); MatchOptimizedHlo(hlo_text, R"( ; CHECK: %[[custom_call:.*]] = {{.*}} custom-call{{.*}}__cublas$gemm ; CHECK: %[[gte:.*]] = {{.*}} get-tuple-element{{.*}}%[[custom_call]] ; CHECK: ROOT {{.*}} fusion({{.*}}%[[gte]] )"); } TEST_F(LegacyCublasGemmRewriteTest, MergeBitcastAndAdd) { const char* hlo_text = R"( HloModule test ENTRY test { x = f32[2,2] parameter(0) y = f32[2,2] parameter(1) bias = f32[4] parameter(2) dot = f32[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT out = f32[4] add(f32[4] bitcast(dot), bias) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text)); GemmRewriter pass(Capability(), GetToolkitVersion()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_TRUE(changed); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch( m::Bitcast( m::GetTupleElement( m::CustomCall( {"__cublas$gemm"}, m::Parameter(0), m::Parameter(1), m::Bitcast(m::Parameter(2)).WithShape(F32, {2, 2})), 0)) .WithShape(F32, {4}))); } TEST_F(LegacyCublasGemmRewriteTest, FoldConstantBias) { const char* hlo_text = R"( HloModule test ENTRY test { x = f32[2,2] parameter(0) y = f32[2,2] parameter(1) bias = f32[2,2] broadcast(f32[2] constant({0, 0})), dimensions={0} dot1 = f32[2,2] dot(x, y), lhs_contracting_dims={1}, 
rhs_contracting_dims={0} param_2 = f32[2,2] parameter(2) bias1 = f32[2,2] negate(param_2) sum1 = add(dot1, bias1) dot2 = f32[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} sum2 = add(dot2, f32[2,2] reshape(bias)) dot3 = f32[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} bias3 = f32[2,2] transpose(bias), dimensions={1,0} sum3 = add(dot3, bias3) dot4 = f32[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} sum4 = add(dot4, f32[2,2] bitcast(bias)) ROOT root = tuple(sum1, sum2, sum3, sum4) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text)); GemmRewriter pass(Capability(), GetToolkitVersion()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); SCOPED_TRACE(module->ToString()); EXPECT_TRUE(changed); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch(m::Tuple( m::GetTupleElement(m::CustomCall(m::Parameter(0), m::Parameter(1), m::Negate(m::Parameter(2))), 0), m::GetTupleElement( m::CustomCall(m::Parameter(0), m::Parameter(1), m::Constant()), 0), m::GetTupleElement( m::CustomCall(m::Parameter(0), m::Parameter(1), m::Constant()), 0), m::GetTupleElement( m::CustomCall(m::Parameter(0), m::Parameter(1), m::Constant()), 0)))); } class CublasLtGemmRewriteTest : public GemmRewriteTest { public: DebugOptions GetDebugOptionsForTest() override { DebugOptions debug_options = GemmRewriteTest::GetDebugOptionsForTest(); debug_options.set_xla_gpu_enable_cublaslt(true); debug_options.set_xla_gpu_enable_triton_gemm(false); return debug_options; } protected: void SetUp() override { if (SkipGpuBlasLtTest()) { GTEST_SKIP() << "BlasLt is not supported on this GPU architecture"; } } }; TEST_F(CublasLtGemmRewriteTest, AlphaBetaRewrite) { const char* hlo_text = R"( HloModule NonZeroAlphaBeta ENTRY AddDotsFunc { x = f32[2,2] parameter(0) y = f32[2,2] parameter(1) bias = f32[2,2] parameter(2) k = f32[] constant(3.0) k_broadcast = f32[2, 2] broadcast(k), dimensions={} dot_a = f32[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}, operand_precision={highest,highest} dot_a_multiplied = f32[2, 2] multiply(dot_a, k_broadcast) ROOT out = f32[2,2] add(dot_a_multiplied, bias) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[2,2], {{.*}}: f32[2,2], {{.*}}: f32[2,2]) -> f32[2,2] { ; CHECK-DAG: [[X:%[^ ]+]] = f32[2,2]{1,0} parameter(0) ; CHECK-DAG: [[Y:%[^ ]+]] = f32[2,2]{1,0} parameter(1) ; CHECK-DAG: [[BIAS:%[^ ]+]] = f32[2,2]{1,0} parameter(2) ; CHECK-NEXT: [[GEMM:%[^ ]+]] = (f32[2,2]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[X]], [[Y]], [[BIAS]]), ; CHECK: custom_call_target="__cublas$lt$matmul", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":3 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":1 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["HIGHEST","HIGHEST"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } ; CHECK-NEXT ROOT [[OUT:%[^ ]+]] = f32[2,2]{1,0} get-tuple-element(%cublas-lt-matmul.2.0), index=0 )"); } TEST_F(CublasLtGemmRewriteTest, BiasMultipleUsersNoOverwrite) { const char* hlo_text = R"( HloModule BiasMultipleUsersNoOverwrite ENTRY AddDotsFunc { x = f32[2,2] parameter(0) y = f32[2,2] 
parameter(1) bias = f32[2,2] parameter(2) k = f32[] constant(3.0) k_broadcast = f32[2, 2] broadcast(k), dimensions={} dot_a = f32[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}, operand_precision={highest,highest} dot_a_multiplied = f32[2, 2] multiply(dot_a, k_broadcast) biased_out = f32[2,2] add(dot_a_multiplied, bias) ROOT out = f32[2,2] add(biased_out, bias) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[2,2], {{.*}}: f32[2,2], {{.*}}: f32[2,2]) -> f32[2,2] { ; CHECK-DAG: [[P0:%[^ ]+]] = f32[2,2]{1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f32[2,2]{1,0} parameter(1) ; CHECK-DAG: [[BIAS:%[^ ]+]] = f32[2,2]{1,0} parameter(2) ; CHECK-NEXT: [[GEMM:%[^ ]+]] = (f32[2,2]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[BIAS]]), ; CHECK: custom_call_target="__cublas$lt$matmul", ; CHECK-NOT: output_to_operand_aliasing ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":3 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":1 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["HIGHEST","HIGHEST"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_F(CublasLtGemmRewriteTest, LargerBiasMultipleUsersNoRewrite) { const char* hlo_text = R"( HloModule LargerBiasMultipleUsersNoRewrite ENTRY AddDotsFunc { x = f32[1024,1024] parameter(0) y = f32[1024,1024] parameter(1) bias = f32[1024,1024] parameter(2) dot_a = f32[1024,1024] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} biased_out = f32[1024,1024] add(dot_a, bias) ROOT out = f32[1024,1024] add(biased_out, bias) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[1024,1024], {{.*}}: f32[1024,1024], {{.*}}: f32[1024,1024]) -> f32[1024,1024] { ; CHECK-DAG: [[P0:%[^ ]+]] = f32[1024,1024]{1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f32[1024,1024]{1,0} parameter(1) ; CHECK-DAG: [[BIAS:%[^ ]+]] = f32[1024,1024]{1,0} parameter(2) ; CHECK-NEXT: [[GEMM_TUPLE:%[^ ]+]] = (f32[1024,1024]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[BIAS]]), ; CHECK: custom_call_target="__cublas$lt$matmul", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":1 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } ; CHECK-NEXT: [[GEMM:%[^ ]+]] = f32[1024,1024]{1,0} get-tuple-element([[GEMM_TUPLE]]), index=0 ; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = f32[1024,1024]{1,0} add([[GEMM]], [[BIAS]]) )"); } TEST_F(CublasLtGemmRewriteTest, BF16GemmWithBias) { const char* hlo_text = R"( HloModule test ENTRY BF16GemmWithBias { x = bf16[8,8]{1,0} parameter(0) y = bf16[8,8]{1,0} parameter(1) dot.5 = bf16[8,8]{1,0} dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} bias = bf16[8,8]{1,0} parameter(2) ROOT add.6 = bf16[8,8]{1,0} add(dot.5, bias) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3})); if (IsCuda() && 
!HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) { GTEST_SKIP() << "Pre-Ampere casts up bf16 to fp32"; } MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %BF16GemmWithBias ({{.*}}: bf16[8,8], {{.*}}: bf16[8,8], {{.*}}: bf16[8,8]) -> bf16[8,8] { ; CHECK-DAG: [[X:%[^ ]+]] = bf16[8,8]{1,0} parameter(0) ; CHECK-DAG: [[Y:%[^ ]+]] = bf16[8,8]{1,0} parameter(1) ; CHECK-DAG: [[BIAS:%[^ ]+]] = bf16[8,8]{1,0} parameter(2) ; CHECK-NEXT: [[GEMM:%[^ ]+]] = (bf16[8,8]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[X]], [[Y]], [[BIAS]]), ; CHECK: custom_call_target="__cublas$lt$matmul", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":1 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_F(CublasLtGemmRewriteTest, MatrixBias) { const char* hlo_text = R"( HloModule test ENTRY test { x = f32[2,3] parameter(0) y = f32[3,4] parameter(1) z = f32[2,4] parameter(2) dot_a = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT out = f32[2,4] add(dot_a, z) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3], {{.*}}: f32[3,4], {{.*}}: f32[2,4]) -> f32[2,4] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1) ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[2,4]{1,0} parameter(2) ; CHECK-NEXT: [[GEMM:%[^ ]+]] = (f32[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[P2]]), ; CHECK: custom_call_target="__cublas$lt$matmul", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":1 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_F(CublasLtGemmRewriteTest, MatrixBiasWhereBiasIsNotAParameter) { const char* hlo_text = R"( HloModule test ENTRY test { w = f32[2,3] parameter(0) x = f32[3,4] parameter(1) first_dot = f32[2,4] dot(w, x), lhs_contracting_dims={1}, rhs_contracting_dims={0} y = f32[2,3] parameter(2) z = f32[3,4] parameter(3) second_dot = f32[2,4] dot(y, z), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT out = f32[2,4] add(second_dot, first_dot) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3], {{.*}}: f32[3,4], {{.*}}: f32[2,3], {{.*}}: f32[3,4]) -> f32[2,4] { ; CHECK-DAG: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1) ; CHECK-DAG: [[P2:%[^ ]+]] = f32[2,3]{1,0} parameter(2) ; CHECK-DAG: [[P3:%[^ ]+]] = f32[3,4]{1,0} parameter(3) ; CHECK-NEXT: [[FIRST_GEMM_TUPLE:%[^ ]+]] = (f32[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]]), ; CHECK: custom_call_target="__cublas$lt$matmul", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: 
"dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } ; CHECK: [[FIRST_GEMM:%[^ ]+]] = f32[2,4]{1,0} get-tuple-element([[FIRST_GEMM_TUPLE]]), index=0 ; CHECK-NEXT: [[SECOND_GEMM:%[^ ]+]] = (f32[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P2]], [[P3]], [[FIRST_GEMM]]), ; CHECK: custom_call_target="__cublas$lt$matmul", ; CHECK: output_to_operand_aliasing={ ; CHECK: {0}: (2, {}) ; CHECK: } ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":1 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_F(CublasLtGemmRewriteTest, VectorBias) { const char* hlo_text = R"( HloModule test ENTRY test { x = f32[2,3] parameter(0) y = f32[3,4] parameter(1) z = f32[4] parameter(2) dot_a = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} z_bcast = f32[2,4] broadcast(z), dimensions={1} ROOT out = f32[2,4] add(dot_a, z_bcast) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3], {{.*}}: f32[3,4], {{.*}}: f32[4]) -> f32[2,4] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1) ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4]{0} parameter(2) ; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[P2]]), ; CHECK: custom_call_target="__cublas$lt$matmul", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"BIAS" ; CHECK: } )"); } TEST_F(CublasLtGemmRewriteTest, VectorBiasMultipleUsers) { const char* hlo_text = R"( HloModule test ENTRY test { x = f32[4,4] parameter(0) y = f32[4,4] parameter(1) z = f32[4] parameter(2) c = f32[] constant(5) dot_a = f32[4,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}, operand_precision={highest,highest} z_bcast = f32[4,4] broadcast(z), dimensions={1} add_a = f32[4,4] add(dot_a, z_bcast) c_bcast = f32[4,4] broadcast(c), dimensions={} dot_b = f32[4,4] dot(dot_a, c_bcast), lhs_contracting_dims={1}, rhs_contracting_dims={0}, operand_precision={highest,highest} ROOT out = f32[4,4] dot(add_a, dot_b), lhs_contracting_dims={1}, rhs_contracting_dims={0}, operand_precision={highest,highest} } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK: [[FUSED_COMPUTATION:%[^ ]+]] ([[DUMMY0:[^ ]+]]: f32[4,4], [[DUMMY1:[^ ]+]]: f32[4]) -> f32[4,4] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[4,4]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[4]{0} 
parameter(1) ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4,4]{1,0} broadcast([[P1]]), dimensions={1} ; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = f32[4,4]{1,0} add([[P0]], [[P2]]) } ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[4,4], {{.*}}: f32[4,4], {{.*}}: f32[4]) -> f32[4,4] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[4,4]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[4,4]{1,0} parameter(1) ; CHECK-NEXT: [[MATMUL0_TUPLE:%[^ ]+]] = (f32[4,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]]), ; CHECK: custom_call_target="__cublas$lt$matmul", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["HIGHEST","HIGHEST"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } ; CHECK-NEXT: [[MATMUL0:%[^ ]+]] = f32[4,4]{1,0} get-tuple-element([[MATMUL0_TUPLE]]), index=0 ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4]{0} parameter(2) ; CHECK-NEXT: [[FUSION:%[^ ]+]] = f32[4,4]{1,0} fusion([[MATMUL0]], [[P2]]), kind=kLoop, calls=[[FUSED_COMPUTATION]] ; CHECK: [[MATMUL1_TUPLE:%[^ ]+]] = (f32[4,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[MATMUL0]] ; CHECK: custom_call_target="__cublas$lt$matmul", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["HIGHEST","HIGHEST"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } ; CHECK-NEXT: [[MATMUL1:%[^ ]+]] = f32[4,4]{1,0} get-tuple-element([[MATMUL1_TUPLE]]), index=0 ; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[4,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[FUSION]], [[MATMUL1]]), ; CHECK: custom_call_target="__cublas$lt$matmul", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["HIGHEST","HIGHEST"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_F(CublasLtGemmRewriteTest, BatchedVectorBias) { const char* hlo_text = R"( HloModule test ENTRY test { x = f32[2,3,4] parameter(0) y = f32[4,5,6] parameter(1) z = f32[3,5,6] parameter(2) dot_a = f32[2,3,5,6] dot(x, y), lhs_contracting_dims={2}, rhs_contracting_dims={0}, operand_precision={highest,highest} z_bcast = f32[2,3,5,6] broadcast(z), dimensions={1,2,3} ROOT out = f32[2,3,5,6] add(dot_a, z_bcast) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3,4], {{.*}}: f32[4,5,6], {{.*}}: f32[3,5,6]) -> f32[2,3,5,6] { ; CHECK: [[MATMUL_TUPLE:%[^ ]+]] = (f32[6,30]{1,0}, s8[{{[0-9]+}}]{0}) custom-call( ; CHECK: custom_call_target="__cublas$lt$matmul", ; CHECK: output_to_operand_aliasing={ ; CHECK: {0}: (2, {}) ; CHECK: } ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: 
"beta":1 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["HIGHEST","HIGHEST"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } ; CHECK-NEXT: [[MATMUL:%[^ ]+]] = f32[6,30]{1,0} get-tuple-element([[MATMUL_TUPLE]]), index=0 ; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = f32[2,3,5,6]{3,2,1,0} bitcast([[MATMUL]]) )"); } TEST_F(CublasLtGemmRewriteTest, BatchedSharedVectorBias) { const char* hlo_text = R"( HloModule test ENTRY test { x = f32[2,3,4] parameter(0) y = f32[4,5,6] parameter(1) z = f32[6] parameter(2) dot_a = f32[2,3,5,6] dot(x, y), lhs_contracting_dims={2}, rhs_contracting_dims={0}, operand_precision={highest,highest} z_bcast = f32[2,3,5,6] broadcast(z), dimensions={3} ROOT out = f32[2,3,5,6] add(dot_a, z_bcast) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3,4], {{.*}}: f32[4,5,6], {{.*}}: f32[6]) -> f32[2,3,5,6] { ; CHECK: [[MATMUL_TUPLE:%[^ ]+]] = (f32[6,30]{1,0}, s8[{{[0-9]+}}]{0}) custom-call( ; CHECK: custom_call_target="__cublas$lt$matmul", ; CHECK: output_to_operand_aliasing={ ; CHECK: {0}: (2, {}) ; CHECK: } ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":1 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["HIGHEST","HIGHEST"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } ; CHECK: [[MATMUL:%[^ ]+]] = f32[6,30]{1,0} get-tuple-element([[MATMUL_TUPLE]]), index=0 ; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = f32[2,3,5,6]{3,2,1,0} bitcast([[MATMUL]]) )"); } TEST_F(CublasLtGemmRewriteTest, VectorBiasIncorrectAxisFusedAsMatrix) { const char* hlo_text = R"( HloModule test ENTRY test { x = f32[2,3] parameter(0) y = f32[3,4] parameter(1) z = f32[2] parameter(2) dot_a = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} z_bcast = f32[2,4] broadcast(z), dimensions={0} add = f32[2,4] add(dot_a, z_bcast) ROOT out = f32[4,2] transpose(add), dimensions={1,0} } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3], {{.*}}: f32[3,4], {{.*}}: f32[2]) -> f32[4,2] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1) ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[2]{0} parameter(2) ; CHECK-NEXT: [[MATMUL_TUPLE:%[^ ]+]] = (f32[2,4]{0,1}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[P2]]), ; CHECK: custom_call_target="__cublas$lt$matmul", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"BIAS" ; CHECK: } ; CHECK-NEXT: [[MATMUL:%[^ ]+]] = f32[2,4]{0,1} get-tuple-element([[MATMUL_TUPLE]]), index=0 ; 
CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = f32[4,2]{1,0} bitcast([[MATMUL]]) )"); } TEST_F(CublasLtGemmRewriteTest, VectorBiasSliced) { const char* hlo_text = R"( HloModule test ENTRY test { x = f32[4,3] parameter(0) y = f32[3,4] parameter(1) z = f32[3] parameter(2) dot_a = f32[4,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} slice_a = f32[2,3] slice(dot_a), slice={[0:2], [0:3]} z_bcast = f32[2,3] broadcast(z), dimensions={1} ROOT out = f32[2,3] add(slice_a, z_bcast) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[4,3], {{.*}}: f32[3,4], {{.*}}: f32[3]) -> f32[2,3] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[4,3]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1) ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[3]{0} parameter(2) ; CHECK-NEXT: [[MATMUL:%[^ ]+]] = (f32[4,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[P2]]), ; CHECK: custom_call_target="__cublas$lt$matmul", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"BIAS" ; CHECK: } ; CHECK-NEXT: [[GETTUPLE:%[^ ]+]] = f32[4,4]{1,0} get-tuple-element([[MATMUL]]), index=0 ; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = f32[2,3]{1,0} slice([[GETTUPLE]]), slice={[0:2], [0:3]} )"); } TEST_F(CublasLtGemmRewriteTest, VectorBiasSlicedMultipleUsers) { const char* hlo_text = R"( HloModule test ENTRY test { x = f32[2,3] parameter(0) y = f32[3,4] parameter(1) z = f32[2] parameter(2) c = f32[] constant(5) dot_a = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} slice_a = f32[2,2] slice(dot_a), slice={[0:2], [0:2]} z_bcast = f32[2,2] broadcast(z), dimensions={1} add_a = f32[2,2] add(slice_a, z_bcast) c_bcast = f32[2,2] broadcast(c), dimensions={} dot_b = f32[2,2] dot(slice_a, c_bcast), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT out = f32[2,2] dot(add_a, dot_b), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3], {{.*}}: f32[3,4], {{.*}}: f32[2]) -> f32[2,2] { ; CHECK-DAG: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1) ; CHECK-DAG: [[P2:%[^ ]+]] = f32[2]{0} parameter(2) ; CHECK-NEXT: [[MATMUL0_TUPLE:%[^ ]+]] = (f32[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]]), ; CHECK: custom_call_target="__cublas$lt$matmul", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } ; CHECK: [[MATMUL1_TUPLE:%[^ ]+]] = (f32[2,2]{1,0}, s8[{{[0-9]+}}]{0}) custom-call( ; CHECK: custom_call_target="__cublas$lt$matmul", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: 
"beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } ; CHECK: [[MATMUL1:%[^ ]+]] = f32[2,2]{1,0} get-tuple-element([[MATMUL1_TUPLE]]), index=0 ; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[2,2]{1,0}, s8[{{[0-9]+}}]{0}) custom-call{{.*}}[[MATMUL1]] ; CHECK: custom_call_target="__cublas$lt$matmul", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_F(CublasLtGemmRewriteTest, VectorBiasTransposed) { const char* hlo_text = R"( HloModule test ENTRY test { x = f32[2,3] parameter(0) y = f32[3,4] parameter(1) z = f32[2] parameter(2) dot_a = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} z_bcast = f32[2,4] parameter(3) ROOT out = f32[2,4] add(dot_a, z_bcast) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1) ; CHECK-NEXT: [[P2_BCAST:%[^ ]+]] = f32[2,4]{1,0} parameter(3) ; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[P2_BCAST]]), ; CHECK: custom_call_target="__cublas$lt$matmul", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":1 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_F(CublasLtGemmRewriteTest, VectorBiasThenMatrixBias) { const char* hlo_text = R"( HloModule test ENTRY test { x = f32[2,3] parameter(0) y = f32[3,4] parameter(1) z = f32[4] parameter(2) z2 = f32[2,4] parameter(3) dot_a = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} z_bcast = f32[2,4] broadcast(z), dimensions={1} add0 = f32[2,4] add(dot_a, z_bcast) ROOT add1 = f32[2,4] add(add0, z2) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3], {{.*}}: f32[3,4], {{.*}}: f32[4], {{.*}}: f32[2,4]) -> f32[2,4] { ; CHECK-DAG: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1) ; CHECK-DAG: [[VECTOR_BIAS:%[^ ]+]] = f32[4]{0} parameter(2) ; CHECK-DAG: [[MATRIX_BIAS:%[^ ]+]] = f32[2,4]{1,0} parameter(3) ; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[MATRIX_BIAS]], [[VECTOR_BIAS]]), ; CHECK: custom_call_target="__cublas$lt$matmul", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":1 ; CHECK-DAG: "dot_dimension_numbers":{ ; 
CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"BIAS" ; CHECK: } )"); } TEST_F(CublasLtGemmRewriteTest, BF16VectorBias) { const char* hlo_text = R"( HloModule test ENTRY test { x = bf16[16,24] parameter(0) y = bf16[24,32] parameter(1) z = bf16[32] parameter(2) dot_a = bf16[16,32] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} z_bcast = bf16[16,32] broadcast(z), dimensions={1} ROOT out = bf16[16,32] add(dot_a, z_bcast) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{3e-3, 1e-3})); if (IsCuda() && !HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) { GTEST_SKIP() << "Pre-Ampere casts up bf16 to fp32"; } MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: bf16[16,24], {{.*}}: bf16[24,32], {{.*}}: bf16[32]) -> bf16[16,32] { ; CHECK-NEXT: [[P0:%[^ ]+]] = bf16[16,24]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = bf16[24,32]{1,0} parameter(1) ; CHECK-NEXT: [[P2:%[^ ]+]] = bf16[32]{0} parameter(2) ; CHECK-NEXT: [[OUT:%[^ ]+]] = (bf16[16,32]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[P2]]), ; CHECK: custom_call_target="__cublas$lt$matmul", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"BIAS" )"); } TEST_F(CublasLtGemmRewriteTest, BF16VectorBiasPadded) { if (IsCuda() && !HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) { GTEST_SKIP() << "Padding of GEMM bf16 operands only implemented on " "architectures with bf16 Tensor Cores."; } const char* hlo_text = R"( HloModule test ENTRY test { x = bf16[2,3] parameter(0) y = bf16[3,4] parameter(1) z = bf16[4] parameter(2) dot_a = bf16[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} z_bcast = bf16[2,4] broadcast(z), dimensions={1} ROOT out = bf16[2,4] add(dot_a, z_bcast) })"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-DAG: ENTRY %test ({{.*}}: bf16[2,3], {{.*}}: bf16[3,4], {{.*}}: bf16[4]) -> bf16[2,4] { ; CHECK-DAG: bf16[8,8]{1,0} pad({{.*}}), padding=0_6x0_5 ; CHECK-DAG: bf16[8,8]{1,0} pad({{.*}}), padding=0_5x0_4 )"); } TEST_F(CublasLtGemmRewriteTest, ReluActivation) { const char* hlo_text = R"( HloModule test ENTRY test { x = f32[2,3] parameter(0) y = f32[3,4] parameter(1) dot_a = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} c = f32[] constant(0) c_bcast = f32[2,4] broadcast(c), dimensions={} ROOT out = f32[2,4] maximum(dot_a, c_bcast) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3], {{.*}}: f32[3,4]) -> f32[2,4] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1) ; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]]), ; CHECK: custom_call_target="__cublas$lt$matmul", ; CHECK: backend_config={ ; CHECK-DAG: 
"alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"RELU" ; CHECK: } )"); } TEST_F(CublasLtGemmRewriteTest, BatchedReluActivation) { const char* hlo_text = R"( HloModule test ENTRY test { x = f32[2,3,4] parameter(0) y = f32[4,5,6] parameter(1) dot_a = f32[2,3,5,6] dot(x, y), lhs_contracting_dims={2}, rhs_contracting_dims={0}, operand_precision={highest,highest} c = f32[] constant(0) c_bcast = f32[2,3,5,6] broadcast(c), dimensions={} ROOT out = f32[2,3,5,6] maximum(dot_a, c_bcast) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3,4], {{.*}}: f32[4,5,6]) -> f32[2,3,5,6] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,3,4]{2,1,0} parameter(0) ; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = f32[6,4]{1,0} bitcast([[P0]]) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[4,5,6]{2,1,0} parameter(1) ; CHECK-NEXT: [[P1_BITCAST:%[^ ]+]] = f32[4,30]{1,0} ; CHECK-NEXT: [[MATMUL_TUPLE:%[^ ]+]] = (f32[6,30]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0_BITCAST]], [[P1_BITCAST]]), ; CHECK: custom_call_target="__cublas$lt$matmul", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["HIGHEST","HIGHEST"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"RELU" ; CHECK: } ; CHECK: [[MATMUL:%[^ ]+]] = f32[6,30]{1,0} get-tuple-element([[MATMUL_TUPLE]]), index=0 ; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = f32[2,3,5,6]{3,2,1,0} bitcast([[MATMUL]]) )"); } TEST_F(CublasLtGemmRewriteTest, ReluActivationSliced) { const char* hlo_text = R"( HloModule test ENTRY test { x = f32[2,3] parameter(0) y = f32[3,4] parameter(1) dot_a = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} c = f32[] constant(0) c_bcast = f32[2,2] broadcast(c), dimensions={} slice_a = f32[2,2] slice(dot_a), slice={[0:2], [0:2]} ROOT out = f32[2,2] maximum(slice_a, c_bcast) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3], {{.*}}: f32[3,4]) -> f32[2,2] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1) ; CHECK-NEXT: [[MATMUL_TUPLE:%[^ ]+]] = (f32[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]]), ; CHECK: custom_call_target="__cublas$lt$matmul", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"RELU" ; CHECK: } ; CHECK: [[MATMUL:%[^ ]+]] = f32[2,4]{1,0} get-tuple-element([[MATMUL_TUPLE]]), index=0 ; CHECK-NEXT: ROOT 
[[OUT:%[^ ]+]] = f32[2,2]{1,0} slice([[MATMUL]]), slice={[0:2], [0:2]} )"); } TEST_F(CublasLtGemmRewriteTest, MatrixBiasReluActivation) { const char* hlo_text = R"( HloModule test ENTRY test { x = f32[2,3] parameter(0) y = f32[3,4] parameter(1) z = f32[2,4] parameter(2) dot_a = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} add = f32[2,4] add(dot_a, z) c = f32[] constant(0) c_bcast = f32[2,4] broadcast(c), dimensions={} ROOT out = f32[2,4] maximum(add, c_bcast) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3], {{.*}}: f32[3,4], {{.*}}: f32[2,4]) -> f32[2,4] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1) ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[2,4]{1,0} parameter(2) ; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[P2]]), ; CHECK: custom_call_target="__cublas$lt$matmul", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":1 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"RELU" ; CHECK: } )"); } TEST_F(CublasLtGemmRewriteTest, SquareMatrixBiasReluActivation) { const char* hlo_text = R"( HloModule test ENTRY test { x = f32[4,4] parameter(0) y = f32[4,4] parameter(1) z = f32[4,4] parameter(2) dot_a = f32[4,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} add = f32[4,4] add(dot_a, z) c = f32[] constant(0) c_bcast = f32[4,4] broadcast(c), dimensions={} ROOT out = f32[4,4] maximum(add, c_bcast) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[4,4], {{.*}}: f32[4,4], {{.*}}: f32[4,4]) -> f32[4,4] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[4,4]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[4,4]{1,0} parameter(1) ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4,4]{1,0} parameter(2) ; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[4,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[P2]]), ; CHECK: custom_call_target="__cublas$lt$matmul", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":1 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"RELU" ; CHECK: } )"); } TEST_F(CublasLtGemmRewriteTest, VectorBiasReluActivation) { const char* hlo_text = R"( HloModule test ENTRY test { x = f32[2,3] parameter(0) y = f32[3,4] parameter(1) z = f32[4] parameter(2) dot_a = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} z_bcast = f32[2,4] broadcast(z), dimensions={1} add = f32[2,4] add(dot_a, z_bcast) c = f32[] constant(0) c_bcast = f32[2,4] broadcast(c), dimensions={} ROOT out = f32[2,4] maximum(add, c_bcast) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3], {{.*}}: f32[3,4], {{.*}}: 
f32[4]) -> f32[2,4] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1) ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4]{0} parameter(2) ; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[P2]]), ; CHECK: custom_call_target="__cublas$lt$matmul", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"BIAS_RELU" ; CHECK: } )"); } TEST_F(CublasLtGemmRewriteTest, BatchedVectorBiasReluActivation) { const char* hlo_text = R"( HloModule test ENTRY test { x = f32[2,3,4] parameter(0) y = f32[4,5,6] parameter(1) z = f32[3,5,6] parameter(2) dot_a = f32[2,3,5,6] dot(x, y), lhs_contracting_dims={2}, rhs_contracting_dims={0}, operand_precision={highest,highest} z_bcast = f32[2,3,5,6] broadcast(z), dimensions={1,2,3} add = f32[2,3,5,6] add(dot_a, z_bcast) c = f32[] constant(0) c_bcast = f32[2,3,5,6] broadcast(c), dimensions={} ROOT out = f32[2,3,5,6] maximum(add, c_bcast) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3,4], {{.*}}: f32[4,5,6], {{.*}}: f32[3,5,6]) -> f32[2,3,5,6] { ; CHECK: [[MATMUL_TUPLE:%[^ ]+]] = (f32[6,30]{1,0}, s8[{{[0-9]+}}]{0}) custom-call( ; CHECK: custom_call_target="__cublas$lt$matmul", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":1 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["HIGHEST","HIGHEST"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"RELU" ; CHECK: } ; CHECK-NEXT: [[MATMUL:%[^ ]+]] = f32[6,30]{1,0} get-tuple-element([[MATMUL_TUPLE]]), index=0 ; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = f32[2,3,5,6]{3,2,1,0} bitcast([[MATMUL]]) )"); } TEST_F(CublasLtGemmRewriteTest, VectorBiasTransposedReluActivation) { const char* hlo_text = R"( HloModule test ENTRY test { x = f32[2,3] parameter(0) y = f32[3,4] parameter(1) z = f32[2] parameter(2) dot_a = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} z_bcast = f32[2,4] broadcast(z), dimensions={0} add = f32[2,4] add(dot_a, z_bcast) c = f32[] constant(0) c_bcast = f32[2,4] broadcast(c), dimensions={} maximum = f32[2,4] maximum(add, c_bcast) ROOT out = f32[4,2] transpose(maximum), dimensions={1,0} } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3], {{.*}}: f32[3,4], {{.*}}: f32[2]) -> f32[4,2] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1) ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[2]{0} parameter(2) ; CHECK-NEXT: [[MATMUL_TUPLE:%[^ ]+]] = (f32[2,4]{0,1}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[P2]]), ; CHECK: custom_call_target="__cublas$lt$matmul", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: 
"dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"BIAS_RELU" ; CHECK: } ; CHECK-NEXT: [[MATMUL:%[^ ]+]] = f32[2,4]{0,1} get-tuple-element([[MATMUL_TUPLE]]), index=0 ; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = f32[4,2]{1,0} bitcast([[MATMUL]]) )"); } TEST_F(CublasLtGemmRewriteTest, VectorBiasThenMatrixBiasReluActivation) { const char* hlo_text = R"( HloModule test ENTRY test { x = f32[2,3] parameter(0) y = f32[3,4] parameter(1) z_vec = f32[4] parameter(2) z_matrix = f32[2,4] parameter(3) dot_a = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} z_bcast = f32[2,4] broadcast(z_vec), dimensions={1} add0 = f32[2,4] add(dot_a, z_bcast) add1 = f32[2,4] add(add0, z_matrix) c = f32[] constant(0) c_bcast = f32[2,4] broadcast(c), dimensions={} ROOT out = f32[2,4] maximum(add1, c_bcast) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3], {{.*}}: f32[3,4], {{.*}}: f32[4], {{.*}}: f32[2,4]) -> f32[2,4] { ; CHECK-DAG: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1) ; CHECK-DAG: [[P2:%[^ ]+]] = f32[4]{0} parameter(2) ; CHECK-DAG: [[P3:%[^ ]+]] = f32[2,4]{1,0} parameter(3) ; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[P3]], [[P2]]), ; CHECK: custom_call_target="__cublas$lt$matmul", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":1 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"BIAS_RELU" ; CHECK: } )"); } TEST_F(CublasLtGemmRewriteTest, ApproxGeluActivation) { const char* hlo_text = R"( HloModule test ENTRY test { x = f32[2,3] parameter(0) y = f32[3,4] parameter(1) dot = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} mul.0 = f32[2,4] multiply(dot, dot) mul.1 = f32[2,4] multiply(dot, mul.0) const.0 = f32[] constant(0.044715) bcast.0 = f32[2,4] broadcast(const.0), dimensions={} mul.2 = f32[2,4] multiply(mul.1, bcast.0) add.0 = f32[2,4] add(dot, mul.2) const.1 = f32[] constant(0.797884583) bcast.1 = f32[2,4] broadcast(const.1), dimensions={} mul.3 = f32[2,4] multiply(add.0, bcast.1) tanh = f32[2,4] tanh(mul.3) const.2 = f32[] constant(1) bcast.2 = f32[2,4] broadcast(const.2), dimensions={} add.2 = f32[2,4] add(tanh, bcast.2) const.3 = f32[] constant(0.5) bcast.3 = f32[2,4] broadcast(const.3), dimensions={} mul.4 = f32[2,4] multiply(add.2, bcast.3) ROOT out = f32[2,4] multiply(dot, mul.4) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3], {{.*}}: f32[3,4]) -> f32[2,4] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1) ; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]]), ; CHECK: custom_call_target="__cublas$lt$matmul", ; CHECK: 
backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"GELU" ; CHECK: } )"); } TEST_F(CublasLtGemmRewriteTest, ApproxGeluActivationWrongConstant) { const char* hlo_text = R"( HloModule test ENTRY test { x = f32[2,3] parameter(0) y = f32[3,4] parameter(1) dot = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} mul.0 = f32[2,4] multiply(dot, dot) mul.1 = f32[2,4] multiply(dot, mul.0) const.0 = f32[] constant(0.05) bcast.0 = f32[2,4] broadcast(const.0), dimensions={} mul.2 = f32[2,4] multiply(mul.1, bcast.0) add.0 = f32[2,4] add(dot, mul.2) const.1 = f32[] constant(0.797884583) bcast.1 = f32[2,4] broadcast(const.1), dimensions={} mul.3 = f32[2,4] multiply(add.0, bcast.1) tanh = f32[2,4] tanh(mul.3) const.2 = f32[] constant(1) bcast.2 = f32[2,4] broadcast(const.2), dimensions={} add.2 = f32[2,4] add(tanh, bcast.2) const.3 = f32[] constant(0.5) bcast.3 = f32[2,4] broadcast(const.3), dimensions={} mul.4 = f32[2,4] multiply(add.2, bcast.3) ROOT out = f32[2,4] multiply(dot, mul.4) } )"; MatchOptimizedHlo(hlo_text, R"( ; CHECK-NOT: GELU )"); } TEST_F(CublasLtGemmRewriteTest, VectorBiasThenApproxGeluActivation) {
#if TENSORFLOW_USE_ROCM && TF_ROCM_VERSION >= 60000
auto rocm_switch = false;
#else
auto rocm_switch = true;
#endif
if (!IsCuda() && rocm_switch) { GTEST_SKIP() << "TODO: Unsupported blas-lt epilogue on ROCM"; } const char* hlo_text = R"( HloModule test ENTRY test { x = f32[2,3] parameter(0) y = f32[3,4] parameter(1) z = f32[4] parameter(2) dot = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} z_bcast = f32[2,4] broadcast(z), dimensions={1} add = f32[2,4] add(dot, z_bcast) mul.0 = f32[2,4] multiply(add, add) mul.1 = f32[2,4] multiply(add, mul.0) const.0 = f32[] constant(0.044715) bcast.0 = f32[2,4] broadcast(const.0), dimensions={} mul.2 = f32[2,4] multiply(mul.1, bcast.0) add.0 = f32[2,4] add(add, mul.2) const.1 = f32[] constant(0.797884583) bcast.1 = f32[2,4] broadcast(const.1), dimensions={} mul.3 = f32[2,4] multiply(add.0, bcast.1) tanh = f32[2,4] tanh(mul.3) const.2 = f32[] constant(1) bcast.2 = f32[2,4] broadcast(const.2), dimensions={} add.2 = f32[2,4] add(tanh, bcast.2) const.3 = f32[] constant(0.5) bcast.3 = f32[2,4] broadcast(const.3), dimensions={} mul.4 = f32[2,4] multiply(add.2, bcast.3) ROOT out = f32[2,4] multiply(add, mul.4) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3], {{.*}}: f32[3,4], {{.*}}: f32[4]) -> f32[2,4] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1) ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4]{0} parameter(2) ; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[P2]]), ; CHECK: custom_call_target="__cublas$lt$matmul", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ;
CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"BIAS_GELU" ; CHECK: } )"); } TEST_F(CublasLtGemmRewriteTest, ApproxGeluActivationWithAux) { if (!IsCuda()) { GTEST_SKIP() << "TODO: Unsupported blas-lt epilogue on ROCM"; } const char* hlo_text = R"( HloModule test ENTRY test { x = f32[2,3] parameter(0) y = f32[3,4] parameter(1) dot = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} mul.0 = f32[2,4] multiply(dot, dot) mul.1 = f32[2,4] multiply(dot, mul.0) const.0 = f32[] constant(0.044715) bcast.0 = f32[2,4] broadcast(const.0), dimensions={} mul.2 = f32[2,4] multiply(mul.1, bcast.0) add.0 = f32[2,4] add(dot, mul.2) const.1 = f32[] constant(0.797884583) bcast.1 = f32[2,4] broadcast(const.1), dimensions={} mul.3 = f32[2,4] multiply(add.0, bcast.1) tanh = f32[2,4] tanh(mul.3) const.2 = f32[] constant(1) bcast.2 = f32[2,4] broadcast(const.2), dimensions={} add.2 = f32[2,4] add(tanh, bcast.2) const.3 = f32[] constant(0.5) bcast.3 = f32[2,4] broadcast(const.3), dimensions={} mul.4 = f32[2,4] multiply(add.2, bcast.3) mul.5 = f32[2,4] multiply(dot, mul.4) ROOT out = (f32[2,4], f32[2,4]) tuple(mul.5, dot) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3], {{.*}}: f32[3,4]) -> (f32[2,4], f32[2,4]) { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1) ; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[2,4]{1,0}, f32[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]]), ; CHECK: custom_call_target="__cublas$lt$matmul", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"GELU_AUX" ; CHECK: } )"); } TEST_F(CublasLtGemmRewriteTest, VectorBiasThenApproxGeluActivationWithAux) { if (!IsCuda()) { GTEST_SKIP() << "TODO: Unsupported blas-lt epilogue on ROCM"; } const char* hlo_text = R"( HloModule test ENTRY test { x = f32[2,3] parameter(0) y = f32[3,4] parameter(1) z = f32[4] parameter(2) dot = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} z_bcast = f32[2,4] broadcast(z), dimensions={1} add = f32[2,4] add(dot, z_bcast) mul.0 = f32[2,4] multiply(add, add) mul.1 = f32[2,4] multiply(add, mul.0) const.0 = f32[] constant(0.044715) bcast.0 = f32[2,4] broadcast(const.0), dimensions={} mul.2 = f32[2,4] multiply(mul.1, bcast.0) add.0 = f32[2,4] add(add, mul.2) const.1 = f32[] constant(0.797884583) bcast.1 = f32[2,4] broadcast(const.1), dimensions={} mul.3 = f32[2,4] multiply(add.0, bcast.1) tanh = f32[2,4] tanh(mul.3) const.2 = f32[] constant(1) bcast.2 = f32[2,4] broadcast(const.2), dimensions={} add.2 = f32[2,4] add(tanh, bcast.2) const.3 = f32[] constant(0.5) bcast.3 = f32[2,4] broadcast(const.3), dimensions={} mul.4 = f32[2,4] multiply(add.2, bcast.3) mul.5 = f32[2,4] multiply(add, mul.4) ROOT out = (f32[2,4], f32[2,4]) tuple(mul.5, add) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3], {{.*}}: f32[3,4], {{.*}}: f32[4]) -> (f32[2,4], 
f32[2,4]) { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1) ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4]{0} parameter(2) ; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[2,4]{1,0}, f32[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[P2]]), ; CHECK: custom_call_target="__cublas$lt$matmul", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"BIAS_GELU_AUX" ; CHECK: } )"); } TEST_F(CublasLtGemmRewriteTest, ApproxGeluActivationBF16) { if (IsCuda() && !HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) { GTEST_SKIP() << "Padding of GEMM bf16 operands only implemented on " "architectures with bf16 Tensor Cores."; } const char* hlo_text = R"( HloModule test ENTRY test { x = bf16[2,3] parameter(0) y = bf16[3,4] parameter(1) dot = bf16[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} mul.0 = bf16[2,4] multiply(dot, dot) mul.1 = bf16[2,4] multiply(dot, mul.0) const.0 = bf16[] constant(0.044715) bcast.0 = bf16[2,4] broadcast(const.0), dimensions={} mul.2 = bf16[2,4] multiply(mul.1, bcast.0) add.0 = bf16[2,4] add(dot, mul.2) const.1 = bf16[] constant(0.797884583) bcast.1 = bf16[2,4] broadcast(const.1), dimensions={} mul.3 = bf16[2,4] multiply(add.0, bcast.1) tanh = bf16[2,4] tanh(mul.3) const.2 = bf16[] constant(1) bcast.2 = bf16[2,4] broadcast(const.2), dimensions={} add.2 = bf16[2,4] add(tanh, bcast.2) const.3 = bf16[] constant(0.5) bcast.3 = bf16[2,4] broadcast(const.3), dimensions={} mul.4 = bf16[2,4] multiply(add.2, bcast.3) ROOT out = bf16[2,4] multiply(dot, mul.4) })"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{5e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-DAG: ENTRY %test ({{.*}}: bf16[2,3], {{.*}}: bf16[3,4]) -> bf16[2,4] { ; CHECK-DAG: bf16[8,8]{1,0} pad({{.*}}), padding=0_6x0_5 ; CHECK-DAG: bf16[8,8]{1,0} pad({{.*}}), padding=0_5x0_4 )"); } TEST_F(CublasLtGemmRewriteTest, ApproxGeluActivationBitcast) { const char* hlo_text = R"( HloModule test ENTRY test { x = f32[2,3] parameter(0) y = f32[3,4] parameter(1) dot = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} dot_bitcast = f32[2,2,2] bitcast(dot) mul.0 = f32[2,2,2] multiply(dot_bitcast, dot_bitcast) mul.1 = f32[2,2,2] multiply(dot_bitcast, mul.0) const.0 = f32[] constant(0.044715) bcast.0 = f32[2,2,2] broadcast(const.0), dimensions={} mul.2 = f32[2,2,2] multiply(mul.1, bcast.0) add.0 = f32[2,2,2] add(dot_bitcast, mul.2) const.1 = f32[] constant(0.797884583) bcast.1 = f32[2,2,2] broadcast(const.1), dimensions={} mul.3 = f32[2,2,2] multiply(add.0, bcast.1) tanh = f32[2,2,2] tanh(mul.3) const.2 = f32[] constant(1) bcast.2 = f32[2,2,2] broadcast(const.2), dimensions={} add.2 = f32[2,2,2] add(tanh, bcast.2) const.3 = f32[] constant(0.5) bcast.3 = f32[2,2,2] broadcast(const.3), dimensions={} mul.4 = f32[2,2,2] multiply(add.2, bcast.3) ROOT out = f32[2,2,2] multiply(dot_bitcast, mul.4) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text)); GemmRewriter pass(Capability(), GetToolkitVersion()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); 
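// Descriptive note (added comment): this test drives GemmRewriter directly
// via RunHloPass, rather than compiling through the full optimization
// pipeline, so the matchers below see exactly what the rewriter produced.
// The expectation is that the approximate-GELU polynomial built on the
// bitcast of the dot is absorbed into a single __cublas$lt$matmul custom
// call, and the root is merely a bitcast of its tuple element back to the
// original f32[2,2,2] shape.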
EXPECT_TRUE(changed); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch( m::Bitcast(m::GetTupleElement( m::CustomCall({"__cublas$lt$matmul"}, m::Parameter(0).WithShape(F32, {2, 3}), m::Parameter(1).WithShape(F32, {3, 4})), 0)) .WithShape(F32, {2, 2, 2}))); } TEST_F(CublasLtGemmRewriteTest, MatrixBiasF16) { const char* hlo_text = R"( HloModule test ENTRY test { x = f16[8,16] parameter(0) y = f16[16,8] parameter(1) z = f16[8,8] parameter(2) dot_a = f16[8,8] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT out = f16[8,8] add(dot_a, z) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f16[8,16], {{.*}}: f16[16,8], {{.*}}: f16[8,8]) -> f16[8,8] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f16[8,16]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = f16[16,8]{1,0} parameter(1) ; CHECK-NEXT: [[P2:%[^ ]+]] = f16[8,8]{1,0} parameter(2) ; CHECK-NEXT: [[OUT:%[^ ]+]] = (f16[8,8]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[P2]]), ; CHECK: custom_call_target="__cublas$lt$matmul", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":1 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_F(CublasLtGemmRewriteTest, VectorBiasF32UnpaddedWithBitcast) { const char* hlo_text = R"( HloModule test ENTRY test { x = f32[2,3]{1,0} parameter(0) y = f32[3,4]{1,0} parameter(1) z = f32[2]{0} parameter(2) dot_a = f32[2,4]{0,1} dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} bitc = f32[4,2]{1,0} bitcast(f32[2,4]{0,1} dot_a) z_bcast = f32[4,2] broadcast(z), dimensions={1} ROOT add = f32[4,2]{1,0} add(bitc, z_bcast) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text)); GemmRewriter pass(Capability(), GetToolkitVersion()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_TRUE(changed); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch( m::Bitcast(m::GetTupleElement( m::CustomCall({"__cublas$lt$matmul"}, m::Parameter(0), m::Parameter(1), m::Parameter(2).WithShape(F32, {2})), 0) .WithShape(F32, {2, 4})) .WithShape(F32, {4, 2}))); } TEST_F(CublasLtGemmRewriteTest, VectorBiasF16Unpadded) { const char* hlo_text = R"( HloModule test ENTRY test { x = f16[8,16] parameter(0) y = f16[16,8] parameter(1) z = f16[8] parameter(2) dot_a = f16[8,8] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} z_bcast = f16[8,8] broadcast(z), dimensions={1} ROOT add = f16[8,8] add(dot_a, z_bcast) })"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{8e-3, 2e-3})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-NOT: pad(" ; CHECK: custom-call ; CHECK-SAME: custom_call_target="__cublas$lt$matmul" )"); } TEST_F(CublasLtGemmRewriteTest, VectorBiasF16Padded) { if (IsCuda() && !HasCudaComputeCapability(se::CudaComputeCapability::Volta())) { GTEST_SKIP() << "Padding of GEMM operands only implemented on " "architectures with Tensor Cores."; } const char* hlo_text = R"( HloModule test ENTRY test { x = f16[6,12] parameter(0) y = f16[12,6] parameter(1) z = f16[6] parameter(2) dot_a = f16[6,6] dot(x, y), lhs_contracting_dims={1}, 
rhs_contracting_dims={0} z_bcast = f16[6,6] broadcast(z), dimensions={1} ROOT add = f16[6,6] add(dot_a, z_bcast) })"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-DAG: ENTRY %test ({{.*}}: f16[6,12], {{.*}}: f16[12,6], {{.*}}: f16[6]) -> f16[6,6] { ; CHECK-DAG: f16[8,16]{1,0} pad({{.*}}), padding=0_2x0_4 ; CHECK-DAG: f16[16,8]{1,0} pad({{.*}}), padding=0_4x0_2 )"); } TEST_F(CublasLtGemmRewriteTest, ReluActivationF16Unpadded) { const char* hlo_text = R"( HloModule test ENTRY test { x = f16[8,16] parameter(0) y = f16[16,8] parameter(1) dot_a = f16[8,8] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} c = f16[] constant(0) c_bcast = f16[8,8] broadcast(c), dimensions={} ROOT out = f16[8,8] maximum(dot_a, c_bcast) })"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-NOT: pad(" ; CHECK: custom-call ; CHECK-SAME: custom_call_target="__cublas$lt$matmul" )"); } TEST_F(CublasLtGemmRewriteTest, ReluActivationF16Padded) { if (IsCuda() && !HasCudaComputeCapability(se::CudaComputeCapability::Volta())) { GTEST_SKIP() << "Padding of GEMM operands only implemented on " "architectures with Tensor Cores."; } const char* hlo_text = R"( HloModule test ENTRY test { x = f16[6,12] parameter(0) y = f16[12,6] parameter(1) dot_a = f16[6,6] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} c = f16[] constant(0) c_bcast = f16[6,6] broadcast(c), dimensions={} ROOT out = f16[6,6] maximum(dot_a, c_bcast) })"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-DAG: ENTRY %test ({{.*}}: f16[6,12], {{.*}}: f16[12,6]) -> f16[6,6] { ; CHECK-DAG: f16[8,16]{1,0} pad({{.*}}), padding=0_2x0_4 ; CHECK-DAG: f16[16,8]{1,0} pad({{.*}}), padding=0_4x0_2 )"); } TEST_F(CublasLtGemmRewriteTest, MatrixBiasReluActivationF16) { const char* hlo_text = R"( HloModule test ENTRY test { x = f16[8,16] parameter(0) y = f16[16,8] parameter(1) z = f16[8,8] parameter(2) dot_a = f16[8,8] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} add = f16[8,8] add(dot_a, z) c = f16[] constant(0) c_bcast = f16[8,8] broadcast(c), dimensions={} ROOT out = f16[8,8] maximum(add, c_bcast) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f16[8,16], {{.*}}: f16[16,8], {{.*}}: f16[8,8]) -> f16[8,8] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f16[8,16]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = f16[16,8]{1,0} parameter(1) ; CHECK-NEXT: [[P2:%[^ ]+]] = f16[8,8]{1,0} parameter(2) ; CHECK-NEXT: [[OUT:%[^ ]+]] = (f16[8,8]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[P2]]), ; CHECK: custom_call_target="__cublas$lt$matmul", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":1 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"RELU" ; CHECK: } )"); } TEST_F(CublasLtGemmRewriteTest, VectorBiasReluActivationF16Unpadded) { const char* hlo_text = R"( HloModule test ENTRY test { x = f16[8,16] parameter(0) y = f16[16,8] parameter(1) z = f16[8] parameter(2) dot_a = f16[8,8] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} z_bcast = f16[8,8] 
broadcast(z), dimensions={1} add = f16[8,8] add(dot_a, z_bcast) c = f16[] constant(0) c_bcast = f16[8,8] broadcast(c), dimensions={} ROOT out = f16[8,8] maximum(add, c_bcast) })"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-NOT: pad(" ; CHECK: custom-call ; CHECK-SAME: custom_call_target="__cublas$lt$matmul" )"); } TEST_F(CublasLtGemmRewriteTest, VectorBiasReluActivationF16Padded) { if (IsCuda() && !HasCudaComputeCapability(se::CudaComputeCapability::Volta())) { GTEST_SKIP() << "Padding of GEMM operands only implemented on " "architectures with Tensor Cores."; } const char* hlo_text = R"( HloModule test ENTRY test { x = f16[6,12] parameter(0) y = f16[12,6] parameter(1) z = f16[6] parameter(2) dot_a = f16[6,6] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} z_bcast = f16[6,6] broadcast(z), dimensions={1} add = f16[6,6] add(dot_a, z_bcast) c = f16[] constant(0) c_bcast = f16[6,6] broadcast(c), dimensions={} ROOT out = f16[6,6] maximum(add, c_bcast) })"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-DAG: ENTRY %test ({{.*}}: f16[6,12], {{.*}}: f16[12,6], {{.*}}: f16[6]) -> f16[6,6] { ; CHECK-DAG: f16[8,16]{1,0} pad({{.*}}), padding=0_2x0_4 ; CHECK-DAG: f16[16,8]{1,0} pad({{.*}}), padding=0_4x0_2 )"); } TEST_F(CublasLtGemmRewriteTest, MatrixBiasBF16) { const char* hlo_text = R"( HloModule test ENTRY test { x = bf16[8,16] parameter(0) y = bf16[16,8] parameter(1) z = bf16[8,8] parameter(2) dot_a = bf16[8,8] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT out = bf16[8,8] add(dot_a, z) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3})); if (IsCuda() && !HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) { GTEST_SKIP() << "Pre-Ampere casts up bf16 to fp32"; } MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: bf16[8,16], {{.*}}: bf16[16,8], {{.*}}: bf16[8,8]) -> bf16[8,8] { ; CHECK-DAG: [[P0:%[^ ]+]] = bf16[8,16]{1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = bf16[16,8]{1,0} parameter(1) ; CHECK-DAG: [[P2:%[^ ]+]] = bf16[8,8]{1,0} parameter(2) ; CHECK-NEXT: [[OUT:%[^ ]+]] = (bf16[8,8]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[P2]]), ; CHECK: custom_call_target="__cublas$lt$matmul", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":1 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_F(CublasLtGemmRewriteTest, MatrixBiasBitcastBF16) { const char* hlo_text = R"( HloModule test ENTRY test { x = bf16[8,16] parameter(0) y = bf16[16,8] parameter(1) bias = bf16[2,4,8] parameter(2) dot = bf16[8,8] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} bitcast = bf16[2,4,8] bitcast(dot) ROOT out = bf16[2,4,8] add(bitcast, bias) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text)); GemmRewriter pass(Capability(), GetToolkitVersion()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_TRUE(changed); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch( m::Bitcast( m::GetTupleElement( m::CustomCall( 
{"__cublas$lt$matmul"}, m::Parameter(0).WithShape(BF16, {8, 16}), m::Parameter(1).WithShape(BF16, {16, 8}), m::Bitcast(m::Parameter(2)).WithShape(BF16, {8, 8})), 0)) .WithShape(BF16, {2, 4, 8}))); } TEST_F(CublasLtGemmRewriteTest, VectorBiasBF16Unpadded) { if (IsCuda() && !HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) { GTEST_SKIP() << "Pre-Ampere rewrites to cutlass_gemm_with_upcast instead of cublas."; } const char* hlo_text = R"( HloModule test ENTRY test { x = bf16[8,16] parameter(0) y = bf16[16,8] parameter(1) z = bf16[8] parameter(2) dot_a = bf16[8,8] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} z_bcast = bf16[8,8] broadcast(z), dimensions={1} ROOT add = bf16[8,8] add(dot_a, z_bcast) })"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{8e-3, 2e-3})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-NOT: pad(" ; CHECK: custom-call ; CHECK-SAME: custom_call_target="__cublas$lt$matmul" )"); } TEST_F(CublasLtGemmRewriteTest, VectorBiasBF16Padded) { if (IsCuda() && !HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) { GTEST_SKIP() << "Padding of GEMM operands in bfloat16 only implemented on " "Ampere and newer architectures."; } const char* hlo_text = R"( HloModule test ENTRY test { x = bf16[6,12] parameter(0) y = bf16[12,6] parameter(1) z = bf16[6] parameter(2) dot_a = bf16[6,6] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} z_bcast = bf16[6,6] broadcast(z), dimensions={1} ROOT add = bf16[6,6] add(dot_a, z_bcast) })"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-DAG: ENTRY %test ({{.*}}: bf16[6,12], {{.*}}: bf16[12,6], {{.*}}: bf16[6]) -> bf16[6,6] { ; CHECK-DAG: bf16[8,16]{1,0} pad({{.*}}), padding=0_2x0_4 ; CHECK-DAG: bf16[16,8]{1,0} pad({{.*}}), padding=0_4x0_2 )"); } TEST_F(CublasLtGemmRewriteTest, ReluActivationBF16Unpadded) { if (IsCuda() && !HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) { GTEST_SKIP() << "Pre-Ampere rewrites to cutlass_gemm_with_upcast instead of cublas."; } const char* hlo_text = R"( HloModule test ENTRY test { x = bf16[8,16] parameter(0) y = bf16[16,8] parameter(1) dot_a = bf16[8,8] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} c = bf16[] constant(0) c_bcast = bf16[8,8] broadcast(c), dimensions={} ROOT out = bf16[8,8] maximum(dot_a, c_bcast) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-NOT: pad(" ; CHECK: custom-call ; CHECK-SAME: custom_call_target="__cublas$lt$matmul" )"); } TEST_F(CublasLtGemmRewriteTest, ReluActivationBF16Padded) { if (IsCuda() && !HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) { GTEST_SKIP() << "Padding of GEMM operands in bfloat16 only implemented on " "Ampere and newer architectures."; } const char* hlo_text = R"( HloModule test ENTRY test { x = bf16[6,12] parameter(0) y = bf16[12,6] parameter(1) dot_a = bf16[6,6] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} c = bf16[] constant(0) c_bcast = bf16[6,6] broadcast(c), dimensions={} ROOT out = bf16[6,6] maximum(dot_a, c_bcast) })"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-DAG: ENTRY %test ({{.*}}: bf16[6,12], {{.*}}: bf16[12,6]) -> bf16[6,6] { ; CHECK-DAG: bf16[8,16]{1,0} pad({{.*}}), padding=0_2x0_4 ; CHECK-DAG: bf16[16,8]{1,0} pad({{.*}}), padding=0_4x0_2 )"); } TEST_F(CublasLtGemmRewriteTest, VectorBiasReluActivationBF16Unpadded) { if (IsCuda() && 
!HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) { GTEST_SKIP() << "Pre-Ampere rewrites to cutlass_gemm_with_upcast instead of cublas."; } const char* hlo_text = R"( HloModule test ENTRY test { x = bf16[8,16] parameter(0) y = bf16[16,8] parameter(1) z = bf16[8] parameter(2) dot_a = bf16[8,8] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} z_bcast = bf16[8,8] broadcast(z), dimensions={1} add = bf16[8,8] add(dot_a, z_bcast) c = bf16[] constant(0) c_bcast = bf16[8,8] broadcast(c), dimensions={} ROOT out = bf16[8,8] maximum(add, c_bcast) })"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{8e-3, 2e-3})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-NOT: pad(" ; CHECK: custom-call ; CHECK-SAME: custom_call_target="__cublas$lt$matmul" )"); } TEST_F(CublasLtGemmRewriteTest, VectorBiasReluActivationBF16Padded) { if (IsCuda() && !HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) { GTEST_SKIP() << "Padding of GEMM operands in bfloat16 only implemented on " "Ampere and newer architectures."; } const char* hlo_text = R"( HloModule test ENTRY test { x = bf16[6,12] parameter(0) y = bf16[12,6] parameter(1) z = bf16[6] parameter(2) dot_a = bf16[6,6] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} z_bcast = bf16[6,6] broadcast(z), dimensions={1} add = bf16[6,6] add(dot_a, z_bcast) c = bf16[] constant(0) c_bcast = bf16[6,6] broadcast(c), dimensions={} ROOT out = bf16[6,6] maximum(add, c_bcast) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-DAG: ENTRY %test ({{.*}}: bf16[6,12], {{.*}}: bf16[12,6], {{.*}}: bf16[6]) -> bf16[6,6] { ; CHECK-DAG: bf16[8,16]{1,0} pad({{.*}}), padding=0_2x0_4 ; CHECK-DAG: bf16[16,8]{1,0} pad({{.*}}), padding=0_4x0_2 )"); } TEST_F(CublasLtGemmRewriteTest, VectorBiasReluActivationF64) { if (!IsCuda()) { GTEST_SKIP() << "TODO: Unsupported blas-lt F64 datatype on ROCM"; } const char* hlo_text = R"( HloModule test ENTRY test { x = f64[2,3] parameter(0) y = f64[3,4] parameter(1) z = f64[4] parameter(2) dot_a = f64[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} z_bcast = f64[2,4] broadcast(z), dimensions={1} add = f64[2,4] add(dot_a, z_bcast) c = f64[] constant(0) c_bcast = f64[2,4] broadcast(c), dimensions={} ROOT out = f64[2,4] maximum(add, c_bcast) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-10, 1e-10})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f64[2,3], {{.*}}: f64[3,4], {{.*}}: f64[4]) -> f64[2,4] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f64[2,3]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = f64[3,4]{1,0} parameter(1) ; CHECK-NEXT: [[P2:%[^ ]+]] = f64[4]{0} parameter(2) ; CHECK-NEXT: [[OUT:%[^ ]+]] = (f64[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[P2]]), ; CHECK: custom_call_target="__cublas$lt$matmul", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"BIAS_RELU" ; CHECK: } )"); } TEST_F(CublasLtGemmRewriteTest, AlphaSimpleRewriteBiasAddActivation) { const char* hlo_text = R"( HloModule test ENTRY test { x = f32[2,3] parameter(0) y = f32[3,4] parameter(1) z = f32[4] parameter(2) k = f32[] constant(3.0) 
k_bcast = f32[2,4] broadcast(k), dimensions={} dot_a = f32[2,4] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0}, operand_precision={highest,highest} dot_a_multiplied = f32[2, 4] multiply(dot_a, k_bcast) z_bcast = f32[2,4] broadcast(z), dimensions={1} add = f32[2,4] add(dot_a_multiplied, z_bcast) c = f32[] constant(0) c_bcast = f32[2,4] broadcast(c), dimensions={} ROOT out = f32[2,4] maximum(add, c_bcast) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f32[2,3], {{.*}}: f32[3,4], {{.*}}: f32[4]) -> f32[2,4] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,3]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[3,4]{1,0} parameter(1) ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[4]{0} parameter(2) ; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[2,4]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1]], [[P2]]), ; CHECK: custom_call_target="__cublas$lt$matmul", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":3 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["HIGHEST","HIGHEST"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"BIAS_RELU" ; CHECK: } )"); } TEST_F(CublasLtGemmRewriteTest, FoldConstantBias) { const char* hlo_text = R"( HloModule test ENTRY test { x = f32[2,2] parameter(0) y = f32[2,2] parameter(1) bias = f32[2,2] broadcast(f32[2] constant({0, 0})), dimensions={0} dot1 = f32[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} bias1 = f32[2,2] parameter(2) sum1 = add(dot1, bias1) dot2 = f32[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} sum2 = add(dot2, f32[2,2] reshape(bias)) dot3 = f32[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} bias3 = f32[2,2] transpose(bias), dimensions={1,0} sum3 = add(dot3, bias3) dot4 = f32[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} sum4 = add(dot4, f32[2,2] bitcast(bias)) ROOT root = tuple(sum1, sum2, sum3, sum4) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text)); GemmRewriter pass(Capability(), GetToolkitVersion()); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); SCOPED_TRACE(module->ToString()); EXPECT_TRUE(changed); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch(m::Tuple( m::GetTupleElement( m::CustomCall(m::Parameter(0), m::Parameter(1), m::Parameter()), 0), m::GetTupleElement( m::CustomCall(m::Parameter(0), m::Parameter(1), m::Constant()), 0), m::GetTupleElement( m::CustomCall(m::Parameter(0), m::Parameter(1), m::Constant()), 0), m::GetTupleElement( m::CustomCall(m::Parameter(0), m::Parameter(1), m::Constant()), 0)))); } TEST_F(CublasLtGemmRewriteTest, MultipleMaximumUsers) { const char* hlo_text = R"( HloModule multiple_maximum_users relu { Arg_0 = f32[3,896,54]{2,1,0} parameter(0) constant = f32[] constant(0) broadcast = f32[3,896,54]{2,1,0} broadcast(constant), dimensions={} ROOT maximum = f32[3,896,54]{2,1,0} maximum(Arg_0, broadcast) } ENTRY main { constant = f32[] constant(1) broadcast_1 = f32[3,896,1024]{2,1,0} broadcast(constant), dimensions={} Arg_2 = f32[1024,54]{1,0} parameter(2) dot = f32[3,896,54]{2,1,0} dot(broadcast_1, Arg_2), lhs_contracting_dims={2}, rhs_contracting_dims={0} Arg_1 = f32[54]{0} 
parameter(1) broadcast_2 = f32[3,896,54]{2,1,0} broadcast(Arg_1), dimensions={2} add = f32[3,896,54]{2,1,0} add(dot, broadcast_2) call = f32[3,896,54]{2,1,0} call(add), to_apply=relu Arg_0 = f32[1]{0} parameter(0) reshape_1 = f32[1,1,1]{2,1,0} reshape(Arg_0) broadcast_3 = f32[1,1,1]{2,1,0} broadcast(reshape_1), dimensions={0,1,2} reshape_2 = f32[] reshape(broadcast_3) broadcast_4 = f32[3,896,54]{2,1,0} broadcast(reshape_2), dimensions={} multiply = f32[3,896,54]{2,1,0} multiply(call, broadcast_4) ROOT tuple = (f32[3,896,54]{2,1,0}, f32[3,896,54]{2,1,0}) tuple(multiply, call) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-4})); MatchOptimizedHlo(hlo_text, R"( ; CHECK: custom_call_target="__cublas$lt$matmul", )"); } TEST_F(CublasLtGemmRewriteTest, MatrixBiasMixTypeOutOfPlace) { if (!IsCuda()) { GTEST_SKIP() << "TODO: Unsupported mixed datatypes on ROCM"; } std::vector<std::tuple<absl::string_view, absl::string_view>> type_combinations = { {"f16", "f32"}, {"bf16", "f32"}, }; const char* hlo_text_template = R"( HloModule test ENTRY test { x = <<ABType>>[16,32] parameter(0) y = <<ABType>>[32,16] parameter(1) z = <<DType>>[16,16] parameter(2) dot_a = <<ABType>>[16,16] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} convert = <<DType>>[16,16] convert(dot_a) ROOT out = <<DType>>[16,16] add(convert, z) })"; for (const auto& type_combination : type_combinations) { absl::flat_hash_map<absl::string_view, absl::string_view> replacements; replacements["<<ABType>>"] = std::get<0>(type_combination); replacements["<<DType>>"] = std::get<1>(type_combination); const auto hlo_text = absl::StrReplaceAll(hlo_text_template, replacements); EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3})); if (std::get<0>(type_combination) == "bf16" && IsCuda() && !HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) { continue; } TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> optimized_module, GetOptimizedModule(hlo_text)); EXPECT_THAT( optimized_module->entry_computation()->root_instruction(), GmockMatch(m::GetTupleElement( m::CustomCall(m::Parameter(0), m::Parameter(1), m::Parameter(2)), 0))); } } TEST_F(CublasLtGemmRewriteTest, MatrixBiasMixTypeOutOfPlaceBatched) { if (!IsCuda()) { GTEST_SKIP() << "TODO: Unsupported mixed datatypes on ROCM"; } std::vector<std::tuple<absl::string_view, absl::string_view>> type_combinations = { {"f16", "f32"}, {"bf16", "f32"}, }; const char* hlo_text_template = R"( HloModule test ENTRY test { x = <<ABType>>[4,16,32] parameter(0) y = <<ABType>>[4,32,16] parameter(1) z = <<DType>>[4,16,16] parameter(2) dot_a = <<ABType>>[4,16,16] dot(x, y), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0} convert = <<DType>>[4,16,16] convert(dot_a) ROOT out = <<DType>>[4,16,16] add(convert, z) })"; for (const auto& type_combination : type_combinations) { absl::flat_hash_map<absl::string_view, absl::string_view> replacements; replacements["<<ABType>>"] = std::get<0>(type_combination); replacements["<<DType>>"] = std::get<1>(type_combination); const auto hlo_text = absl::StrReplaceAll(hlo_text_template, replacements); EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3})); if (std::get<0>(type_combination) == "bf16" && IsCuda() && !HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) { continue; } TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> optimized_module, GetOptimizedModule(hlo_text)); EXPECT_THAT( optimized_module->entry_computation()->root_instruction(), GmockMatch(m::GetTupleElement( 
m::CustomCall(m::Parameter(0), m::Parameter(1), m::Parameter(2)), 0))); } } TEST_F(CublasLtGemmRewriteTest, MatrixBiasMixTypeInPlace) { if (!IsCuda()) { GTEST_SKIP() << "TODO: Unsupported mixed datatypes on ROCM"; } std::vector<std::tuple<absl::string_view, absl::string_view>> type_combinations = { {"f16", "f32"}, {"bf16", "f32"}, }; const char* hlo_text_template = R"( HloModule test ENTRY test { x = <<ABType>>[16,32] parameter(0) y = <<ABType>>[32,16] parameter(1) z = <<DType>>[16,16] parameter(2) dot_a = <<ABType>>[16,16] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} bias = <<DType>>[16,16] negate(z) convert = <<DType>>[16,16] convert(dot_a) ROOT out = <<DType>>[16,16] add(convert, bias) })"; for (const auto& type_combination : type_combinations) { absl::flat_hash_map<absl::string_view, absl::string_view> replacements; replacements["<<ABType>>"] = std::get<0>(type_combination); replacements["<<DType>>"] = std::get<1>(type_combination); const auto hlo_text = absl::StrReplaceAll(hlo_text_template, replacements); EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3})); if (std::get<0>(type_combination) == "bf16" && IsCuda() && !HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) { continue; } TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> optimized_module, GetOptimizedModule(hlo_text)); EXPECT_THAT(optimized_module->entry_computation()->root_instruction(), GmockMatch(m::GetTupleElement( m::CustomCall(m::Parameter(0), m::Parameter(1), m::Negate(m::Parameter(2))), 0))); } } TEST_F(CublasLtGemmRewriteTest, MatrixBiasMixTypeNotSupported) { const char* hlo_text = R"( HloModule test ENTRY test { x = bf16[16,32] parameter(0) y = bf16[32,16] parameter(1) z = f64[16,16] parameter(2) dot_a = bf16[16,16] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} bias = f64[16,16] negate(z) convert = f64[16,16] convert(dot_a) ROOT out = f64[16,16] add(convert, bias) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-3, 1e-3})); if (IsCuda() && !HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) { GTEST_SKIP() << "Pre-Ampere casts up bf16 to fp32"; } TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> optimized_module, GetOptimizedModule(hlo_text)); MatchOptimizedHlo(hlo_text, R"( ; CHECK: %[[custom_call:.*]] = {{.*}} custom-call{{.*}}__cublas$lt$matmul ; CHECK: %[[tuple:.*]] = bf16[16,16]{1,0} get-tuple-element(%[[custom_call]]), index=0 ; CHECK: ROOT {{.*}} fusion({{.*}}%[[tuple]] )"); } class ParameterizedFp8GemmRewriteTest : public ParameterizedGemmRewriteTest { public: ParameterizedFp8GemmRewriteTest() { replacements_[kF8E4M3DatatypePlaceholder] = IsCuda() ? "f8e4m3fn" : "f8e4m3fnuz"; replacements_[kF8E5M2DatatypePlaceholder] = IsCuda() ? "f8e5m2" : "f8e5m2fnuz"; replacements_[kF8E4M3AmaxPlaceholder] = IsCuda() ? "448." 
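// Added comment on the two amax constants chosen by this ternary: 448 is
// the largest finite value of the f8e4m3fn type used on CUDA, and 240 is
// the largest finite value of the f8e4m3fnuz variant used on ROCm; the
// selected string is substituted for the <<F8E4M3_AMAX>> placeholder in
// the test HLO.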
: "240."; } void SetUp() override { if (IsCuda() && GetToolkitVersion() < se::SemanticVersion{12, 0, 0}) { GTEST_SKIP() << "F8 gemm rewrite is only supported in CUDA 12 and above."; } if (IsRocm() && GetToolkitVersion() < se::SemanticVersion{6, 0, 0}) { GTEST_SKIP() << "F8 gemm rewrite is only supported in ROCm 6.0 and above."; } } protected: void CheckFp8IfSupported(absl::string_view hlo_text, ErrorSpec error_spec = ErrorSpec{1e-2, 1e-2}) { if (!HasFp8Support()) { return; } std::string replaced_hlo_text = absl::StrReplaceAll(hlo_text, replacements_); EXPECT_TRUE(RunAndCompare(absl::StrReplaceAll(hlo_text, replacements_), error_spec)); TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> optimized_module, GetOptimizedModule(replaced_hlo_text)); const HloInstruction* call = FindInstruction(optimized_module.get(), HloOpcode::kCustomCall); ASSERT_NE(call, nullptr); EXPECT_EQ(call->custom_call_target(), "__cublas$lt$matmul$f8"); } void MatchOptimizedHlo(absl::string_view hlo, const absl::string_view pattern, bool print_operand_shape = false) { GemmRewriteTest::MatchOptimizedHlo( absl::StrReplaceAll(hlo, replacements_), absl::StrReplaceAll(pattern, replacements_), print_operand_shape); } void RunAndFilecheckHloRewrite( absl::string_view hlo, HloPassInterface&& hlo_pass, std::optional<absl::string_view> expected, std::function<void(HloModule*)> after_pass_checks = nullptr, const HloModuleConfig* config = nullptr) { if (expected.has_value()) { std::string replaced_pattern = absl::StrReplaceAll(expected.value(), replacements_); GemmRewriteTest::RunAndFilecheckHloRewrite( absl::StrReplaceAll(hlo, replacements_), std::move(hlo_pass), replaced_pattern, after_pass_checks, config); } } absl::StatusOr<std::unique_ptr<VerifiedHloModule>> ParseAndReturnVerifiedModule(absl::string_view hlo_text, int64_t replica_count = 1, int64_t num_partitions = 1) { return GemmRewriteTest::ParseAndReturnVerifiedModule( absl::StrReplaceAll(hlo_text, replacements_)); } private: static constexpr const char* kF8E4M3DatatypePlaceholder{"<<F8E4M3>>"}; static constexpr const char* kF8E5M2DatatypePlaceholder{"<<F8E5M2>>"}; static constexpr const char* kF8E4M3AmaxPlaceholder{"<<F8E4M3_AMAX>>"}; }; TEST_P(ParameterizedFp8GemmRewriteTest, SupportsF8NonMajorBatchDim) { const char* hlo_text = R"( HloModule t ENTRY main { %bitcast.73421 = f8e4m3fn[16,8,640]{2,1,0} parameter(0) %parameter_1.5 = f8e4m3fn[8,640,5120]{2,1,0} parameter(1) %parameter_2 = f8e4m3fn[8,640,5120]{2,1,0} parameter(2) %concatenate.2145 = f8e4m3fn[8,640,10240]{2,1,0} concatenate( f8e4m3fn[8,640,5120]{2,1,0} %parameter_1.5, f8e4m3fn[8,640,5120]{2,1,0} %parameter_2), dimensions={2} %dot.6237 = f32[8,16,10240]{2,1,0} dot( f8e4m3fn[16,8,640]{2,1,0} %bitcast.73421, f8e4m3fn[8,640,10240]{2,1,0} %concatenate.2145), lhs_batch_dims={1}, lhs_contracting_dims={2}, rhs_batch_dims={0}, rhs_contracting_dims={1} ROOT %convert.20480 = bf16[8,16,10240]{2,1,0} convert( f32[8,16,10240]{2,1,0} %dot.6237) })"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-2, 1e-2})); MatchOptimizedHlo(hlo_text, R"( ; CHECK: custom-call({{.*}}"lhs_batch_dimensions":["1"],"rhs_batch_dimensions":["0"] )"); } TEST_P(ParameterizedFp8GemmRewriteTest, DoNotRewriteToF8OnPreAda) { if (!IsCuda()) { GTEST_SKIP() << "FP8 Rewrite pattern is different on ROCM-6.2 "; } if (HasFp8Support()) { GTEST_SKIP() << "Test requires a pre-Ada GPU"; } const char* hlo_text = R"( HloModule test ENTRY PreAdaTest { x = <<F8E4M3>>[16,32] parameter(0) y = <<F8E4M3>>[32,16] parameter(1) ROOT out = <<F8E4M3>>[16,16] dot(x, y), 
lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; EXPECT_TRUE(RunAndCompare(absl::StrReplaceAll(hlo_text, replacements_), ErrorSpec{1e-2, 1e-2})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %PreAdaTest ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16]) -> <<F8E4M3>>[16,16] { ; CHECK: {{.*}} = {{.*}} custom-call({{.*}}, {{.*}}) ; CHECK-DAG: custom_call_target="<<CUBLAS_CUSTOM_CALL_TARGET_PLACEHOLDER>>" )"); } TEST_P(ParameterizedFp8GemmRewriteTest, DoNotRewriteOnPreAdaWithF32Output) { if (HasFp8Support()) { GTEST_SKIP() << "Test requires a pre-Ada GPU or an AMD GPU prior to MI300."; } const char* hlo_text = R"( HloModule test ENTRY PreAdaTest { x = <<F8E4M3>>[16,32] parameter(0) y = <<F8E4M3>>[32,16] parameter(1) ROOT out = f32[16,16] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; EXPECT_TRUE(RunAndCompare(absl::StrReplaceAll(hlo_text, replacements_), ErrorSpec{1e-2, 1e-2})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %PreAdaTest ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16]) -> f32[16,16] { ; CHECK: {{.*}} = {{.*}} custom-call({{.*}}, {{.*}}) ; CHECK-DAG: custom_call_target="<<CUBLAS_CUSTOM_CALL_TARGET_PLACEHOLDER>>" )"); } TEST_P(ParameterizedFp8GemmRewriteTest, UnsupportedTypesF8) { const char* hlo_text = R"( HloModule test ENTRY unsupported_types { x = <<F8E5M2>>[16,16] parameter(0) y = <<F8E5M2>>[16,16] parameter(1) ROOT out = <<F8E5M2>>[16,16] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; EXPECT_TRUE(RunAndCompare(absl::StrReplaceAll(hlo_text, replacements_), ErrorSpec{1e-2, 1e-2})); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(Capability(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK-LABEL: ENTRY %unsupported_types ({{.*}}: <<F8E5M2>>[16,16], {{.*}}: <<F8E5M2>>[16,16]) -> <<F8E5M2>>[16,16] { ; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E5M2>>[16,16]{1,0} parameter(0) ; CHECK-NEXT: [[P0_CONVERT:%[^ ]+]] = f16[16,16]{1,0} convert([[P0]]) ; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E5M2>>[16,16]{1,0} parameter(1) ; CHECK-NEXT: [[P1_CONVERT:%[^ ]+]] = f16[16,16]{1,0} convert([[P1]]) ; CHECK-NEXT: [[DOT:%[^ ]+]] = f16[16,16]{1,0} dot([[P0_CONVERT]], [[P1_CONVERT]]), lhs_contracting_dims={1}, rhs_contracting_dims={0} ; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = <<F8E5M2>>[16,16]{1,0} convert([[DOT]]) )"); } TEST_P(ParameterizedFp8GemmRewriteTest, UnscaledABUnscaledDF8) { const char* hlo_text = R"( HloModule test ENTRY test { x = <<F8E4M3>>[16,32] parameter(0) y = <<F8E4M3>>[32,16] parameter(1) ROOT out = <<F8E4M3>>[16,16] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; CheckFp8IfSupported(hlo_text); std::string checks = R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16]) -> <<F8E4M3>>[16,16] { ; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1) ; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0} ; CHECK-NEXT: [[C1:[^ ]+]] = f32[] constant(1) )"; if (IsRocm() && GetToolkitVersion() < se::SemanticVersion{6, 2, 0}) { checks.append( R"(; CHECK-GCN-NEXT: [[OUT:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[C1]], [[C1]]), )"); } else { checks.append( R"(; CHECK-NEXT: [[OUT:%[^ ]+]] = (<<F8E4M3>>[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[C1]], [[C1]]), )"); } checks.append( R"(; CHECK: custom_call_target="__cublas$lt$matmul$f8", ; CHECK: 
backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), checks); } TEST_P(ParameterizedFp8GemmRewriteTest, UnscaledABUnscaledDMatrixBiasF8) { const char* hlo_text = R"( HloModule test ENTRY test { x = <<F8E4M3>>[16,32] parameter(0) y = <<F8E4M3>>[32,16] parameter(1) dot_a = <<F8E4M3>>[16,16] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} b = <<F8E4M3>>[16,16] parameter(2) ROOT out = <<F8E4M3>>[16,16] add(dot_a, b) } )"; CheckFp8IfSupported(hlo_text); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: <<F8E4M3>>[16,16]) -> <<F8E4M3>>[16,16] { ; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1) ; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0} ; CHECK-NEXT: [[C1:[^ ]+]] = f32[] constant(1) ; CHECK-NEXT: [[DOT_TUPLE:%[^ ]+]] = (<<F8E4M3>>[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[C1]], [[C1]]), ; CHECK: custom_call_target="__cublas$lt$matmul$f8", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } ; CHECK: [[DOT:%[^ ]+]] = <<F8E4M3>>[16,16]{1,0} get-tuple-element([[DOT_TUPLE]]), index=0 ; CHECK-NEXT: [[P2:%[^ ]+]] = <<F8E4M3>>[16,16]{1,0} parameter(2) ; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = <<F8E4M3>>[16,16]{1,0} add([[DOT]], [[P2]]) )"); } TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABUnscaledDColMajorLhsF8) { const char* hlo_text = R"( HloModule test ENTRY test { x = <<F8E4M3>>[2,64,32]{1,2,0} parameter(0) y = <<F8E4M3>>[2,32,16]{2,1,0} parameter(1) x_scale = f32[] parameter(2) y_scale = f32[] parameter(3) dq_scale = f32[] multiply(x_scale, y_scale) dq_scale_bcast = f32[2,64,16] broadcast(dq_scale), dimensions={} out.0 = f32[2,64,16] dot(x, y), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0} ROOT out = f32[2,64,16] multiply(out.0, dq_scale_bcast) } )"; CheckFp8IfSupported(hlo_text); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[2,64,32], {{.*}}: <<F8E4M3>>[2,32,16], {{.*}}: f32[], {{.*}}: f32[]) -> f32[2,64,16] { ; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[2,64,32]{1,2,0} parameter(0) ; CHECK-NEXT: [[P0_BT:%[^ ]+]] = <<F8E4M3>>[2,32,64]{2,1,0} 
bitcast([[P0]]) ; CHECK-NEXT: [[P0_TR:%[^ ]+]] = <<F8E4M3>>[2,64,32]{2,1,0} transpose([[P0_BT]]), dimensions={0,2,1} ; CHECK-NEXT: [[P0_BT1:%[^ ]+]] = <<F8E4M3>>[2,32,64]{1,2,0} bitcast([[P0_TR]]) ; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[2,32,16]{2,1,0} parameter(1) ; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[2,16,32]{2,1,0} transpose([[P1]]), dimensions={0,2,1} ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[] parameter(2) ; CHECK-NEXT: [[P3:%[^ ]+]] = f32[] parameter(3) ; CHECK-NEXT: [[DQ:%[^ ]+]] = f32[] multiply([[P2]], [[P3]]) ; CHECK-NEXT: [[C1:%[^ ]+]] = f32[] constant(1) ; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[2,64,16]{2,1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0_BT1]], [[P1_TRANSPOSE]], [[DQ]], [[C1]]), ; CHECK: custom_call_target="__cublas$lt$matmul$f8", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["2"] ; CHECK-DAG: "lhs_batch_dimensions":["0"] ; CHECK-DAG: "rhs_batch_dimensions":["0"] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABUnscaledDF8) { const char* hlo_text = R"( HloModule test ENTRY test { x = <<F8E4M3>>[16,32] parameter(0) y = <<F8E4M3>>[32,16] parameter(1) x_f32 = f32[16,32] convert(x) y_f32 = f32[32,16] convert(y) x_scale = f32[] parameter(2) y_scale = f32[] parameter(3) x_scale_bcast = f32[16,32] broadcast(x_scale), dimensions={} y_scale_bcast = f32[32,16] broadcast(y_scale), dimensions={} x_unscaled = f32[16,32] multiply(x_f32, x_scale_bcast) y_unscaled = f32[32,16] multiply(y_f32, y_scale_bcast) ROOT out = f32[16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; CheckFp8IfSupported(hlo_text); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: f32[], {{.*}}: f32[]) -> f32[16,16] { ; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1) ; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0} ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[] parameter(2) ; CHECK-NEXT: [[P3:%[^ ]+]] = f32[] parameter(3) ; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2]], [[P3]]), ; CHECK: custom_call_target="__cublas$lt$matmul$f8", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABUnscaledDPaddedF8) { const char* hlo_text = R"( HloModule test ENTRY test { x = <<F8E4M3>>[13,17] parameter(0) y = <<F8E4M3>>[17,31] parameter(1) x_f32 = f32[13,17] convert(x) y_f32 = f32[17,31] convert(y) x_scale = f32[] parameter(2) y_scale = f32[] parameter(3) x_scale_bcast = f32[13,17] 
broadcast(x_scale), dimensions={} y_scale_bcast = f32[17,31] broadcast(y_scale), dimensions={} x_unscaled = f32[13,17] multiply(x_f32, x_scale_bcast) y_unscaled = f32[17,31] multiply(y_f32, y_scale_bcast) ROOT out = f32[13,31] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; CheckFp8IfSupported(hlo_text); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[13,17], {{.*}}: <<F8E4M3>>[17,31], {{.*}}: f32[], {{.*}}: f32[]) -> f32[13,31] { ; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[13,17]{1,0} parameter(0) ; CHECK-NEXT: [[C0:%[^ ]+]] = <<F8E4M3>>[] constant(0) ; CHECK-NEXT: [[P0_PADDED:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} pad([[P0]], [[C0]]), padding=0_3x0_15 ; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[17,31]{1,0} parameter(1) ; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[31,17]{1,0} transpose([[P1]]), dimensions={1,0} ; CHECK-NEXT: [[C1:%[^ ]+]] = <<F8E4M3>>[] constant(0) ; CHECK-NEXT: [[P1_TRANSPOSE_PADDED:%[^ ]+]] = <<F8E4M3>>[32,32]{1,0} pad([[P1_TRANSPOSE]], [[C1]]) ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[] parameter(2) ; CHECK-NEXT: [[P3:%[^ ]+]] = f32[] parameter(3) ; CHECK-NEXT: [[DOT_TUPLE:%[^ ]+]] = (f32[16,32]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0_PADDED]], [[P1_TRANSPOSE_PADDED]], [[P2]], [[P3]]), ; CHECK: custom_call_target="__cublas$lt$matmul$f8", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } ; CHECK-NEXT: [[DOT:%[^ ]+]] = f32[16,32]{1,0} get-tuple-element([[DOT_TUPLE]]), index=0 ; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = f32[13,31]{1,0} slice([[DOT]]), slice={[0:13], [0:31]} )"); } TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABUnscaledDBitcastF8) { const char* hlo_text = R"( HloModule test ENTRY test { x = <<F8E4M3>>[2,8,16] parameter(0) y = <<F8E4M3>>[16,16] parameter(1) x_f32 = f32[2,8,16] convert(x) y_f32 = f32[16,16] convert(y) x_scale = f32[] parameter(2) y_scale = f32[] parameter(3) x_scale_bcast = f32[2,8,16] broadcast(x_scale), dimensions={} y_scale_bcast = f32[16,16] broadcast(y_scale), dimensions={} x_unscaled = f32[2,8,16] multiply(x_f32, x_scale_bcast) y_unscaled = f32[16,16] multiply(y_f32, y_scale_bcast) x_bitcast = f32[16,16] bitcast(x_unscaled) ROOT out = f32[16,16] dot(x_bitcast, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text)); GemmRewriter pass(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_TRUE(changed); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch(m::GetTupleElement(m::CustomCall({"__cublas$lt$matmul$f8"}), 0) .WithShape(F32, {16, 16}))); } TEST_P(ParameterizedFp8GemmRewriteTest, UnscaledABUnscaledDWithConvertF8) { const char* hlo_text = R"( HloModule test ENTRY test { x = <<F8E4M3>>[16,32] parameter(0) y = <<F8E4M3>>[32,16] parameter(1) x_f32 = f32[16,32] convert(x) 
y_f32 = f32[32,16] convert(y) ROOT out = f32[16,16] dot(x_f32, y_f32), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; CheckFp8IfSupported(hlo_text); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16]) -> f32[16,16] { ; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1) ; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0} ; CHECK-NEXT: [[C1:%[^ ]+]] = f32[] constant(1) ; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[C1]], [[C1]]), ; CHECK: custom_call_target="__cublas$lt$matmul$f8", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABUnscaledDUnaryOpsF8) { const char* hlo_text = R"( HloModule test ENTRY test { x = <<F8E4M3>>[3] parameter(0) y = <<F8E4M3>>[32,16] parameter(1) x_f32 = f32[3] convert(x) y_f32 = f32[32,16] convert(y) x_scale = f32[] parameter(2) y_scale = f32[] parameter(3) x_scale_bcast = f32[3] broadcast(x_scale), dimensions={} y_scale_bcast = f32[32,16] broadcast(y_scale), dimensions={} x_unscaled = f32[3] multiply(x_f32, x_scale_bcast) zero = f32[] constant(0) x_unscaled_padded = f32[30] pad(x_unscaled, zero), padding=0_27 x_unscaled_padded_bcast = f32[30,8,5] broadcast(x_unscaled_padded), dimensions={0} x_unscaled_padded_bcast_sliced = f32[16,8,4] slice(x_unscaled_padded_bcast), slice={[2:18], [0:8], [0:4]} x_unscaled_padded_bcast_sliced_reshaped = f32[16,32] reshape(x_unscaled_padded_bcast_sliced) y_unscaled = f32[32,16] multiply(y_f32, y_scale_bcast) ROOT out = f32[16,16] dot(x_unscaled_padded_bcast_sliced_reshaped, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; CheckFp8IfSupported(hlo_text); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[3], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: f32[], {{.*}}: f32[]) -> f32[16,16] { ; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[3]{0} parameter(0) ; CHECK-NEXT: [[C0:%[^ ]+]] = f32[] constant(0) ; CHECK-NEXT: [[C0_CONVERT:%[^ ]+]] = <<F8E4M3>>[] convert([[C0]]) ; CHECK-NEXT: [[P0_U0:%[^ ]+]] = <<F8E4M3>>[30]{0} pad([[P0]], [[C0_CONVERT]]), padding=0_27 ; CHECK-NEXT: [[P0_U1:%[^ ]+]] = <<F8E4M3>>[30,8,5]{2,1,0} broadcast([[P0_U0]]), dimensions={0} ; CHECK-NEXT: [[P0_U2:%[^ ]+]] = <<F8E4M3>>[16,8,4]{2,1,0} slice([[P0_U1]]), slice={[2:18], [0:8], [0:4]} ; CHECK-NEXT: [[P0_U3:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} reshape([[P0_U2]]) ; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1) ; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0} ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[] parameter(2) ; CHECK-NEXT: [[P3:%[^ ]+]] = f32[] parameter(3) ; CHECK-NEXT: [[OUT:%[^ ]+]] = 
(f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0_U3]], [[P1_TRANSPOSE]], [[P2]], [[P3]]), ; CHECK: custom_call_target="__cublas$lt$matmul$f8", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_P(ParameterizedFp8GemmRewriteTest, UnscaledABUnscaledDUnaryOpsWithConvertF8) { const char* hlo_text = R"( HloModule test ENTRY test { x = <<F8E4M3>>[3] parameter(0) y = <<F8E4M3>>[32,16] parameter(1) x_f32 = f32[3] convert(x) y_f32 = f32[32,16] convert(y) zero = f32[] constant(0) x_padded = f32[30] pad(x_f32, zero), padding=0_27 x_padded_bcast = f32[30,8,5] broadcast(x_padded), dimensions={0} x_padded_bcast_sliced = f32[16,8,4] slice(x_padded_bcast), slice={[2:18], [0:8], [0:4]} x_padded_bcast_sliced_reshaped = f32[16,32] reshape(x_padded_bcast_sliced) ROOT out = f32[16,16] dot(x_padded_bcast_sliced_reshaped, y_f32), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; CheckFp8IfSupported(hlo_text); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[3], {{.*}}: <<F8E4M3>>[32,16]) -> f32[16,16] { ; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[3]{0} parameter(0) ; CHECK-NEXT: [[C0:%[^ ]+]] = f32[] constant(0) ; CHECK-NEXT: [[C0_CONVERT:%[^ ]+]] = <<F8E4M3>>[] convert([[C0]]) ; CHECK-NEXT: [[P0_U0:%[^ ]+]] = <<F8E4M3>>[30]{0} pad([[P0]], [[C0_CONVERT]]), padding=0_27 ; CHECK-NEXT: [[P0_U1:%[^ ]+]] = <<F8E4M3>>[30,8,5]{2,1,0} broadcast([[P0_U0]]), dimensions={0} ; CHECK-NEXT: [[P0_U2:%[^ ]+]] = <<F8E4M3>>[16,8,4]{2,1,0} slice([[P0_U1]]), slice={[2:18], [0:8], [0:4]} ; CHECK-NEXT: [[P0_U3:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} reshape([[P0_U2]]) ; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1) ; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0} ; CHECK-NEXT: [[C2:%[^ ]+]] = f32[] constant(1) ; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0_U3]], [[P1_TRANSPOSE]], [[C2]], [[C2]]), ; CHECK: custom_call_target="__cublas$lt$matmul$f8", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABUnscaledDDynamicSliceF8) { const char* hlo_text = R"( HloModule test ENTRY test { x = <<F8E4M3>>[32,32] parameter(0) y = <<F8E4M3>>[16,32] parameter(1) zero = s32[] constant(0) x_f32 = f32[32,32] convert(x) y_f32 = f32[16,32] convert(y) x_scale = f32[] parameter(2) y_scale = f32[] parameter(3) x_scale_bcast = f32[32,32] broadcast(x_scale), dimensions={} y_scale_bcast = f32[16,32] broadcast(y_scale), dimensions={} x_unscaled = f32[32,32] multiply(x_f32, x_scale_bcast) 
y_unscaled = f32[16,32] multiply(y_f32, y_scale_bcast) dyn_slice = f32[16,32]{1,0} dynamic-slice(x_unscaled, zero, zero), dynamic_slice_sizes={16,32} ROOT dot_a = f32[16,16] dot(dyn_slice, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={1} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text)); GemmRewriter pass(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_TRUE(changed); CheckFp8IfSupported(hlo_text); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[32,32], {{.*}}: <<F8E4M3>>[16,32], {{.*}}: f32[], {{.*}}: f32[]) -> f32[16,16] { ; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[32,32]{1,0} parameter(0) ; CHECK-NEXT: [[C0:%[^ ]+]] = s32[] constant(0) ; CHECK-NEXT: [[DYN_SLICE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} dynamic-slice([[P0]], [[C0]], [[C0]]), dynamic_slice_sizes={16,32} ; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(1) ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[] parameter(2) ; CHECK-NEXT: [[P3:%[^ ]+]] = f32[] parameter(3) ; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[DYN_SLICE]], [[P1]], [[P2]], [[P3]]), ; CHECK: custom_call_target="__cublas$lt$matmul$f8", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABUnscaledDSelectF8) { const char* hlo_text = R"( HloModule test ENTRY test { x = <<F8E4M3>>[16,32] parameter(0) y = <<F8E4M3>>[16,32] parameter(1) x_f32 = f32[16,32] convert(x) y_f32 = f32[16,32] convert(y) x_scale = f32[] parameter(2) y_scale = f32[] parameter(3) x_scale_bcast = f32[16,32] broadcast(x_scale), dimensions={} y_scale_bcast = f32[16,32] broadcast(y_scale), dimensions={} x_unscaled = f32[16,32] multiply(x_f32, x_scale_bcast) y_unscaled = f32[16,32] multiply(y_f32, y_scale_bcast) k = pred[16,32] parameter(4) c = f32[] constant(0) c_bcast = f32[16,32] broadcast(c), dimensions={} select_a = f32[16,32] select(k, y_unscaled, c_bcast) ROOT dot_a = f32[16,16] dot(x_unscaled, select_a), lhs_contracting_dims={1}, rhs_contracting_dims={1} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text)); GemmRewriter pass(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_TRUE(changed); CheckFp8IfSupported(hlo_text); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[16,32], {{.*}}: f32[], {{.*}}: f32[], {{.*}}: pred[16,32]) -> f32[16,16] { ; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0) ; CHECK-NEXT: [[P4:%[^ ]+]] = pred[16,32]{1,0} 
parameter(4) ; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(1) ; CHECK-NEXT: [[C0:%[^ ]+]] = f32[] constant(0) ; CHECK-NEXT: [[C0_BCAST:%[^ ]+]] = f32[16,32]{1,0} broadcast([[C0]]), dimensions={} ; CHECK-NEXT: [[C0_CONVERT:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} convert([[C0_BCAST]]) ; CHECK-NEXT: [[SELECT:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} select([[P4]], [[P1]], [[C0_CONVERT]]) ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[] parameter(2) ; CHECK-NEXT: [[P3:%[^ ]+]] = f32[] parameter(3) ; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[SELECT]], [[P2]], [[P3]]), ; CHECK: custom_call_target="__cublas$lt$matmul$f8", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABUnscaledDSelectNonzeroConstantF8) { const char* hlo_text = R"( HloModule test ENTRY test { x = <<F8E4M3>>[16,32] parameter(0) y = <<F8E4M3>>[16,32] parameter(1) x_f32 = f32[16,32] convert(x) y_f32 = f32[16,32] convert(y) x_scale = f32[] parameter(2) y_scale = f32[] parameter(3) x_scale_bcast = f32[16,32] broadcast(x_scale), dimensions={} y_scale_bcast = f32[16,32] broadcast(y_scale), dimensions={} x_unscaled = f32[16,32] multiply(x_f32, x_scale_bcast) y_unscaled = f32[16,32] multiply(y_f32, y_scale_bcast) k = pred[16,32] parameter(4) c = f32[] constant(1) c_bcast = f32[16,32] broadcast(c), dimensions={} select_a = f32[16,32] select(k, y_unscaled, c_bcast) ROOT dot_a = f32[16,16] dot(x_unscaled, select_a), lhs_contracting_dims={1}, rhs_contracting_dims={1} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text)); GemmRewriter pass(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_FALSE(changed); } TEST_P(ParameterizedFp8GemmRewriteTest, BatchedScaledABUnscaledDF8) { const char* hlo_text = R"( HloModule test ENTRY test { x = <<F8E4M3>>[10,16,32] parameter(0) y = <<F8E4M3>>[10,32,16] parameter(1) x_f32 = f32[10,16,32] convert(x) y_f32 = f32[10,32,16] convert(y) x_scale = f32[] parameter(2) y_scale = f32[] parameter(3) x_scale_bcast = f32[10,16,32] broadcast(x_scale), dimensions={} y_scale_bcast = f32[10,32,16] broadcast(y_scale), dimensions={} x_unscaled = f32[10,16,32] multiply(x_f32, x_scale_bcast) y_unscaled = f32[10,32,16] multiply(y_f32, y_scale_bcast) ROOT out = f32[10,16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={2}, rhs_contracting_dims={1}, lhs_batch_dims={0}, rhs_batch_dims={0} } )"; CheckFp8IfSupported(hlo_text); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[10,16,32], {{.*}}: <<F8E4M3>>[10,32,16], {{.*}}: f32[], {{.*}}: f32[]) -> f32[10,16,16] { ; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[10,16,32]{2,1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[10,32,16]{2,1,0} parameter(1) ; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[10,16,32]{2,1,0} 
transpose([[P1]]), dimensions={0,2,1} ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[] parameter(2) ; CHECK-NEXT: [[P3:%[^ ]+]] = f32[] parameter(3) ; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[10,16,16]{2,1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2]], [[P3]]), ; CHECK: custom_call_target="__cublas$lt$matmul$f8", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["2"] ; CHECK-DAG: "rhs_contracting_dimensions":["2"] ; CHECK-DAG: "lhs_batch_dimensions":["0"] ; CHECK-DAG: "rhs_batch_dimensions":["0"] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABAlphaDF8) { const char* hlo_text = R"( HloModule test ENTRY test { x = <<F8E4M3>>[16,32] parameter(0) y = <<F8E4M3>>[32,16] parameter(1) x_f32 = f32[16,32] convert(x) y_f32 = f32[32,16] convert(y) x_scale = f32[] parameter(2) y_scale = f32[] parameter(3) x_scale_bcast = f32[16,32] broadcast(x_scale), dimensions={} y_scale_bcast = f32[32,16] broadcast(y_scale), dimensions={} x_unscaled = f32[16,32] multiply(x_f32, x_scale_bcast) y_unscaled = f32[32,16] multiply(y_f32, y_scale_bcast) k = f32[] constant(3.0) k_bcast = f32[16,16] broadcast(k), dimensions={} dot_a = f32[16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT out = f32[16,16] multiply(dot_a, k_bcast) } )"; CheckFp8IfSupported(hlo_text); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: f32[], {{.*}}: f32[]) -> f32[16,16] { ; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1) ; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0} ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[] parameter(2) ; CHECK-NEXT: [[P3:%[^ ]+]] = f32[] parameter(3) ; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2]], [[P3]]), ; CHECK: custom_call_target="__cublas$lt$matmul$f8", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":3 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABUnscaledDReluActivationF8) { const char* hlo_text = R"( HloModule test ENTRY test { x = <<F8E4M3>>[16,32] parameter(0) y = <<F8E4M3>>[32,16] parameter(1) x_f32 = f32[16,32] convert(x) y_f32 = f32[32,16] convert(y) x_scale = f32[] parameter(2) y_scale = f32[] parameter(3) x_scale_bcast = f32[16,32] broadcast(x_scale), dimensions={} y_scale_bcast = f32[32,16] broadcast(y_scale), dimensions={} x_unscaled = f32[16,32] multiply(x_f32, x_scale_bcast) y_unscaled = f32[32,16] multiply(y_f32, y_scale_bcast) dot_a = f32[16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0} c = f32[] 
constant(0) c_bcast = f32[16,16] broadcast(c), dimensions={} ROOT out = f32[16,16] maximum(dot_a, c_bcast) } )"; CheckFp8IfSupported(hlo_text); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: f32[], {{.*}}: f32[]) -> f32[16,16] { ; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1) ; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0} ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[] parameter(2) ; CHECK-NEXT: [[P3:%[^ ]+]] = f32[] parameter(3) ; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2]], [[P3]]), ; CHECK: custom_call_target="__cublas$lt$matmul$f8", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"RELU" ; CHECK: } )"); } TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABUnscaledDVectorBiasThenApproxGeluActivationF8) { const char* hlo_text = R"( HloModule test ENTRY test { x = <<F8E4M3>>[16,32] parameter(0) y = <<F8E4M3>>[32,16] parameter(1) x_bf16 = bf16[16,32] convert(x) y_bf16 = bf16[32,16] convert(y) x_scale = bf16[] parameter(2) y_scale = bf16[] parameter(3) bias = bf16[16] parameter(4) x_scale_bcast = bf16[16,32] broadcast(x_scale), dimensions={} y_scale_bcast = bf16[32,16] broadcast(y_scale), dimensions={} x_unscaled = bf16[16,32] multiply(x_bf16, x_scale_bcast) y_unscaled = bf16[32,16] multiply(y_bf16, y_scale_bcast) dot1 = bf16[16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0} b_bcast = bf16[16,16] broadcast(bias), dimensions={1} dot = bf16[16,16] add(dot1, b_bcast) mul.0 = bf16[16,16] multiply(dot, dot) mul.1 = bf16[16,16] multiply(dot, mul.0) const.0 = bf16[] constant(0.044715) bcast.0 = bf16[16,16] broadcast(const.0), dimensions={} mul.2 = bf16[16,16] multiply(mul.1, bcast.0) add.0 = bf16[16,16] add(dot, mul.2) const.1 = bf16[] constant(0.797884583) bcast.1 = bf16[16,16] broadcast(const.1), dimensions={} mul.3 = bf16[16,16] multiply(add.0, bcast.1) tanh = bf16[16,16] tanh(mul.3) const.2 = bf16[] constant(1) bcast.2 = bf16[16,16] broadcast(const.2), dimensions={} add.2 = bf16[16,16] add(tanh, bcast.2) const.3 = bf16[] constant(0.5) bcast.3 = bf16[16,16] broadcast(const.3), dimensions={} mul.4 = bf16[16,16] multiply(add.2, bcast.3) ROOT out = bf16[16,16] multiply(dot, mul.4) } )"; CheckFp8IfSupported(hlo_text); if ((IsCuda() && GetToolkitVersion() >= se::SemanticVersion{12, 4, 0}) || IsRocm()) { std::string checks = R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: bf16[], {{.*}}: bf16[], {{.*}}: bf16[16]) -> bf16[16,16] { ; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1) ; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0} ; CHECK-NEXT: [[P2:%[^ ]+]] = bf16[] parameter(2) ; CHECK-NEXT: [[XS:%[^ ]+]] 
= f32[] convert([[P2]]) ; CHECK-NEXT: [[P3:%[^ ]+]] = bf16[] parameter(3) ; CHECK-NEXT: [[XS1:%[^ ]+]] = f32[] convert([[P3]]) )"; if (IsRocm() && GetToolkitVersion() < se::SemanticVersion{6, 2, 0}) { checks += R"(; CHECK-GCN-NEXT: [[OUT:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[XS]], [[XS1]]), )"; } else { checks += R"(; CHECK-NEXT: [[B:%[^ ]+]] = bf16[16]{0} parameter(4) ; CHECK-NEXT: [[OUT:%[^ ]+]] = (bf16[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[XS]], [[XS1]], [[B]]), )"; } checks += R"(; CHECK: custom_call_target="__cublas$lt$matmul$f8", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } )"; if (IsRocm() && GetToolkitVersion() < se::SemanticVersion{6, 2, 0}) { checks += R"(; CHECK-GCN-DAG: "epilogue":"DEFAULT" )"; } else { checks += R"(; CHECK-DAG: "epilogue":"BIAS_GELU" )"; } checks += R"(; CHECK: } )"; RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), checks); } } TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABUnscaledDApproxGeluActivationF8) { const char* hlo_text = R"( HloModule test ENTRY test { x = <<F8E4M3>>[16,32] parameter(0) y = <<F8E4M3>>[32,16] parameter(1) x_bf16 = bf16[16,32] convert(x) y_bf16 = bf16[32,16] convert(y) x_scale = bf16[] parameter(2) y_scale = bf16[] parameter(3) x_scale_bcast = bf16[16,32] broadcast(x_scale), dimensions={} y_scale_bcast = bf16[32,16] broadcast(y_scale), dimensions={} x_unscaled = bf16[16,32] multiply(x_bf16, x_scale_bcast) y_unscaled = bf16[32,16] multiply(y_bf16, y_scale_bcast) dot = bf16[16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0} mul.0 = bf16[16,16] multiply(dot, dot) mul.1 = bf16[16,16] multiply(dot, mul.0) const.0 = bf16[] constant(0.044715) bcast.0 = bf16[16,16] broadcast(const.0), dimensions={} mul.2 = bf16[16,16] multiply(mul.1, bcast.0) add.0 = bf16[16,16] add(dot, mul.2) const.1 = bf16[] constant(0.797884583) bcast.1 = bf16[16,16] broadcast(const.1), dimensions={} mul.3 = bf16[16,16] multiply(add.0, bcast.1) tanh = bf16[16,16] tanh(mul.3) const.2 = bf16[] constant(1) bcast.2 = bf16[16,16] broadcast(const.2), dimensions={} add.2 = bf16[16,16] add(tanh, bcast.2) const.3 = bf16[] constant(0.5) bcast.3 = bf16[16,16] broadcast(const.3), dimensions={} mul.4 = bf16[16,16] multiply(add.2, bcast.3) ROOT out = bf16[16,16] multiply(dot, mul.4) } )"; CheckFp8IfSupported(hlo_text); if ((IsCuda() && GetToolkitVersion() >= se::SemanticVersion{12, 4, 0}) || IsRocm()) { std::string checks = R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: bf16[], {{.*}}: bf16[]) -> bf16[16,16] { ; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1) ; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0} ; CHECK-NEXT: [[P2:%[^ ]+]] = bf16[] parameter(2) ; CHECK-NEXT: [[XS:%[^ ]+]] = f32[] convert([[P2]]) ; CHECK-NEXT: [[P3:%[^ ]+]] = bf16[] parameter(3) ; CHECK-NEXT: [[XS1:%[^ ]+]] = f32[] 
convert([[P3]]) )"; if (IsRocm() && GetToolkitVersion() < se::SemanticVersion{6, 2, 0}) { checks += R"(; CHECK-GCN-NEXT: [[OUT:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[XS]], [[XS1]]), )"; } else { checks += R"(; CHECK-NEXT: [[OUT:%[^ ]+]] = (bf16[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[XS]], [[XS1]]), )"; } checks += R"(; CHECK: custom_call_target="__cublas$lt$matmul$f8", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } )"; if (IsRocm() && GetToolkitVersion() < se::SemanticVersion{6, 2, 0}) { checks += R"(; CHECK-GCN-DAG: "epilogue":"DEFAULT" )"; } else { checks += R"(; CHECK-DAG: "epilogue":"GELU" )"; } checks += R"(; CHECK: } )"; RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), checks); } } TEST_P(ParameterizedFp8GemmRewriteTest, InvScaledABUnscaledDF8) { const char* hlo_text = R"( HloModule test ENTRY test { x = <<F8E4M3>>[16,32] parameter(0) y = <<F8E4M3>>[32,16] parameter(1) x_f32 = f32[16,32] convert(x) y_f32 = f32[32,16] convert(y) x_scale = f32[] parameter(2) y_scale = f32[] parameter(3) x_scale_bcast = f32[16,32] broadcast(x_scale), dimensions={} y_scale_bcast = f32[32,16] broadcast(y_scale), dimensions={} x_unscaled = f32[16,32] divide(x_f32, x_scale_bcast) y_unscaled = f32[32,16] divide(y_f32, y_scale_bcast) ROOT out = f32[16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; CheckFp8IfSupported(hlo_text); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK: custom_call_target="__cublas$lt$matmul$f8", )"); } TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABUnscaledDMatrixBiasF8) { const char* hlo_text = R"( HloModule test ENTRY test { x = <<F8E4M3>>[16,32] parameter(0) y = <<F8E4M3>>[32,16] parameter(1) b = f32[16,16] parameter(2) one = f32[] constant(1) ones = f32[16,16] broadcast(one), dimensions={} b_ones = f32[16,16] add(b, ones) x_f32 = f32[16,32] convert(x) y_f32 = f32[32,16] convert(y) x_scale = f32[] parameter(3) y_scale = f32[] parameter(4) x_scale_bcast = f32[16,32] broadcast(x_scale), dimensions={} y_scale_bcast = f32[32,16] broadcast(y_scale), dimensions={} x_unscaled = f32[16,32] multiply(x_f32, x_scale_bcast) y_unscaled = f32[32,16] multiply(y_f32, y_scale_bcast) dot_a = f32[16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT out = add(dot_a, b_ones) } )"; CheckFp8IfSupported(hlo_text); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: f32[16,16], {{.*}}: f32[], {{.*}}: f32[]) -> f32[16,16] { ; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1) ; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), 
dimensions={1,0} ; CHECK: [[C0:%[^ ]+]] = f32[16,16]{1,0} add({{.*}}) ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[] parameter(3) ; CHECK-NEXT: [[P3:%[^ ]+]] = f32[] parameter(4) ; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[C0]], [[P2]], [[P3]]), ; CHECK: custom_call_target="__cublas$lt$matmul$f8", ; CHECK: output_to_operand_aliasing={ ; CHECK-SAME: {0}: (2, {}) ; CHECK-SAME: } ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":1 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABUnscaledDMatrixBiasPaddedF8) { const char* hlo_text = R"( HloModule test ENTRY test { x = <<F8E4M3>>[14,31] parameter(0) y = <<F8E4M3>>[31,14] parameter(1) b = f32[14,14] parameter(2) x_f32 = f32[14,31] convert(x) y_f32 = f32[31,14] convert(y) x_scale = f32[] parameter(3) y_scale = f32[] parameter(4) x_scale_bcast = f32[14,31] broadcast(x_scale), dimensions={} y_scale_bcast = f32[31,14] broadcast(y_scale), dimensions={} x_unscaled = f32[14,31] multiply(x_f32, x_scale_bcast) y_unscaled = f32[31,14] multiply(y_f32, y_scale_bcast) dot_a = f32[14,14] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT out = add(dot_a, b) } )"; CheckFp8IfSupported(hlo_text); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[14,31], {{.*}}: <<F8E4M3>>[31,14], {{.*}}: f32[14,14], {{.*}}: f32[], {{.*}}: f32[]) -> f32[14,14] { ; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[14,31]{1,0} parameter(0) ; CHECK-NEXT: [[C0:%[^ ]+]] = <<F8E4M3>>[] constant(0) ; CHECK-NEXT: [[P0_PADDED:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} pad([[P0]], [[C0]]), padding=0_2x0_1 ; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[31,14]{1,0} parameter(1) ; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[14,31]{1,0} transpose([[P1]]), dimensions={1,0} ; CHECK-NEXT: [[C1:%[^ ]+]] = <<F8E4M3>>[] constant(0) ; CHECK-NEXT: [[P1_TRANSPOSE_PADDED:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} pad([[P1_TRANSPOSE]], [[C1]]), padding=0_2x0_1 ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[14,14]{1,0} parameter(2) ; CHECK-NEXT: [[C2:%[^ ]+]] = f32[] constant(0) ; CHECK-NEXT: [[P2_PADDED:%[^ ]+]] = f32[16,16]{1,0} pad([[P2]], [[C2]]), padding=0_2x0_2 ; CHECK-NEXT: [[P3:%[^ ]+]] = f32[] parameter(3) ; CHECK-NEXT: [[P4:%[^ ]+]] = f32[] parameter(4) ; CHECK-NEXT: [[DOT_TUPLE:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0_PADDED]], [[P1_TRANSPOSE_PADDED]], [[P2_PADDED]], [[P3]], [[P4]]), ; CHECK: custom_call_target="__cublas$lt$matmul$f8", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":1 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } ; CHECK: [[DOT:%[^ ]+]] = 
f32[16,16]{1,0} get-tuple-element([[DOT_TUPLE]]), index=0 ; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = f32[14,14]{1,0} slice([[DOT]]), slice={[0:14], [0:14]} )"); } TEST_P(ParameterizedFp8GemmRewriteTest, UnscaledABScaledDF8) { const char* hlo_text = R"( HloModule test ENTRY test { x = <<F8E4M3>>[16,32] parameter(0) y = <<F8E4M3>>[32,16] parameter(1) z_scale = f32[] parameter(2) z_scale_bcast = f32[16,16] broadcast(z_scale), dimensions={} dot_a = f32[16,16] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} dot_a_scaled = f32[16,16] divide(dot_a, z_scale_bcast) c1 = f32[] constant(-448.) c1_bcast = f32[16,16] broadcast(c1), dimensions={} c2 = f32[] constant(448.) c2_bcast = f32[16,16] broadcast(c2), dimensions={} dot_a_clamped = f32[16,16] clamp(c1_bcast, dot_a_scaled, c2_bcast) ROOT dot_a_f8 = <<F8E4M3>>[16,16] convert(dot_a_clamped) } )"; CheckFp8IfSupported(hlo_text, ErrorSpec{1e-2, 1e-1}); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: f32[]) -> <<F8E4M3>>[16,16] { ; CHECK: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1) ; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0} ; CHECK-NEXT: [[C0:%[^ ]+]] = f32[] constant(1) ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[] parameter(2) ; CHECK-NEXT: [[P2_INV:%[^ ]+]] = f32[] divide([[C0]], [[P2]]) ; CHECK-NEXT: [[C1:%[^ ]+]] = f32[] constant(1) ; CHECK-NEXT: [[C2:%[^ ]+]] = f32[] constant(1) ; CHECK-PTX-NEXT: [[OUT:%[^ ]+]] = (<<F8E4M3>>[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2_INV]], [[C1]], [[C2]]), ; CHECK-GCN-NEXT: [[OUT:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2_INV]], [[C1]], [[C2]]), ; CHECK: custom_call_target="__cublas$lt$matmul$f8", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_P(ParameterizedFp8GemmRewriteTest, UnscaledABScaledF32DF8) { const char* hlo_text = R"( HloModule test ENTRY test { x = <<F8E4M3>>[16,32] parameter(0) y = <<F8E4M3>>[32,16] parameter(1) z_scale = f32[] parameter(2) z_scale_bcast = f32[16,16] broadcast(z_scale), dimensions={} dot_a = f32[16,16] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT dot_a_scaled = f32[16,16] divide(dot_a, z_scale_bcast) } )"; CheckFp8IfSupported(hlo_text, ErrorSpec{1e-2, 1e-1}); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: f32[]) -> f32[16,16] { ; CHECK: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1) ; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0} ; CHECK-NEXT: [[C0:%[^ ]+]] = f32[] constant(1) ; CHECK-NEXT: [[P2:%[^ ]+]] 
= f32[] parameter(2) ; CHECK-NEXT: [[P2_INV:%[^ ]+]] = f32[] divide([[C0]], [[P2]]) ; CHECK-NEXT: [[C1:%[^ ]+]] = f32[] constant(1) ; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2_INV]], [[C1]]), ; CHECK: custom_call_target="__cublas$lt$matmul$f8", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_P(ParameterizedFp8GemmRewriteTest, UnscaledABInvScaledF32DF8) { const char* hlo_text = R"( HloModule test ENTRY test { x = <<F8E4M3>>[16,32] parameter(0) y = <<F8E4M3>>[32,16] parameter(1) z_scale = f32[] parameter(2) z_scale_bcast = f32[16,16] broadcast(z_scale), dimensions={} dot_a = f32[16,16] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT dot_a_scaled = f32[16,16] multiply(dot_a, z_scale_bcast) } )"; CheckFp8IfSupported(hlo_text, ErrorSpec{1e-2, 1e-1}); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: f32[]) -> f32[16,16] { ; CHECK: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1) ; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0} ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[] parameter(2) ; CHECK-NEXT: [[C0:%[^ ]+]] = f32[] constant(1) ; CHECK-NEXT: [[OUT:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2]], [[C0]]), ; CHECK: custom_call_target="__cublas$lt$matmul$f8", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_P(ParameterizedFp8GemmRewriteTest, UnscaledABScaledF32DMatrixBiasF8) { const char* hlo_text = R"( HloModule test ENTRY test { x = <<F8E4M3>>[16,32] parameter(0) y = <<F8E4M3>>[32,16] parameter(1) b = f32[16,16] parameter(2) z_scale = f32[] parameter(3) z_scale_bcast = f32[16,16] broadcast(z_scale), dimensions={} dot_a = f32[16,16] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} dot_a_bias = f32[16,16] add(dot_a, b) ROOT dot_a_scaled = f32[16,16] divide(dot_a_bias, z_scale_bcast) } )"; CheckFp8IfSupported(hlo_text, ErrorSpec{1e-2, 1e-1}); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: f32[]) -> f32[16,16] { ; CHECK: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1) ; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = 
<<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0} ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[16,16]{1,0} parameter(2) ; CHECK-NEXT: [[C0:%[^ ]+]] = f32[] constant(1) ; CHECK-NEXT: [[GEMM_TUPLE:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2]], [[C0]], [[C0]]), ; CHECK: custom_call_target="__cublas$lt$matmul$f8", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":1 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK-PTX-NEXT: [[GEMM:%[^ ]+]] = f32[16,16]{1,0} get-tuple-element([[GEMM_TUPLE]]), index=0 ; CHECK-PTX-NEXT: [[P3:%[^ ]+]] = f32[] parameter(3) ; CHECK-PTX-NEXT: [[P3_BCAST:%[^ ]+]] = f32[16,16]{1,0} broadcast([[P3]]), dimensions={} ; CHECK-PTX-NEXT: ROOT [[OUT:%[^ ]+]] = f32[16,16]{1,0} divide([[GEMM]], [[P3_BCAST]]) ; CHECK: } )"); } TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABScaledDF8) { const char* hlo_text = R"( HloModule test ENTRY test { x = <<F8E4M3>>[16,32] parameter(0) y = <<F8E4M3>>[32,16] parameter(1) x_f32 = f32[16,32] convert(x) y_f32 = f32[32,16] convert(y) x_scale = f32[] parameter(2) y_scale = f32[] parameter(3) z_scale = f32[] parameter(4) x_scale_bcast = f32[16,32] broadcast(x_scale), dimensions={} y_scale_bcast = f32[32,16] broadcast(y_scale), dimensions={} z_scale_bcast = f32[16,16] broadcast(z_scale), dimensions={} x_unscaled = f32[16,32] multiply(x_f32, x_scale_bcast) y_unscaled = f32[32,16] multiply(y_f32, y_scale_bcast) dot_a = f32[16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0} dot_a_scaled = f32[16,16] divide(dot_a, z_scale_bcast) c1 = f32[] constant(-<<F8E4M3_AMAX>>) c1_bcast = f32[16,16] broadcast(c1), dimensions={} c2 = f32[] constant(<<F8E4M3_AMAX>>) c2_bcast = f32[16,16] broadcast(c2), dimensions={} dot_a_clamped = f32[16,16] clamp(c1_bcast, dot_a_scaled, c2_bcast) ROOT dot_a_f8 = <<F8E4M3>>[16,16] convert(dot_a_clamped) } )"; CheckFp8IfSupported(hlo_text); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: f32[], {{.*}}: f32[], {{.*}}: f32[]) -> <<F8E4M3>>[16,16] { ; CHECK: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1) ; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0} ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[] parameter(2) ; CHECK-NEXT: [[P3:%[^ ]+]] = f32[] parameter(3) ; CHECK-PTX-NEXT: [[C2:%[^ ]+]] = f32[] constant(1) ; CHECK-PTX-NEXT: [[P4:%[^ ]+]] = f32[] parameter(4) ; CHECK-PTX-NEXT: [[P4_INV:%[^ ]+]] = f32[] divide([[C2]], [[P4]]) ; CHECK-PTX-NEXT: [[OUT:%[^ ]+]] = (<<F8E4M3>>[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2]], [[P3]], [[P4_INV]]), ; CHECK-GCN-NEXT: [[C1:%[^ ]+]] = f32[] constant(1) ; CHECK-GCN-NEXT: [[OUT:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2]], [[P3]], [[C1]]), ; CHECK: custom_call_target="__cublas$lt$matmul$f8", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: 
"alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABInvScaledDF8) { const char* hlo_text = R"( HloModule test ENTRY test { x = <<F8E4M3>>[16,32] parameter(0) y = <<F8E4M3>>[32,16] parameter(1) x_f32 = f32[16,32] convert(x) y_f32 = f32[32,16] convert(y) x_scale = f32[] parameter(2) y_scale = f32[] parameter(3) z_scale = f32[] parameter(4) x_scale_bcast = f32[16,32] broadcast(x_scale), dimensions={} y_scale_bcast = f32[32,16] broadcast(y_scale), dimensions={} z_scale_bcast = f32[16,16] broadcast(z_scale), dimensions={} x_unscaled = f32[16,32] multiply(x_f32, x_scale_bcast) y_unscaled = f32[32,16] multiply(y_f32, y_scale_bcast) dot_a = f32[16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0} dot_a_scaled = f32[16,16] multiply(dot_a, z_scale_bcast) c1 = f32[] constant(-<<F8E4M3_AMAX>>) c1_bcast = f32[16,16] broadcast(c1), dimensions={} c2 = f32[] constant(<<F8E4M3_AMAX>>) c2_bcast = f32[16,16] broadcast(c2), dimensions={} dot_a_clamped = f32[16,16] clamp(c1_bcast, dot_a_scaled, c2_bcast) ROOT dot_a_f8 = <<F8E4M3>>[16,16] convert(dot_a_clamped) } )"; CheckFp8IfSupported(hlo_text); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK-NOT: divide ; CHECK: custom_call_target="__cublas$lt$matmul$f8", )"); } TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABScaledDReluActivationF8) { const char* hlo_text = R"( HloModule test ENTRY test { x = <<F8E4M3>>[16,32] parameter(0) y = <<F8E4M3>>[32,16] parameter(1) x_f32 = f32[16,32] convert(x) y_f32 = f32[32,16] convert(y) x_scale = f32[] parameter(2) y_scale = f32[] parameter(3) z_scale = f32[] parameter(4) x_scale_bcast = f32[16,32] broadcast(x_scale), dimensions={} y_scale_bcast = f32[32,16] broadcast(y_scale), dimensions={} z_scale_bcast = f32[16,16] broadcast(z_scale), dimensions={} x_unscaled = f32[16,32] multiply(x_f32, x_scale_bcast) y_unscaled = f32[32,16] multiply(y_f32, y_scale_bcast) c = f32[] constant(0) c_bcast = f32[16,16] broadcast(c), dimensions={} dot_a = f32[16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0} relu_a = f32[16,16] maximum(dot_a, c_bcast) relu_a_scaled = f32[16,16] divide(relu_a, z_scale_bcast) c1 = f32[] constant(-<<F8E4M3_AMAX>>) c1_bcast = f32[16,16] broadcast(c1), dimensions={} c2 = f32[] constant(<<F8E4M3_AMAX>>) c2_bcast = f32[16,16] broadcast(c2), dimensions={} relu_a_clamped = f32[16,16] clamp(c1_bcast, relu_a_scaled, c2_bcast) ROOT out = <<F8E4M3>>[16,16] convert(relu_a_clamped) } )"; CheckFp8IfSupported(hlo_text); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: f32[], {{.*}}: f32[], {{.*}}: f32[]) -> <<F8E4M3>>[16,16] { ; CHECK: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1) ; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), 
dimensions={1,0} ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[] parameter(2) ; CHECK-NEXT: [[P3:%[^ ]+]] = f32[] parameter(3) ; CHECK-PTX-NEXT: [[C2:%[^ ]+]] = f32[] constant(1) ; CHECK-PTX-NEXT: [[P4:%[^ ]+]] = f32[] parameter(4) ; CHECK-PTX-NEXT: [[P4_INV:%[^ ]+]] = f32[] divide([[C2]], [[P4]]) ; CHECK-PTX-NEXT: [[OUT:%[^ ]+]] = (<<F8E4M3>>[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2]], [[P3]], [[P4_INV]]), ; CHECK-GCN-NEXT: [[C1:%[^ ]+]] = f32[] constant(1) ; CHECK-GCN-NEXT: [[OUT:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2]], [[P3]], [[C1]]), ; CHECK: custom_call_target="__cublas$lt$matmul$f8", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"RELU" ; CHECK: } )"); } TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABScaledDMatrixBiasWithDAmaxF8) { const char* hlo_text = R"( HloModule test apply { a = f16[] parameter(0) b = f16[] parameter(1) ROOT c = f16[] maximum(a, b) } ENTRY test { x = <<F8E4M3>>[16,32] parameter(0) y = <<F8E4M3>>[32,16] parameter(1) x_f16 = f16[16,32] convert(x) y_f16 = f16[32,16] convert(y) b = f16[16,16] parameter(2) one = f16[] constant(1) ones = f16[16,16] broadcast(one), dimensions={} b_ones = f16[16,16] add(b, ones) x_scale = f16[] parameter(3) y_scale = f16[] parameter(4) z_scale = f16[] parameter(5) x_scale_bcast = f16[16,32] broadcast(x_scale), dimensions={} y_scale_bcast = f16[32,16] broadcast(y_scale), dimensions={} z_scale_bcast = f16[16,16] broadcast(z_scale), dimensions={} x_unscaled = f16[16,32] multiply(x_f16, x_scale_bcast) y_unscaled = f16[32,16] multiply(y_f16, y_scale_bcast) dot_a = f16[16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0} dot_a_bias = f16[16,16] add(dot_a, b_ones) abs_dot_a = f16[16,16] abs(dot_a_bias) c0 = f16[] constant(-inf) amax = f16[] reduce(abs_dot_a, c0), dimensions={0,1}, to_apply=apply dot_a_scaled = f16[16,16] divide(dot_a_bias, z_scale_bcast) c1 = f16[] constant(-<<F8E4M3_AMAX>>) c1_bcast = f16[16,16] broadcast(c1), dimensions={} c2 = f16[] constant(<<F8E4M3_AMAX>>) c2_bcast = f16[16,16] broadcast(c2), dimensions={} dot_a_clamped = f16[16,16] clamp(c1_bcast, dot_a_scaled, c2_bcast) dot_a_f8 = <<F8E4M3>>[16,16] convert(dot_a_clamped) ROOT result = (<<F8E4M3>>[16,16], f16[]) tuple(dot_a_f8, amax) } )"; CheckFp8IfSupported(hlo_text, ErrorSpec{0.1, 0.1}); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: f16[16,16], {{.*}}: f16[], {{.*}}: f16[], {{.*}}: f16[]) -> (<<F8E4M3>>[16,16], f16[]) { ; CHECK: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1) ; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0} ; CHECK: [[C0:%[^ ]+]] = f16[16,16]{1,0} add({{.*}}) ; CHECK-NEXT: [[P2:%[^ ]+]] = f16[] parameter(3) ; CHECK: [[P3:%[^ ]+]] = f16[] parameter(4) ; CHECK-PTX: [[P4:%[^ ]+]] = f16[] parameter(5) ; CHECK-PTX:
[[OUT:%[^ ]+]] = (<<F8E4M3>>[16,16]{1,0}, f32[], s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[C0]], [[DUMMY0:%[^ ]+]], [[DUMMY1:%[^ ]+]], [[DUMMY2:%[^ ]+]]), ; CHECK-NOT: output_to_operand_aliasing ; CHECK-GCN: [[OUT:%[^ ]+]] = (f16[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[C0]], [[DUMMY0:%[^ ]+]], [[DUMMY1:%[^ ]+]], [[DUMMY2:%[^ ]+]]), ; CHECK: custom_call_target="__cublas$lt$matmul$f8", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":1 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABScaledDVectorBiasF8) { const char* hlo_text = R"( HloModule test ENTRY test { x = <<F8E4M3>>[16,32] parameter(0) y = <<F8E4M3>>[32,16] parameter(1) x_f16 = f16[16,32] convert(x) y_f16 = f16[32,16] convert(y) b = f16[16] parameter(2) b_bcast = f16[16,16] broadcast(b), dimensions={1} x_scale = f16[] parameter(3) y_scale = f16[] parameter(4) z_scale = f16[] parameter(5) x_scale_bcast = f16[16,32] broadcast(x_scale), dimensions={} y_scale_bcast = f16[32,16] broadcast(y_scale), dimensions={} z_scale_bcast = f16[16,16] broadcast(z_scale), dimensions={} x_unscaled = f16[16,32] multiply(x_f16, x_scale_bcast) y_unscaled = f16[32,16] multiply(y_f16, y_scale_bcast) dot_a = f16[16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0} dot_a_bias = f16[16,16] add(dot_a, b_bcast) dot_a_scaled = f16[16,16] divide(dot_a_bias, z_scale_bcast) c1 = f16[] constant(-<<F8E4M3_AMAX>>) c1_bcast = f16[16,16] broadcast(c1), dimensions={} c2 = f16[] constant(<<F8E4M3_AMAX>>) c2_bcast = f16[16,16] broadcast(c2), dimensions={} dot_a_clamped = f16[16,16] clamp(c1_bcast, dot_a_scaled, c2_bcast) ROOT dot_a_f8 = <<F8E4M3>>[16,16] convert(dot_a_clamped) } )"; CheckFp8IfSupported(hlo_text, ErrorSpec{0.1, 0.1}); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: f16[16], {{.*}}: f16[], {{.*}}: f16[], {{.*}}: f16[]) -> <<F8E4M3>>[16,16] { ; CHECK: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1) ; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0} ; CHECK-NEXT: [[P2:%[^ ]+]] = f16[] parameter(3) ; CHECK-NEXT: [[CV:%[^ ]+]] = f32[] convert([[P2]]) ; CHECK-NEXT: [[P3:%[^ ]+]] = f16[] parameter(4) ; CHECK-NEXT: [[CV1:%[^ ]+]] = f32[] convert([[P3]]) ; CHECK-NEXT: [[VB:%[^ ]+]] = f16[16]{0} parameter(2) ; CHECK-PTX-NEXT: [[C2:%[^ ]+]] = f16[] constant(1) ; CHECK-PTX-NEXT: [[P4:%[^ ]+]] = f16[] parameter(5) ; CHECK-PTX-NEXT: [[DV:%[^ ]+]] = f16[] divide([[C2]], [[P4]]) ; CHECK-PTX-NEXT: [[CV2:%[^ ]+]] = f32[] convert([[DV]]) ; CHECK-PTX: [[OUT:%[^ ]+]] = (<<F8E4M3>>[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[CV]], [[CV1]], [[VB]], [[CV2]]), ; CHECK-GCN: [[C:%[^ ]+]] = f32[] constant(1) ; CHECK-GCN: [[OUT:%[^ ]+]] = (f16[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[CV]], [[CV1]], [[C]], 
[[C]], [[VB]]), ; CHECK: custom_call_target="__cublas$lt$matmul$f8", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"BIAS" ; CHECK: } )"); } TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABUnscaledDF32VectorBiasF8) { const char* hlo_text = R"( HloModule test ENTRY test { x = <<F8E4M3>>[16,32] parameter(0) y = <<F8E4M3>>[32,16] parameter(1) x_f32 = f32[16,32] convert(x) y_f32 = f32[32,16] convert(y) b = f32[16] parameter(2) b_bf16 = bf16[16] convert(b) b_f32 = f32[16] convert(b_bf16) b_bcast = f32[16,16] broadcast(b_f32), dimensions={1} x_scale = f32[] parameter(3) y_scale = f32[] parameter(4) x_scale_bcast = f32[16,32] broadcast(x_scale), dimensions={} y_scale_bcast = f32[32,16] broadcast(y_scale), dimensions={} x_unscaled = f32[16,32] multiply(x_f32, x_scale_bcast) y_unscaled = f32[32,16] multiply(y_f32, y_scale_bcast) dot_a = f32[16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT out = f32[16,16] add(dot_a, b_bcast) } )"; CheckFp8IfSupported(hlo_text); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: f32[16], {{.*}}: f32[], {{.*}}: f32[]) -> f32[16,16] { ; CHECK: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1) ; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0} ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[] parameter(3) ; CHECK-NEXT: [[P3:%[^ ]+]] = f32[] parameter(4) ; CHECK-NEXT: [[VB:%[^ ]+]] = f32[16]{0} parameter(2) ; CHECK-NEXT: [[VBC:%[^ ]+]] = bf16[16]{0} convert([[VB]]) ; CHECK: [[OUT:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2]], [[P3]], [[VBC]]), ; CHECK: custom_call_target="__cublas$lt$matmul$f8", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"BIAS" ; CHECK: } )"); } TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABUnscaledDVectorBiasThenReluActivationF8) { const char* hlo_text = R"( HloModule test ENTRY test { x = <<F8E4M3>>[16,32] parameter(0) y = <<F8E4M3>>[32,16] parameter(1) b = f16[16] parameter(2) b_bcast = f16[16,16] broadcast(b), dimensions={1} x_f32 = f16[16,32] convert(x) y_f32 = f16[32,16] convert(y) x_scale = f16[] parameter(3) y_scale = f16[] parameter(4) x_scale_bcast = f16[16,32] broadcast(x_scale), dimensions={} y_scale_bcast = f16[32,16] broadcast(y_scale), dimensions={} x_unscaled = f16[16,32] multiply(x_f32, x_scale_bcast) y_unscaled = f16[32,16] multiply(y_f32, y_scale_bcast) c = f16[] constant(0) c_bcast = f16[16,16] broadcast(c), dimensions={} dot_a0 = 
f16[16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0} dot_a = f16[16,16] add(dot_a0, b_bcast) ROOT out = f16[16,16] maximum(dot_a, c_bcast) } )"; CheckFp8IfSupported(hlo_text, ErrorSpec{2e-3, 0.}); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: f16[16], {{.*}}: f16[], {{.*}}: f16[]) -> f16[16,16] { ; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1) ; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0} ; CHECK-NEXT: [[P2:%[^ ]+]] = f16[] parameter(3) ; CHECK-NEXT: [[CV:%[^ ]+]] = f32[] convert([[P2]]) ; CHECK-NEXT: [[P3:%[^ ]+]] = f16[] parameter(4) ; CHECK-NEXT: [[CV1:%[^ ]+]] = f32[] convert([[P3]]) ; CHECK-NEXT: [[VB:%[^ ]+]] = f16[16]{0} parameter(2) ; CHECK : ROOT [[OUT:%[^ ]+]] = f16[16,16]{1,0} custom-call([[P0]], [[P1_TRANSPOSE]], [[CV]], [[CV1]], [[VB]]), ; CHECK: custom_call_target="__cublas$lt$matmul$f8", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"BIAS_RELU" ; CHECK: } )"); } TEST_P(ParameterizedFp8GemmRewriteTest, Rank3ScaledABUnscaledDVectorBiasF8) { const char* hlo_text = R"( HloModule test ENTRY test { x = <<F8E4M3>>[4,16,16] parameter(0) y = <<F8E4M3>>[16,32] parameter(1) b = f32[32] parameter(2) b_f16 = f16[32] convert(b) b_bcast = f16[4,16,32] broadcast(b_f16), dimensions={2} x_f16 = f16[4,16,16] convert(x) y_f16 = f16[16,32] convert(y) x_scale = f16[] parameter(3) y_scale = f16[] parameter(4) x_scale_bcast = f16[4,16,16] broadcast(x_scale), dimensions={} y_scale_bcast = f16[16,32] broadcast(y_scale), dimensions={} x_unscaled = f16[4,16,16] multiply(x_f16, x_scale_bcast) x_unscaled_bitcast = f16[64,16] bitcast(x_unscaled) y_unscaled = f16[16,32] multiply(y_f16, y_scale_bcast) dot_a = f16[64,32] dot(x_unscaled_bitcast, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0} dot_a_bitcast = f16[4,16,32]{2,1,0} bitcast(dot_a) ROOT out = f16[4,16,32] add(dot_a_bitcast, b_bcast) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text)); GemmRewriter pass(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_TRUE(changed); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch(m::Bitcast(m::GetTupleElement( m::CustomCall({"__cublas$lt$matmul$f8"}), 0) .WithShape(F16, {64, 32})) .WithShape(F16, {4, 16, 32}))); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[4,16,16], {{.*}}: <<F8E4M3>>[16,32], {{.*}}: f32[32], {{.*}}: f16[], {{.*}}: f16[]) -> f16[4,16,32] { ; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[4,16,16]{2,1,0} parameter(0) ; CHECK-NEXT: 
[[P0_BITCAST:%[^ ]+]] = <<F8E4M3>>[64,16]{1,0} bitcast([[P0]]) ; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(1) ; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} transpose([[P1]]), dimensions={1,0} ; CHECK-NEXT: [[P2:%[^ ]+]] = f16[] parameter(3) ; CHECK-NEXT: [[P2_CV:%[^ ]+]] = f32[] convert([[P2]]) ; CHECK-NEXT: [[P3:%[^ ]+]] = f16[] parameter(4) ; CHECK-NEXT: [[P3_CV:%[^ ]+]] = f32[] convert([[P3]]) ; CHECK-NEXT: [[B:%[^ ]+]] = f32[32]{0} parameter(2) ; CHECK-NEXT: [[B_F16:%[^ ]+]] = f16[32]{0} convert([[B]]) ; CHECK-NEXT: [[GEMM_TUPLE:%[^ ]+]] = (f16[64,32]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0_BITCAST]], [[P1_TRANSPOSE]], [[P2_CV]], [[P3_CV]], [[B_F16]]), ; CHECK: custom_call_target="__cublas$lt$matmul$f8", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"BIAS" ; CHECK: } ; CHECK: [[GEMM:%[^ ]+]] = f16[64,32]{1,0} get-tuple-element([[GEMM_TUPLE]]), index=0 ; CHECK: ROOT [[OUT:%[^ ]+]] = f16[4,16,32]{2,1,0} bitcast([[GEMM]]) )"); } TEST_P(ParameterizedFp8GemmRewriteTest, Rank3ScaledABUnscaledDVectorBiasPaddedF8) { const char* hlo_text = R"( HloModule test ENTRY test { x = <<F8E4M3>>[4,15,15] parameter(0) y = <<F8E4M3>>[15,31] parameter(1) b = f32[31] parameter(2) b_f16 = f16[31] convert(b) b_bcast = f16[4,15,31] broadcast(b_f16), dimensions={2} x_f16 = f16[4,15,15] convert(x) y_f16 = f16[15,31] convert(y) x_scale = f16[] parameter(3) y_scale = f16[] parameter(4) x_scale_bcast = f16[4,15,15] broadcast(x_scale), dimensions={} y_scale_bcast = f16[15,31] broadcast(y_scale), dimensions={} x_unscaled = f16[4,15,15] multiply(x_f16, x_scale_bcast) x_unscaled_bitcast = f16[60,15] bitcast(x_unscaled) y_unscaled = f16[15,31] multiply(y_f16, y_scale_bcast) dot_a = f16[60,31] dot(x_unscaled_bitcast, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0} dot_a_bitcast = f16[4,15,31]{2,1,0} bitcast(dot_a) ROOT out = f16[4,15,31] add(dot_a_bitcast, b_bcast) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text)); GemmRewriter pass(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_TRUE(changed); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch( m::Bitcast(m::Slice(m::GetTupleElement( m::CustomCall({"__cublas$lt$matmul$f8"}), 0) .WithShape(F16, {64, 32})) .WithShape(F16, {60, 31})) .WithShape(F16, {4, 15, 31}))); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[4,15,15], {{.*}}: <<F8E4M3>>[15,31], {{.*}}: f32[31], {{.*}}: f16[], {{.*}}: f16[]) -> f16[4,15,31] { ; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[4,15,15]{2,1,0} parameter(0) ; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = <<F8E4M3>>[60,15]{1,0} bitcast([[P0]]) ; CHECK-NEXT: [[C1:%[^ ]+]] = <<F8E4M3>>[] constant(0) ; CHECK-NEXT: [[P0_PAD:%[^ ]+]] = <<F8E4M3>>[64,16]{1,0} pad([[P0_BITCAST]], [[C1]]), padding=0_4x0_1 ; CHECK-NEXT: [[P1:%[^ ]+]] = 
<<F8E4M3>>[15,31]{1,0} parameter(1) ; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[31,15]{1,0} transpose([[P1]]), dimensions={1,0} ; CHECK-NEXT: [[C2:%[^ ]+]] = <<F8E4M3>>[] constant(0) ; CHECK-NEXT: [[P1_PAD:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} pad([[P1_TRANSPOSE]], [[C2]]), padding=0_1x0_1 ; CHECK-NEXT: [[P2:%[^ ]+]] = f16[] parameter(3) ; CHECK-NEXT: [[P2_CV:%[^ ]+]] = f32[] convert([[P2]]) ; CHECK-NEXT: [[P3:%[^ ]+]] = f16[] parameter(4) ; CHECK-NEXT: [[P3_CV:%[^ ]+]] = f32[] convert([[P3]]) ; CHECK-NEXT: [[B:%[^ ]+]] = f32[31]{0} parameter(2) ; CHECK-NEXT: [[B_F16:%[^ ]+]] = f16[31]{0} convert([[B]]) ; CHECK-NEXT: [[C3:%[^ ]+]] = f16[] constant(0) ; CHECK-NEXT: [[P2_PAD:%[^ ]+]] = f16[32]{0} pad([[B_F16]], [[C3]]), padding=0_1 ; CHECK-NEXT: [[GEMM_TUPLE:%[^ ]+]] = (f16[64,32]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0_PAD]], [[P1_PAD]], [[P2_CV]], [[P3_CV]], [[P2_PAD]]), ; CHECK: custom_call_target="__cublas$lt$matmul$f8", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"BIAS" ; CHECK: } ; CHECK: [[GEMM:%[^ ]+]] = f16[64,32]{1,0} get-tuple-element([[GEMM_TUPLE]]), index=0 ; CHECK-NEXT: [[SLICE:%[^ ]+]] = f16[60,31]{1,0} slice([[GEMM]]), slice={[0:60], [0:31]} ; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = f16[4,15,31]{2,1,0} bitcast([[SLICE]]) )"); } TEST_P(ParameterizedFp8GemmRewriteTest, Rank3ScaledABUnscaledDMatrixBiasF8) { const char* hlo_text = R"( HloModule test ENTRY test { x = <<F8E4M3>>[4,16,16] parameter(0) y = <<F8E4M3>>[16,32] parameter(1) b = f32[4,16,32] parameter(2) x_f32 = f32[4,16,16] convert(x) y_f32 = f32[16,32] convert(y) x_scale = f32[] parameter(3) y_scale = f32[] parameter(4) x_scale_bcast = f32[4,16,16] broadcast(x_scale), dimensions={} y_scale_bcast = f32[16,32] broadcast(y_scale), dimensions={} x_unscaled = f32[4,16,16] multiply(x_f32, x_scale_bcast) x_unscaled_bitcast = f32[64,16] bitcast(x_unscaled) y_unscaled = f32[16,32] multiply(y_f32, y_scale_bcast) dot_a = f32[64,32] dot(x_unscaled_bitcast, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0} dot_a_bitcast = f32[4,16,32]{2,1,0} bitcast(dot_a) ROOT out = f32[4,16,32] add(dot_a_bitcast, b) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text)); GemmRewriter pass(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_TRUE(changed); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch(m::Bitcast(m::GetTupleElement( m::CustomCall({"__cublas$lt$matmul$f8"}), 0) .WithShape(F32, {64, 32})) .WithShape(F32, {4, 16, 32}))); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[4,16,16], {{.*}}: <<F8E4M3>>[16,32], {{.*}}: f32[4,16,32], {{.*}}: f32[], {{.*}}: f32[]) -> f32[4,16,32] { ; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[4,16,16]{2,1,0} parameter(0) ; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = <<F8E4M3>>[64,16]{1,0} bitcast([[P0]]) ; CHECK-NEXT: [[P1:%[^ ]+]] = 
<<F8E4M3>>[16,32]{1,0} parameter(1) ; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} transpose([[P1]]), dimensions={1,0} ; CHECK-NEXT: [[B:%[^ ]+]] = f32[4,16,32]{2,1,0} parameter(2) ; CHECK-NEXT: [[B_BITCAST:%[^ ]+]] = f32[64,32]{1,0} bitcast([[B]]) ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[] parameter(3) ; CHECK-NEXT: [[P3:%[^ ]+]] = f32[] parameter(4) ; CHECK-NEXT: [[GEMM_TUPLE:%[^ ]+]] = (f32[64,32]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0_BITCAST]], [[P1_TRANSPOSE]], [[B_BITCAST]], [[P2]], [[P3]]), ; CHECK: custom_call_target="__cublas$lt$matmul$f8", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":1 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } ; CHECK: [[GEMM:%[^ ]+]] = f32[64,32]{1,0} get-tuple-element([[GEMM_TUPLE]]), index=0 ; CHECK: ROOT [[OUT:%[^ ]+]] = f32[4,16,32]{2,1,0} bitcast([[GEMM]]) )"); } TEST_P(ParameterizedFp8GemmRewriteTest, Rank3ScaledABUnscaledDMatrixBiasPaddedF8) { const char* hlo_text = R"( HloModule test ENTRY test { x = <<F8E4M3>>[3,15,15] parameter(0) y = <<F8E4M3>>[15,31] parameter(1) b = f32[3,15,31] parameter(2) x_f32 = f32[3,15,15] convert(x) y_f32 = f32[15,31] convert(y) x_scale = f32[] parameter(3) y_scale = f32[] parameter(4) x_scale_bcast = f32[3,15,15] broadcast(x_scale), dimensions={} y_scale_bcast = f32[15,31] broadcast(y_scale), dimensions={} x_unscaled = f32[3,15,15] multiply(x_f32, x_scale_bcast) x_unscaled_bitcast = f32[45,15] bitcast(x_unscaled) y_unscaled = f32[15,31] multiply(y_f32, y_scale_bcast) dot_a = f32[45,31] dot(x_unscaled_bitcast, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0} dot_a_bitcast = f32[3,15,31]{2,1,0} bitcast(dot_a) ROOT out = f32[3,15,31] add(dot_a_bitcast, b) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text)); GemmRewriter pass(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_TRUE(changed); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch( m::Bitcast(m::Slice(m::GetTupleElement( m::CustomCall({"__cublas$lt$matmul$f8"}), 0) .WithShape(F32, {48, 32})) .WithShape(F32, {45, 31})) .WithShape(F32, {3, 15, 31}))); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[3,15,15], {{.*}}: <<F8E4M3>>[15,31], {{.*}}: f32[3,15,31], {{.*}}: f32[], {{.*}}: f32[]) -> f32[3,15,31] { ; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[3,15,15]{2,1,0} parameter(0) ; CHECK-NEXT: [[P0_BITCAST:%[^ ]+]] = <<F8E4M3>>[45,15]{1,0} bitcast([[P0]]) ; CHECK-NEXT: [[C1:%[^ ]+]] = <<F8E4M3>>[] constant(0) ; CHECK-NEXT: [[P0_PADDED:%[^ ]+]] = <<F8E4M3>>[48,16]{1,0} pad([[P0_BITCAST]], [[C1]]), padding=0_3x0_1 ; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[15,31]{1,0} parameter(1) ; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[31,15]{1,0} transpose([[P1]]), dimensions={1,0} ; CHECK-NEXT: [[C2:%[^ ]+]] = <<F8E4M3>>[] constant(0) ; CHECK-NEXT: [[P1_PADDED:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} 
pad([[P1_TRANSPOSE]], [[C2]]), padding=0_1x0_1 ; CHECK-NEXT: [[B:%[^ ]+]] = f32[3,15,31]{2,1,0} parameter(2) ; CHECK-NEXT: [[B_BITCAST:%[^ ]+]] = f32[45,31]{1,0} bitcast([[B]]) ; CHECK-NEXT: [[C3:%[^ ]+]] = f32[] constant(0) ; CHECK-NEXT: [[P2_PADDED:%[^ ]+]] = f32[48,32]{1,0} pad([[B_BITCAST]], [[C3]]), padding=0_3x0_1 ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[] parameter(3) ; CHECK-NEXT: [[P3:%[^ ]+]] = f32[] parameter(4) ; CHECK-NEXT: [[GEMM_TUPLE:%[^ ]+]] = (f32[48,32]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0_PADDED]], [[P1_PADDED]], [[P2_PADDED]], [[P2]], [[P3]]), ; CHECK: custom_call_target="__cublas$lt$matmul$f8", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":1 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } ; CHECK-NEXT: [[GEMM:%[^ ]+]] = f32[48,32]{1,0} get-tuple-element([[GEMM_TUPLE]]), index=0 ; CHECK-NEXT: [[SLICE:%[^ ]+]] = f32[45,31]{1,0} slice([[GEMM]]), slice={[0:45], [0:31]} ; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = f32[3,15,31]{2,1,0} bitcast([[SLICE]]) )"); } TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABUnscaledDMatrixBiasWithSliceF8) { const char* hlo_text = R"( HloModule test ENTRY test { x = <<F8E4M3>>[48,16] parameter(0) y = <<F8E4M3>>[16,32] parameter(1) b = f32[32,16] parameter(2) x_f32 = f32[48,16] convert(x) y_f32 = f32[16,32] convert(y) x_scale = f32[] parameter(3) y_scale = f32[] parameter(4) x_scale_bcast = f32[48,16] broadcast(x_scale), dimensions={} y_scale_bcast = f32[16,32] broadcast(y_scale), dimensions={} x_unscaled = f32[48,16] multiply(x_f32, x_scale_bcast) y_unscaled = f32[16,32] multiply(y_f32, y_scale_bcast) dot_a = f32[48,32] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0} dot_a_sliced = f32[32,16] slice(dot_a), slice={[16:48], [16:32]} ROOT out = f32[32,16] add(dot_a_sliced, b) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text)); GemmRewriter pass(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_TRUE(changed); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[48,16], {{.*}}: <<F8E4M3>>[16,32], {{.*}}: f32[32,16], {{.*}}: f32[], {{.*}}: f32[]) -> f32[32,16] { ; CHECK-NEXT: [[P0:%[^ ]+]] = <<F8E4M3>>[48,16]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(1) ; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} transpose([[P1]]), dimensions={1,0} ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[] parameter(3) ; CHECK-NEXT: [[P3:%[^ ]+]] = f32[] parameter(4) ; CHECK-NEXT: [[GEMM_TUPLE:%[^ ]+]] = (f32[48,32]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2]], [[P3]]), ; CHECK: custom_call_target="__cublas$lt$matmul$f8", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; 
CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } ; CHECK: [[GEMM:%[^_]+]] = f32[48,32]{1,0} get-tuple-element([[GEMM_TUPLE]]), index=0 ; CHECK-NEXT: [[SLICE:%[^ ]+]] = f32[32,16]{1,0} slice([[GEMM]]), slice={[16:48], [16:32]} ; CHECK-NEXT: [[B:%[^ ]+]] = f32[32,16]{1,0} parameter(2) ; CHECK-NEXT: ROOT [[OUT:%[^ ]+]] = f32[32,16]{1,0} add([[SLICE]], [[B]]) )"); } TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABUnscaledDWithAllGatherF8) { absl::string_view hlo_text = R"( HloModule test ENTRY test { x = <<F8E4M3>>[16,32] parameter(0) y = <<F8E4M3>>[16,32] parameter(1) x_f32 = f32[16,32] convert(x) y_f32 = f32[16,32] convert(y) x_scale = f32[] parameter(2) y_scale = f32[] parameter(3) x_scale_bcast = f32[16,32] broadcast(x_scale), dimensions={} y_scale_bcast = f32[16,32] broadcast(y_scale), dimensions={} x_unscaled = f32[16,32] multiply(x_f32, x_scale_bcast) y_unscaled = f32[16,32] multiply(y_f32, y_scale_bcast) all_gather = f32[16,64]{1,0} all-gather(x_unscaled), channel_id=1, replica_groups={{0,1},{2,3},{4,5},{6,7}}, dimensions={1}, use_global_device_ids=true all_gather1 = f32[64,32]{1,0} all-gather(y_unscaled), channel_id=2, replica_groups={{0,2,4,6},{1,3,5,7}}, dimensions={0}, use_global_device_ids=true ROOT dot_a = f32[16,32] dot(all_gather, all_gather1), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; HloModuleConfig config = GetModuleConfigForTest(); config.set_use_spmd_partitioning(true); config.set_num_partitions(8); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[16,32], {{.*}}: f32[], {{.*}}: f32[]) -> f32[16,32] { ; CHECK: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0) ; CHECK: [[AG:%[^ ]+]] = <<F8E4M3>>[16,64]{1,0} all-gather([[P0]]), {{[^ ]+}} ; CHECK: [[P1:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(1) ; CHECK: [[AG1:%[^ ]+]] = <<F8E4M3>>[64,32]{1,0} all-gather([[P1]]), {{[^ ]+}} ; CHECK: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[32,64]{1,0} transpose([[AG1]]), dimensions={1,0} ; CHECK: [[P2:%[^ ]+]] = f32[] parameter(2) ; CHECK: [[P3:%[^ ]+]] = f32[] parameter(3) ; CHECK: [[GEMM_TUPLE:%[^ ]+]] = (f32[16,32]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[AG]], [[P1_TRANSPOSE]], [[P2]], [[P3]]), ; CHECK: custom_call_target="__cublas$lt$matmul$f8", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } ; CHECK: ROOT [[GEMM:%[^_]+]] = f32[16,32]{1,0} get-tuple-element([[GEMM_TUPLE]]), index=0 )", nullptr, &config); } TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABUnscaledDWithAllToAllF8) { absl::string_view hlo_text = R"( HloModule test ENTRY test { x = <<F8E4M3>>[16,32] parameter(0) y = <<F8E4M3>>[16,32] parameter(1) x_f32 = f32[16,32] convert(x) y_f32 = f32[16,32] convert(y) x_scale = f32[] parameter(2) y_scale = f32[] parameter(3) x_scale_bcast = f32[16,32] broadcast(x_scale), 
dimensions={} y_scale_bcast = f32[16,32] broadcast(y_scale), dimensions={} x_unscaled = f32[16,32] multiply(x_f32, x_scale_bcast) y_unscaled = f32[16,32] multiply(y_f32, y_scale_bcast) all_to_all = f32[16,32]{1,0} all-to-all(x_unscaled), channel_id=1, replica_groups={{0,1,2,3},{4,5,6,7}}, dimensions={0} ROOT dot_a = f32[16,16] dot(all_to_all, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={1} } )"; HloModuleConfig config = GetModuleConfigForTest(); config.set_use_spmd_partitioning(true); config.set_num_partitions(8); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[16,32], {{.*}}: f32[], {{.*}}: f32[]) -> f32[16,16] { ; CHECK: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0) ; CHECK: [[AA:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} all-to-all([[P0]]), {{[^ ]+}} ; CHECK: [[P1:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(1) ; CHECK: [[P2:%[^ ]+]] = f32[] parameter(2) ; CHECK: [[P3:%[^ ]+]] = f32[] parameter(3) ; CHECK: [[GEMM:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[AA]], [[P1]], [[P2]], [[P3]]), ; CHECK: custom_call_target="__cublas$lt$matmul$f8", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )", nullptr, &config); } TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABUnscaledDWithCollectivePermuteF8) { absl::string_view hlo_text = R"( HloModule test ENTRY test { x = <<F8E4M3>>[16,32] parameter(0) y = <<F8E4M3>>[16,32] parameter(1) x_f32 = f32[16,32] convert(x) y_f32 = f32[16,32] convert(y) x_scale = f32[] parameter(2) y_scale = f32[] parameter(3) x_scale_bcast = f32[16,32] broadcast(x_scale), dimensions={} y_scale_bcast = f32[16,32] broadcast(y_scale), dimensions={} x_unscaled = f32[16,32] multiply(x_f32, x_scale_bcast) y_unscaled = f32[16,32] multiply(y_f32, y_scale_bcast) collective_permute = f32[16,32]{1,0} collective-permute(x_unscaled), source_target_pairs={{0,0}, {1,1}, {2,4}, {3,5}, {4,2}, {5,3}, {6,6}, {7,7}} ROOT dot_a = f32[16,16] dot(collective_permute, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={1} } )"; HloModuleConfig config = GetModuleConfigForTest(); config.set_use_spmd_partitioning(true); config.set_num_partitions(8); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[16,32], {{.*}}: f32[], {{.*}}: f32[]) -> f32[16,16] { ; CHECK: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0) ; CHECK: [[AA:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} collective-permute([[P0]]), {{[^ ]+}} ; CHECK: [[P1:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(1) ; CHECK: [[P2:%[^ ]+]] = f32[] parameter(2) ; CHECK: [[P3:%[^ ]+]] = f32[] parameter(3) ; CHECK: [[GEMM:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[AA]], [[P1]], [[P2]], [[P3]]), ; CHECK: custom_call_target="__cublas$lt$matmul$f8", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: 
"alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )", nullptr, &config); } TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABUnscaledDMatrixBiasThenVectorBiasF8) { const char* hlo_text = R"( HloModule test ENTRY test { x = <<F8E4M3>>[16,32] parameter(0) y = <<F8E4M3>>[32,16] parameter(1) x_f16 = f16[16,32] convert(x) y_f16 = f16[32,16] convert(y) b = f16[16] parameter(2) b_bcast = f16[16,16] broadcast(b), dimensions={1} b2 = f16[16,16] parameter(3) x_scale = f16[] parameter(4) y_scale = f16[] parameter(5) x_scale_bcast = f16[16,32] broadcast(x_scale), dimensions={} y_scale_bcast = f16[32,16] broadcast(y_scale), dimensions={} x_unscaled = f16[16,32] multiply(x_f16, x_scale_bcast) y_unscaled = f16[32,16] multiply(y_f16, y_scale_bcast) dot_a = f16[16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0} dot_a_bias1 = f16[16,16] add(dot_a, b2) ROOT dot_a_bias = f16[16,16] add(dot_a_bias1, b_bcast) } )"; CheckFp8IfSupported(hlo_text, ErrorSpec{2e-3, 0.}); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: f16[16], {{.*}}: f16[16,16], {{.*}}: f16[], {{.*}}: f16[]) -> f16[16,16] { ; CHECK-DAG: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1) ; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]), dimensions={1,0} ; CHECK-NEXT: [[MB:%[^ ]+]] = f16[16,16]{1,0} parameter(3) ; CHECK-NEXT: [[P2:%[^ ]+]] = f16[] parameter(4) ; CHECK-NEXT: [[CV0:%[^ ]+]] = f32[] convert([[P2]]) ; CHECK-NEXT: [[P3:%[^ ]+]] = f16[] parameter(5) ; CHECK-NEXT: [[CV1:%[^ ]+]] = f32[] convert([[P3]]) ; CHECK: [[GEMMOUT_TUPLE:%[^ ]+]] = (f16[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[MB]], [[CV0]], [[CV1]]), ; CHECK: custom_call_target="__cublas$lt$matmul$f8", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":1 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } ; CHECK: [[GEMMOUT:%[^ ]+]] = f16[16,16]{1,0} get-tuple-element([[GEMMOUT_TUPLE]]), index=0 ; CHECK: [[VB:%[^ ]+]] = f16[16]{0} parameter(2) ; CHECK: [[VBC:%[^ ]+]] = f16[16,16]{1,0} broadcast([[VB]]), dimensions={1} ; CHECK: ROOT [[OUT:%[^ ]+]] = f16[16,16]{1,0} add([[GEMMOUT]], [[VBC]]) )"); } TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABScaledDWithDAmaxF8) { const char* hlo_text = R"( HloModule test apply { a = f32[] parameter(0) b = f32[] parameter(1) ROOT c = f32[] maximum(a, b) } ENTRY test { x = <<F8E4M3>>[16,32] parameter(0) y = <<F8E4M3>>[32,16] parameter(1) x_f32 = f32[16,32] convert(x) y_f32 = f32[32,16] convert(y) x_scale = f32[] parameter(2) y_scale = f32[] parameter(3) z_scale = 
f32[] parameter(4) x_scale_bcast = f32[16,32] broadcast(x_scale), dimensions={} y_scale_bcast = f32[32,16] broadcast(y_scale), dimensions={} z_scale_bcast = f32[16,16] broadcast(z_scale), dimensions={} x_unscaled = f32[16,32] multiply(x_f32, x_scale_bcast) y_unscaled = f32[32,16] multiply(y_f32, y_scale_bcast) dot_a = f32[16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0} abs_dot_a = f32[16,16] abs(dot_a) c0 = f32[] constant(-inf) amax = f32[] reduce(abs_dot_a, c0), dimensions={0,1}, to_apply=apply dot_a_scaled = f32[16,16] divide(dot_a, z_scale_bcast) c1 = f32[] constant(-<<F8E4M3_AMAX>>) c1_bcast = f32[16,16] broadcast(c1), dimensions={} c2 = f32[] constant(<<F8E4M3_AMAX>>) c2_bcast = f32[16,16] broadcast(c2), dimensions={} dot_a_clamped = f32[16,16] clamp(c1_bcast, dot_a_scaled, c2_bcast) dot_a_f8 = <<F8E4M3>>[16,16] convert(dot_a_clamped) ROOT out = (<<F8E4M3>>[16,16], f32[]) tuple(dot_a_f8, amax) } )"; CheckFp8IfSupported(hlo_text); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: f32[], {{.*}}: f32[], {{.*}}: f32[]) -> (<<F8E4M3>>[16,16], f32[]) { ; CHECK: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1) ; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]) ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[] parameter(2) ; CHECK-NEXT: [[P3:%[^ ]+]] = f32[] parameter(3) ; CHECK-PTX-NEXT: [[C2:%[^ ]+]] = f32[] constant(1) ; CHECK-PTX-NEXT: [[P4:%[^ ]+]] = f32[] parameter(4) ; CHECK-PTX-NEXT: [[P4_INV:%[^ ]+]] = f32[] divide([[C2]], [[P4]]) ; CHECK-PTX-NEXT: [[OUT:%[^ ]+]] = (<<F8E4M3>>[16,16]{1,0}, f32[], s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2]], [[P3]], [[P4_INV]]), ; CHECK-GCN-NEXT: [[C1:%[^ ]+]] = f32[] constant(1) ; CHECK-GCN-NEXT: [[OUT:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2]], [[P3]], [[C1]]), ; CHECK: custom_call_target="__cublas$lt$matmul$f8", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABScaledDWithDAmaxF8WithF16Intermediates) { const char* hlo_text = R"( HloModule test apply { a = f16[] parameter(0) b = f16[] parameter(1) ROOT c = f16[] maximum(a, b) } ENTRY test { x = <<F8E4M3>>[16,32] parameter(0) y = <<F8E4M3>>[32,16] parameter(1) x_f16 = f16[16,32] convert(x) y_f16 = f16[32,16] convert(y) x_scale = f16[] parameter(2) y_scale = f16[] parameter(3) z_scale = f16[] parameter(4) x_scale_bcast = f16[16,32] broadcast(x_scale), dimensions={} y_scale_bcast = f16[32,16] broadcast(y_scale), dimensions={} z_scale_bcast = f16[16,16] broadcast(z_scale), dimensions={} x_unscaled = f16[16,32] multiply(x_f16, x_scale_bcast) y_unscaled = f16[32,16] multiply(y_f16, y_scale_bcast) dot_a = f16[16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0} abs_dot_a = f16[16,16] abs(dot_a) c0 = f16[] 
constant(-inf) amax = f16[] reduce(abs_dot_a, c0), dimensions={0,1}, to_apply=apply dot_a_scaled = f16[16,16] divide(dot_a, z_scale_bcast) c1 = f16[] constant(-<<F8E4M3_AMAX>>) c1_bcast = f16[16,16] broadcast(c1), dimensions={} c2 = f16[] constant(<<F8E4M3_AMAX>>) c2_bcast = f16[16,16] broadcast(c2), dimensions={} dot_a_clamped = f16[16,16] clamp(c1_bcast, dot_a_scaled, c2_bcast) dot_a_f8 = <<F8E4M3>>[16,16] convert(dot_a_clamped) ROOT out = (<<F8E4M3>>[16,16], f16[]) tuple(dot_a_f8, amax) } )"; CheckFp8IfSupported(hlo_text); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: f16[], {{.*}}: f16[], {{.*}}: f16[]) -> (<<F8E4M3>>[16,16], f16[]) { ; CHECK: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1) ; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]) ; CHECK-NEXT: [[P2:%[^ ]+]] = f16[] parameter(2) ; CHECK-NEXT: [[P2_CONVERT:%[^ ]+]] = f32[] convert([[P2]]) ; CHECK-NEXT: [[P3:%[^ ]+]] = f16[] parameter(3) ; CHECK-NEXT: [[P3_CONVERT:%[^ ]+]] = f32[] convert([[P3]]) ; CHECK-PTX-NEXT: [[C2:%[^ ]+]] = f16[] constant(1) ; CHECK-PTX-NEXT: [[P4:%[^ ]+]] = f16[] parameter(4) ; CHECK-PTX-NEXT: [[P4_INV:%[^ ]+]] = f16[] divide([[C2]], [[P4]]) ; CHECK-PTX-NEXT: [[P4_INV_CONVERT:%[^ ]+]] = f32[] convert([[P4_INV]]) ; CHECK-PTX-NEXT: [[OUT:%[^ ]+]] = (<<F8E4M3>>[16,16]{1,0}, f32[], s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2_CONVERT]], [[P3_CONVERT]], [[P4_INV_CONVERT]]), ; CHECK-GCN-NEXT: [[C1:%[^ ]+]] = f32[] constant(1) ; CHECK-GCN-NEXT: [[OUT:%[^ ]+]] = (f16[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2_CONVERT]], [[P3_CONVERT]], [[C1]]), ; CHECK: custom_call_target="__cublas$lt$matmul$f8", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABScaledDReluActivationWithDAmaxF8) { const char* hlo_text = R"( HloModule test apply { a = f32[] parameter(0) b = f32[] parameter(1) ROOT c = f32[] maximum(a, b) } ENTRY test { x = <<F8E4M3>>[16,32] parameter(0) y = <<F8E4M3>>[32,16] parameter(1) x_f32 = f32[16,32] convert(x) y_f32 = f32[32,16] convert(y) x_scale = f32[] parameter(2) y_scale = f32[] parameter(3) z_scale = f32[] parameter(4) x_scale_bcast = f32[16,32] broadcast(x_scale), dimensions={} y_scale_bcast = f32[32,16] broadcast(y_scale), dimensions={} z_scale_bcast = f32[16,16] broadcast(z_scale), dimensions={} x_unscaled = f32[16,32] multiply(x_f32, x_scale_bcast) y_unscaled = f32[32,16] multiply(y_f32, y_scale_bcast) dot_a = f32[16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0} czero = f32[] constant(0) czero_bcast = f32[16,16] broadcast(czero), dimensions={} dot_a_relu = f32[16,16] maximum(dot_a, czero_bcast) c0 = f32[] constant(-inf) amax = f32[] reduce(dot_a_relu, c0), dimensions={0,1}, to_apply=apply dot_a_scaled = f32[16,16] divide(dot_a_relu, 
z_scale_bcast) c1 = f32[] constant(-<<F8E4M3_AMAX>>) c1_bcast = f32[16,16] broadcast(c1), dimensions={} c2 = f32[] constant(<<F8E4M3_AMAX>>) c2_bcast = f32[16,16] broadcast(c2), dimensions={} dot_a_clamped = f32[16,16] clamp(c1_bcast, dot_a_scaled, c2_bcast) dot_a_f8 = <<F8E4M3>>[16,16] convert(dot_a_clamped) ROOT out = (<<F8E4M3>>[16,16], f32[]) tuple(dot_a_f8, amax) } )"; CheckFp8IfSupported(hlo_text); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: <<F8E4M3>>[16,32], {{.*}}: <<F8E4M3>>[32,16], {{.*}}: f32[], {{.*}}: f32[], {{.*}}: f32[]) -> (<<F8E4M3>>[16,16], f32[]) { ; CHECK: [[P0:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = <<F8E4M3>>[32,16]{1,0} parameter(1) ; CHECK-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]) ; CHECK-NEXT: [[P2:%[^ ]+]] = f32[] parameter(2) ; CHECK-NEXT: [[P3:%[^ ]+]] = f32[] parameter(3) ; CHECK-PTX-NEXT: [[C2:%[^ ]+]] = f32[] constant(1) ; CHECK-PTX-NEXT: [[P4:%[^ ]+]] = f32[] parameter(4) ; CHECK-PTX-NEXT: [[P4_INV:%[^ ]+]] = f32[] divide([[C2]], [[P4]]) ; CHECK-PTX-NEXT: [[OUT:%[^ ]+]] = (<<F8E4M3>>[16,16]{1,0}, f32[], s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2]], [[P3]], [[P4_INV]]), ; CHECK-GCN-NEXT: [[C1:%[^ ]+]] = f32[] constant(1) ; CHECK-GCN-NEXT: [[OUT:%[^ ]+]] = (f32[16,16]{1,0}, s8[{{[0-9]+}}]{0}) custom-call([[P0]], [[P1_TRANSPOSE]], [[P2]], [[P3]], [[C1]]), ; CHECK: custom_call_target="__cublas$lt$matmul$f8", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"RELU" ; CHECK: } )"); } TEST_P(ParameterizedFp8GemmRewriteTest, UnscaledABUnscaledDPrecisionF8) { const char* raw_hlo_template = R"( HloModule test ENTRY test { x = <<F8E4M3>>[1600,3200] parameter(0) y = <<F8E4M3>>[3200,1600] parameter(1) x_f32 = f32[1600,3200] convert(x) y_f32 = f32[3200,1600] convert(y) x_scale = f32[] parameter(2) y_scale = f32[] parameter(3) x_scale_bcast = f32[1600,3200] broadcast(x_scale), dimensions={} y_scale_bcast = f32[3200,1600] broadcast(y_scale), dimensions={} x_unscaled = f32[1600,3200] multiply(x_f32, x_scale_bcast) y_unscaled = f32[3200,1600] multiply(y_f32, y_scale_bcast) ROOT out = f32[1600,1600] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0}, operand_precision={<<precision>>,<<precision>>} } )"; std::string hlo_template = absl::StrReplaceAll(raw_hlo_template, replacements_); absl::flat_hash_map<absl::string_view, absl::string_view> replacements; replacements["<<precision>>"] = "default"; const auto hlo_text_default = absl::StrReplaceAll(hlo_template, replacements); EXPECT_TRUE(RunAndCompare(hlo_text_default, ErrorSpec{1e-3, 1e-3})); replacements["<<precision>>"] = "highest"; const auto hlo_text_highest = absl::StrReplaceAll(hlo_template, replacements); EXPECT_TRUE(RunAndCompare(hlo_text_highest, ErrorSpec{1e-4, 1e-4})); } TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABUnscaledDF8Parameterized) { std::array<std::array<absl::string_view, 7>, 32> combinations; int i = 0; for (bool d_is_col : {false, 
true}) { for (bool a_is_col : {false, true}) { for (bool b_is_col : {false, true}) { for (int lhs_contracting_dim : {0, 1}) { for (int rhs_contracting_dim : {0, 1}) { const absl::string_view lcd = lhs_contracting_dim == 1 ? "{1}" : "{0}"; const absl::string_view rcd = rhs_contracting_dim == 1 ? "{1}" : "{0}"; const absl::string_view a_shape = lhs_contracting_dim == 1 ? "[64,32]" : "[32,64]"; const absl::string_view b_shape = rhs_contracting_dim == 0 ? "[32,16]" : "[16,32]"; const absl::string_view a_layout = a_is_col ? "{0,1}" : "{1,0}"; const absl::string_view b_layout = b_is_col ? "{0,1}" : "{1,0}"; const absl::string_view output_layout = d_is_col ? "{0,1}" : "{1,0}"; combinations[i++] = std::array{ lcd, rcd, a_shape, b_shape, a_layout, b_layout, output_layout}; } } } } } const char* hlo_template = R"( HloModule test ENTRY test { x = <<F8E4M3>><<Ashape>><<Alayout>> parameter(0) x_f32 = f32<<Ashape>><<Alayout>> convert(x) x_scale = f32[] parameter(2) x_scale_bcast = f32<<Ashape>> broadcast(x_scale), dimensions={} x_unscaled = f32<<Ashape>> multiply(x_f32, x_scale_bcast) y = <<F8E4M3>><<Bshape>><<Blayout>> parameter(1) y_f32 = f32<<Bshape>><<Blayout>> convert(y) y_scale = f32[] parameter(3) y_scale_bcast = f32<<Bshape>> broadcast(y_scale), dimensions={} y_unscaled = f32<<Bshape>> multiply(y_f32, y_scale_bcast) ROOT out = f32[64,16]<<Olayout>> dot(x_unscaled, y_unscaled), lhs_contracting_dims=<<Lcd>>, rhs_contracting_dims=<<Rcd>> } )"; for (const auto& combination : combinations) { absl::flat_hash_map<absl::string_view, absl::string_view> replacements; replacements["<<Lcd>>"] = std::get<0>(combination); replacements["<<Rcd>>"] = std::get<1>(combination); replacements["<<Ashape>>"] = std::get<2>(combination); replacements["<<Bshape>>"] = std::get<3>(combination); replacements["<<Alayout>>"] = std::get<4>(combination); replacements["<<Blayout>>"] = std::get<5>(combination); replacements["<<Olayout>>"] = std::get<6>(combination); const auto hlo_text = absl::StrReplaceAll(hlo_template, replacements); CheckFp8IfSupported(hlo_text); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK: custom_call_target="__cublas$lt$matmul$f8", )"); } } TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABUnscaledDF8ParameterizedBatched) { std::array<std::array<std::string, 7>, 32> combinations; std::string lcd, rcd, a_shape, b_shape, a_layout, b_layout, o_layout; int i = 0; for (bool o_is_col : {false, true}) { for (int lhs_contracting_dim : {2, 1}) { for (int rhs_contracting_dim : {2, 1}) { lcd = lhs_contracting_dim == 2 ? "{2}" : "{1}"; rcd = rhs_contracting_dim == 2 ? "{2}" : "{1}"; a_shape = lhs_contracting_dim == 2 ? "[2,64,32]" : "[2,32,64]"; b_shape = rhs_contracting_dim == 1 ? "[2,32,16]" : "[2,16,32]"; o_layout = o_is_col ? 
"{2, 0, 1}" : "{2, 1, 0}"; for (std::string a_layout : {"{2,1,0}", "{1,2,0}"}) { for (std::string b_layout : {"{2,1,0}", "{1,2,0}"}) { combinations[i++] = std::array{lcd, rcd, a_shape, b_shape, a_layout, b_layout, o_layout}; } } } } } const char* hlo_template = R"( HloModule m ENTRY f { x_q = <<F8E4M3>><<Ashape>><<Alayout>> parameter(0) x_scale = f32[] parameter(2) x_scale_broadcast = f32<<Ashape>><<Alayout>> broadcast(x_scale), dimensions={} x_q_convert = f32<<Ashape>><<Alayout>> convert(x_q) x_qdq = f32<<Ashape>><<Alayout>> multiply(x_q_convert, x_scale_broadcast) y_q = <<F8E4M3>><<Bshape>><<Blayout>> parameter(1) y_scale = f32[] parameter(3) y_scale_broadcast = f32<<Bshape>><<Blayout>> broadcast(y_scale), dimensions={} y_q_convert = f32<<Bshape>><<Blayout>> convert(y_q) y_qdq = f32<<Bshape>><<Blayout>> multiply(y_q_convert, y_scale_broadcast) ROOT out = f32[2,64,16]<<Olayout>> dot(x_qdq, y_qdq), lhs_batch_dims={0}, lhs_contracting_dims=<<Lcd>>, rhs_batch_dims={0}, rhs_contracting_dims=<<Rcd>> } )"; for (const auto& combination : combinations) { absl::flat_hash_map<std::string, std::string> replacements; replacements["<<Lcd>>"] = std::get<0>(combination); replacements["<<Rcd>>"] = std::get<1>(combination); replacements["<<Ashape>>"] = std::get<2>(combination); replacements["<<Bshape>>"] = std::get<3>(combination); replacements["<<Alayout>>"] = std::get<4>(combination); replacements["<<Blayout>>"] = std::get<5>(combination); replacements["<<Olayout>>"] = std::get<6>(combination); const auto hlo_text = absl::StrReplaceAll(hlo_template, replacements); CheckFp8IfSupported(hlo_text); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK: custom_call_target="__cublas$lt$matmul$f8", )"); } } TEST_P(ParameterizedFp8GemmRewriteTest, ScaledABUnscaledDF8TF32E5M2) { const char* hlo_text = R"( HloModule test ENTRY test { x = <<F8E4M3>>[16,32] parameter(0) y = <<F8E5M2>>[32,16] parameter(1) x_f32 = f32[16,32] convert(x) y_f32 = f32[32,16] convert(y) x_scale = f32[] parameter(2) y_scale = f32[] parameter(3) x_scale_bcast = f32[16,32] broadcast(x_scale), dimensions={} y_scale_bcast = f32[32,16] broadcast(y_scale), dimensions={} x_unscaled = f32[16,32] multiply(x_f32, x_scale_bcast) y_unscaled = f32[32,16] multiply(y_f32, y_scale_bcast) ROOT out = f32[16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; CheckFp8IfSupported(hlo_text); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK: custom_call_target="__cublas$lt$matmul$f8", )"); } TEST_P(ParameterizedFp8GemmRewriteTest, FnuzTypeF8) { const char* hlo_text = R"( HloModule test ENTRY test { x = f8e4m3fnuz[16,32] parameter(0) y = f8e4m3fnuz[32,16] parameter(1) x_f32 = f32[16,32] convert(x) y_f32 = f32[32,16] convert(y) x_scale = f32[] parameter(2) y_scale = f32[] parameter(3) x_scale_bcast = f32[16,32] broadcast(x_scale), dimensions={} y_scale_bcast = f32[32,16] broadcast(y_scale), dimensions={} x_unscaled = f32[16,32] multiply(x_f32, x_scale_bcast) y_unscaled = f32[32,16] multiply(y_f32, y_scale_bcast) ROOT out = f32[16,16] dot(x_unscaled, y_unscaled), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; if (IsCuda()) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text)); GemmRewriter pass( CudaHopperOrRocmMI300(), GetToolkitVersion(), 
GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&pass, module.get())); EXPECT_FALSE(changed); return; } if (IsRocm()) { EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-2, 1e-2})); RunAndFilecheckHloRewrite( hlo_text, GemmRewriter(CudaHopperOrRocmMI300(), GetToolkitVersion(), GemmRewriterOptions{GemmRewriterOptions::DType::kFp8Only}), R"( ; CHECK-LABEL: ENTRY %test ({{.*}}: f8e4m3fnuz[16,32], {{.*}}: f8e4m3fnuz[32,16], {{.*}}: f32[], {{.*}}: f32[]) -> f32[16,16] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f8e4m3fnuz[16,32]{1,0} parameter(0) ; CHECK-PTX-NEXT: [[P0_CV:%[^ ]+]] = f32[16,32]{1,0} convert([[P0]]) ; CHECK-PTX-NEXT: [[P2:%[^ ]+]] = f32[] parameter(2) ; CHECK-PTX-NEXT: [[P2_B:%[^ ]+]] = f32[16,32]{1,0} broadcast([[P2]]), dimensions={} ; CHECK-PTX-NEXT: [[P0_UNSCALED:%[^ ]+]] = f32[16,32]{1,0} multiply([[P0_CV]], [[P2_B]]) ; CHECK-PTX-NEXT: [[P1:%[^ ]+]] = f8e4m3fnuz[32,16]{1,0} parameter(1) ; CHECK-PTX-NEXT: [[P1_CV:%[^ ]+]] = f32[32,16]{1,0} convert([[P1]]) ; CHECK-PTX-NEXT: [[P3:%[^ ]+]] = f32[] parameter(3) ; CHECK-PTX-NEXT: [[P3_B:%[^ ]+]] = f32[32,16]{1,0} broadcast([[P3]]), dimensions={} ; CHECK-PTX-NEXT: [[P1_UNSCALED:%[^ ]+]] = f32[32,16]{1,0} multiply([[P1_CV]], [[P3_B]]) ; CHECK-PTX-NEXT: [[GEMM:%[^ ]+]] = {{.*}} custom-call([[P0_UNSCALED]], [[P1_UNSCALED]]), ; CHECK-GCN-NEXT: [[P1:%[^ ]+]] = f8e4m3fnuz[32,16]{1,0} parameter(1) ; CHECK-GCN-NEXT: [[P1_TRANSPOSE:%[^ ]+]] = <<F8E4M3>>[16,32]{1,0} transpose([[P1]]) ; CHECK-GCN-NEXT: [[P2:%[^ ]+]] = f32[] parameter(2) ; CHECK-GCN-NEXT: [[P3:%[^ ]+]] = f32[] parameter(3) ; CHECK-GCN-NEXT: [[C1:%[^ ]+]] = f32[] constant(1) ; CHECK-PTX: custom_call_target="<<CUBLAS_CUSTOM_CALL_TARGET_PLACEHOLDER>>", ; CHECK-GCN: custom_call_target="__cublas$lt$matmul$f8", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-PTX-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-GCN-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } } INSTANTIATE_TEST_SUITE_P(Fp8CublasTestsBothLegacyAndLt, ParameterizedFp8GemmRewriteTest, ::testing::Bool()); TEST_F(GemmRewriteTest, NoFuseBiasBroadcast) { const char* hlo = R"( HloModule module ENTRY main.10 { Arg_0.1 = f16[384,128]{1,0} parameter(0) Arg_1.2 = f16[128,256]{1,0} parameter(1) dot.4 = f16[384,256]{1,0} dot(Arg_0.1, Arg_1.2), lhs_contracting_dims={1}, rhs_contracting_dims={0} Arg_2.3 = f16[256]{0} parameter(2) reshape.5 = f16[1,256]{1,0} reshape(Arg_2.3) broadcast.6 = f16[1,256]{1,0} broadcast(reshape.5), dimensions={0,1} reshape.7 = f16[256]{0} reshape(broadcast.6) broadcast.8 = f16[384,256]{1,0} broadcast(reshape.7), dimensions={1} ROOT add.9 = f16[384,256]{1,0} add(dot.4, broadcast.8) })"; MatchOptimizedHlo(hlo, R"( )"); } TEST_F(GemmRewriteTest, ReduceOfBatchDot) { absl::string_view hlo_string = R"( HloModule test region_5.50 { Arg_0.51 = f32[] parameter(0) Arg_1.52 = f32[] parameter(1) ROOT add.53 = f32[] add(Arg_0.51, Arg_1.52) } ENTRY main { p0 = bf16[3,32,3,13]{3,2,1,0} parameter(0) p1 = bf16[3,32,3,64]{3,2,1,0} parameter(1) dot.95 = bf16[3,3,13,64]{3,2,1,0} dot(p0, p1), lhs_batch_dims={0,2}, lhs_contracting_dims={1}, rhs_batch_dims={0,2}, rhs_contracting_dims={1}, 
operand_precision={highest,highest} transpose.96 = bf16[3,64,3,13]{1,3,2,0} transpose(dot.95), dimensions={0,3,1,2} convert.101 = f32[3,64,3,13]{1,3,2,0} convert(transpose.96) constant.66 = f32[] constant(0.0) ROOT reduce.102 = f32[3,64,13]{2,1,0} reduce(convert.101, constant.66), dimensions={2}, to_apply=region_5.50 } )"; MatchOptimizedHlo(hlo_string, R"( )"); } TEST_F(GemmRewriteTest, DotWithBias) { const char* hlo = R"( HloModule m ENTRY main { p0 = f32[1024,1024] parameter(0) p1 = f32[1024,1024] parameter(1) p2 = f32[1024,1024] parameter(2) p3 = f32[1024,1024] parameter(3) dot0 = f32[1024,1024] dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} dot1 = f32[1024,1024] dot(p2, p3), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT root = f32[1024,1024] add(dot0, dot1) })"; const char* expected = R"() })"; RunAndFilecheckHloRewrite( hlo, GemmRewriter( se::CudaComputeCapability{}, stream_executor::SemanticVersion{0, 0, 0}, GemmRewriterOptions{GemmRewriterOptions::DType::kNonFp8Only}), expected); } TEST_F(GemmRewriteTest, DotWithoutBias) { const char* hlo = R"( HloModule m ENTRY main { p0 = f32[1024,1024] parameter(0) p1 = f32[1024,1024] parameter(1) p2 = f32[1024,1024] parameter(2) p3 = f32[1024,1024] parameter(3) dot0 = f32[1024,1024] dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} dot1 = f32[1024,1024] dot(p2, p3), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT root = f32[1024,1024] add(dot0, dot1) })"; const char* expected = R"() })"; RunAndFilecheckHloRewrite( hlo, GemmRewriter( se::CudaComputeCapability{}, stream_executor::SemanticVersion{0, 0, 0}, GemmRewriterOptions{GemmRewriterOptions::DType::kNonFp8Only, GemmRewriterOptions::BiasMode::kNoBias}), expected); } TEST_F(CublasLtGemmRewriteTest, CublasLtSuccessfullyMatchesLargeC64Lhs) { const char* hlo_text = R"( HloModule test ENTRY test { p0 = c64[2000,3000,3]{2,1,0} parameter(0) p1 = c64[3,6]{1,0} parameter(1) ROOT dot = c64[2000,3000,6]{2,1,0} dot(p0, p1), lhs_contracting_dims={2}, rhs_contracting_dims={0} } )"; if (IsCuda()) { MatchOptimizedHlo(hlo_text, R"(; CHECK: custom_call_target="__cublas$lt$matmul")"); } else { MatchOptimizedHlo(hlo_text, R"(; CHECK: custom_call_target="__cublas$gemm")"); } } TEST_F(CublasLtGemmRewriteTest, CublasLtOnlyMatchesLargeC64RhsPostAmpere) { const char* hlo_text = R"( HloModule test ENTRY test { p0 = c64[6,3]{1,0} parameter(0) p1 = c64[3,2000,3000]{2,1,0} parameter(1) ROOT dot = c64[6,2000,3000]{2,1,0} dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; if (HasCudaComputeCapability(se::CudaComputeCapability::Ampere())) { MatchOptimizedHlo(hlo_text, R"(; CHECK: custom_call_target="__cublas$lt$matmul")"); } else { MatchOptimizedHlo( hlo_text, R"(; CHECK-NOT: custom_call_target="__cublas$lt$matmul")"); } } class GemmRewriteAllocationTest : public GpuCodegenTest { public: void CheckNumberOfAllocations(const std::string& hlo, int expected_number_of_allocations) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> optimized_module, GetOptimizedModule(hlo)); if (allocator_ == nullptr) { allocator_ = std::make_unique<se::StreamExecutorMemoryAllocator>( backend().default_stream_executor()); } TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<Executable> executable, backend().compiler()->RunBackend(std::move(optimized_module), backend().default_stream_executor(), allocator_.get())); GpuExecutable* gpu_executable = static_cast<GpuExecutable*>(executable.get()); absl::Span<const BufferAllocation> allocations = gpu_executable->GetAllocations(); 
ASSERT_EQ(allocations.size(), expected_number_of_allocations); } DebugOptions GetDebugOptionsForTest() override { DebugOptions debug_options = GpuCodegenTest::GetDebugOptionsForTest(); debug_options.set_xla_gpu_gemm_rewrite_size_threshold(0); debug_options.set_xla_gpu_enable_triton_gemm(false); return debug_options; } private: std::unique_ptr<se::DeviceMemoryAllocator> allocator_; }; TEST_F(GemmRewriteAllocationTest, SharedBufferAssignment) { const char* hlo_text = R"( HloModule SharedBufferAssignment ENTRY AddDotsFunc { x = f32[2,2] parameter(0) y = f32[2,2] parameter(1) bias = f32[2,2] add(x, y) dot = f32[2,2] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT out = f32[2,2] add(dot, bias) } )"; CheckNumberOfAllocations(hlo_text, 4); EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); } class SmallDotGemmRewriteTest : public GemmRewriteTest { public: DebugOptions GetDebugOptionsForTest() override { DebugOptions debug_options = GemmRewriteTest::GetDebugOptionsForTest(); debug_options.set_xla_gpu_gemm_rewrite_size_threshold(100); return debug_options; } }; TEST_F(SmallDotGemmRewriteTest, SkipSmallMatrixMultiplicationRewrite) { const char* hlo_text = R"( HloModule SkipSmallMatrixRewrite ENTRY DotFunc { x = f32[3,3] parameter(0) y = f32[3,3] parameter(1) ROOT out = f32[3,3] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %DotFunc ({{.*}}: f32[3,3], {{.*}}: f32[3,3]) -> f32[3,3] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[3,3]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[3,3]{1,0} parameter(1) ; CHECK-NEXT: [[GEMM:%[^ ]+]] = {{.*}} dot([[P0]], [[P1]]), ; CHECK: lhs_contracting_dims={1}, rhs_contracting_dims={0} )"); } TEST_F(SmallDotGemmRewriteTest, LargeMatrixMultiplicationIsRewritten) { const char* hlo_text = R"( HloModule SkipSmallMatrixRewrite ENTRY DotFunc { x = f32[8,8] parameter(0) y = f32[8,8] parameter(1) ROOT out = f32[8,8] dot(x, y), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %DotFunc ({{.*}}: f32[8,8], {{.*}}: f32[8,8]) -> f32[8,8] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[8,8]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[8,8]{1,0} parameter(1) ; CHECK: {{[^ ]+}} = {{.*}} custom-call([[P0]], [[P1]]) )"); } } } }
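// A minimal sketch of the placeholder substitution the parameterized FP8 tests
// above rely on: their HLO templates carry tokens such as <<F8E4M3>> and
// <<precision>> that are expanded with absl::StrReplaceAll before the module is
// parsed and rewritten. The map below is illustrative only; the fixture's own
// replacements_ map is assumed to supply the datatype placeholders.
//
//   absl::flat_hash_map<absl::string_view, absl::string_view> replacements;
//   replacements["<<precision>>"] = "highest";
//   const std::string hlo_text = absl::StrReplaceAll(hlo_template, replacements);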
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/gemm_rewriter.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/gemm_rewriter_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
cbb3b921-f947-46ca-b80d-a3cd1c02ea80
cpp
tensorflow/tensorflow
dynamic_slice_fusion_rewriter
third_party/xla/xla/service/gpu/transforms/dynamic_slice_fusion_rewriter.cc
third_party/xla/xla/service/gpu/transforms/dynamic_slice_fusion_rewriter_test.cc
#include "xla/service/gpu/transforms/dynamic_slice_fusion_rewriter.h" #include <cstddef> #include <cstdint> #include <functional> #include <iterator> #include <optional> #include <string> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/container/inlined_vector.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/ffi/ffi_api.h" #include "xla/hlo/evaluator/hlo_evaluator.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/ir/hlo_schedule.h" #include "xla/literal_util.h" #include "xla/primitive_util.h" #include "xla/service/custom_call_target_registry.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/cublas_cudnn.h" #include "xla/service/gpu/gpu_constants.h" #include "xla/service/gpu/hlo_traversal.h" #include "xla/service/gpu/ir_emission_utils.h" #include "xla/service/pattern_matcher.h" #include "xla/service/while_loop_analysis.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/tools/hlo_extractor.h" #include "xla/util.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { namespace m = ::xla::match; using DefUseDataflowPath = absl::InlinedVector<HloInstruction*, 2>; using DefUseDataflowPaths = absl::InlinedVector<DefUseDataflowPath, 4>; using UseDefDataflowPath = absl::InlinedVector<HloInstruction*, 4>; using UseDefDataflowPaths = absl::InlinedVector<HloInstruction*, 8>; using DataflowPathView = absl::Span<HloInstruction* const>; using DataflowPathsView = absl::Span<DataflowPathView>; using InstructionSet = absl::flat_hash_set<HloInstruction*>; using OffsetValueMap = absl::flat_hash_map<HloInstruction*, std::vector<Literal>>; bool IsNoOp(const HloInstruction* hlo) { return HloPredicateIsOp<HloOpcode::kBitcast, HloOpcode::kTuple, HloOpcode::kGetTupleElement>(hlo); } bool IsCustomCall(const HloInstruction* hlo, absl::string_view platform_name) { auto* custom_call = DynCast<HloCustomCallInstruction>(hlo); if (custom_call == nullptr) return false; if (custom_call->shape().IsTuple() && absl::c_any_of( custom_call->shape().tuple_shapes(), [&](const Shape& sub_shape) { return sub_shape.IsToken(); })) return false; const std::string call_target_name = custom_call->custom_call_target(); bool is_ffi_custom_call = custom_call->api_version() == CustomCallApiVersion::API_VERSION_TYPED_FFI; void* call_target = CustomCallTargetRegistry::Global()->Lookup( call_target_name, std::string(platform_name)); absl::StatusOr<ffi::HandlerRegistration> handler_registration = ffi::FindHandler(call_target_name, platform_name); bool found_custom_call = !is_ffi_custom_call && call_target != nullptr; bool found_ffi_handler = is_ffi_custom_call && handler_registration.ok(); return found_custom_call || found_ffi_handler; } bool IsAlignedSlice(const HloInstruction* slice) { DCHECK(slice->opcode() == HloOpcode::kSlice || slice->opcode() == HloOpcode::kDynamicSlice || slice->opcode() == HloOpcode::kDynamicUpdateSlice) << "Unknown slice operation: " << slice->ToString(); if (!IsContiguousSlice(*slice)) return false; auto [full_shape, slice_shape] = [&] { if (auto* dus = 
DynCast<HloDynamicUpdateSliceInstruction>(slice)) { return std::make_pair(dus->shape(), dus->update()->shape()); } return std::make_pair(slice->operand(0)->shape(), slice->shape()); }(); auto strides = ShapeUtil::ByteStrides(slice_shape); if (!strides.has_value()) return false; for (auto dim : slice_shape.layout().minor_to_major()) { if ((strides.value()[dim] % kXlaAllocatedBufferAlignBytes) == 0) { return true; } if (slice_shape.dimensions(dim) < full_shape.dimensions(dim)) { return (slice->opcode() == HloOpcode::kSlice && (((*strides)[dim] * slice->slice_starts(dim)) % kXlaAllocatedBufferAlignBytes == 0)); } } return true; } std::optional<int64_t> GetWhileLoopTripCount(HloInstruction* whileop) { CHECK(whileop->opcode() == HloOpcode::kWhile); auto backend_config = whileop->backend_config<WhileLoopBackendConfig>(); if (!backend_config.ok() || !backend_config.value().has_known_trip_count()) { VLOG(4) << "Backend config not ok. Computing while loop trip count for " << whileop->name(); return ComputeWhileLoopTripCount(whileop); } int trip_count = backend_config.value().known_trip_count().n(); VLOG(4) << "Found trip count in backend config for " << whileop->name() << ": " << trip_count; return trip_count; } std::optional<std::vector<Literal>> GetValues(const HloInstruction* idx) { VLOG(3) << "Getting values for " << idx->name(); const HloComputation* computation = idx->parent(); if (!computation->IsWhileBodyComputation()) { VLOG(3) << "While calculating offset values for " << idx->name() << ", the parent computation(" << computation->name() << ") is not a while computation"; return std::nullopt; } HloInstruction* whileop = computation->WhileCallInstruction(); std::optional<int64_t> trip_count = GetWhileLoopTripCount(whileop); if (trip_count == std::nullopt) { VLOG(3) << "Unable to get trip count for " << whileop->name(); return std::nullopt; } auto root_tuple = computation->root_instruction(); if (root_tuple->opcode() != HloOpcode::kTuple) { VLOG(3) << "Root operation " << root_tuple->name() << " of computation " << computation->name() << " expected to be a tuple because it is a while body. 
Found: " << root_tuple->opcode(); return std::nullopt; } std::optional<int64_t> loop_indvar_tuple_idx = GetLoopInductionVarTupleIdx(whileop); if (loop_indvar_tuple_idx == std::nullopt) { VLOG(3) << "Unable to find tuple index for loop induction variable"; return std::nullopt; } auto update_operation = computation->root_instruction()->operand(*loop_indvar_tuple_idx); HloInstruction* loop_indvar = nullptr; for (auto instr : computation->instructions()) { if (instr->opcode() == HloOpcode::kGetTupleElement && instr->operand(0) == computation->parameter_instruction(0) && instr->tuple_index() == *loop_indvar_tuple_idx) { loop_indvar = instr; } } if (loop_indvar == nullptr) { VLOG(3) << "Unable to find get-tuple-element(" << computation->parameter_instruction(0)->name() << "), index=" << *loop_indvar_tuple_idx << " in " << computation->name(); return std::nullopt; } auto IsValidModule = [loop_indvar](std::unique_ptr<HloModule>& module) -> bool { if (module == nullptr || module->entry_computation()->num_parameters() != 1) return false; const HloInstruction* p0 = module->entry_computation()->parameter_instruction(0); if (p0->shape() != loop_indvar->shape()) { VLOG(4) << "Extracted module must depend only on the loop induction " "variable."; return false; }; return llvm::all_of(module->entry_computation()->instructions(), [](const HloInstruction* instr) { return instr->opcode() != HloOpcode::kPartitionId && instr->opcode() != HloOpcode::kReplicaId; }); }; auto params = computation->parameter_instructions(); if (params.size() != 1 || !params[0]->shape().IsTuple()) { VLOG(3) << "While loop parameter is expected to be a tuple."; return std::nullopt; } std::unique_ptr<HloModule> offset_module = ExtractModule( idx, -1, [loop_indvar, params](const HloInstruction* inst) -> bool { return inst != loop_indvar && llvm::find(params, inst) == params.end(); }, [](const HloInstruction* inst) -> ReplaceType { return ReplaceType::kReplaceParam; }); std::unique_ptr<HloModule> update_module = ExtractModule( update_operation, -1, [loop_indvar, params](const HloInstruction* inst) -> bool { return inst != loop_indvar && llvm::find(params, inst) == params.end(); }, [](const HloInstruction* inst) -> ReplaceType { return ReplaceType::kReplaceParam; }); if (!IsValidModule(offset_module) || !IsValidModule(update_module)) { return std::nullopt; } VLOG(3) << "Successfully generated offset and update modules"; std::vector<Literal> offset_values; absl::Status status = [&]() -> absl::Status { HloEvaluator evaluator; const Literal& init = whileop->operand(0)->operand(*loop_indvar_tuple_idx)->literal(); std::unique_ptr<Literal> updated_value = nullptr; for (int64_t i = 0; i < *trip_count; i++) { if (i == 0) { evaluator.ResetVisitStates(); TF_ASSIGN_OR_RETURN(offset_values.emplace_back(), evaluator.Evaluate(*offset_module, {&init})); CHECK(offset_values.back().shape() == idx->shape()); evaluator.ResetVisitStates(); TF_ASSIGN_OR_RETURN(Literal next_update_value, evaluator.Evaluate(*update_module, {&init})); updated_value = next_update_value.CloneToUnique(); } else { evaluator.ResetVisitStates(); TF_ASSIGN_OR_RETURN( offset_values.emplace_back(), evaluator.Evaluate(*offset_module, {updated_value.get()})); CHECK(offset_values.back().shape() == idx->shape()); evaluator.ResetVisitStates(); TF_ASSIGN_OR_RETURN( Literal next_update_value, evaluator.Evaluate(*update_module, {updated_value.get()})); updated_value = next_update_value.CloneToUnique(); } } VLOG(3) << "Offset values for " << idx->name() << ": " << absl::StrJoin(offset_values, 
",", [](std::string* out, const Literal& l) { out->append(l.ToString()); }); return absl::OkStatus(); }(); if (status.ok()) return offset_values; return std::nullopt; } absl::StatusOr<HloInstruction*> AddLoopIterationParam(HloInstruction* whileop) { CHECK(whileop->opcode() == HloOpcode::kWhile); HloComputation* while_body = whileop->while_body(); HloComputation* while_cond = whileop->while_condition(); const HloInstruction* while_init = whileop->operand(0); CHECK(while_init->opcode() == HloOpcode::kTuple); std::vector<HloInstruction*> new_init_operands(while_init->operands().begin(), while_init->operands().end()); PrimitiveType indvar_type = whileop->while_init() ->operand(*GetLoopInductionVarTupleIdx(whileop)) ->shape() .element_type(); new_init_operands.push_back(whileop->parent()->AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0( whileop->while_init() ->operand(*GetLoopInductionVarTupleIdx(whileop)) ->shape() .element_type(), 0)), "zero")); HloInstruction* new_while_init = whileop->parent()->AddInstruction( HloInstruction::CreateTuple(new_init_operands)); HloInstruction* new_whileop = whileop->parent()->AddInstruction( whileop->CloneWithNewOperands(new_while_init->shape(), {new_while_init})); if (whileop->IsRoot()) { absl::InlinedVector<HloInstruction*, 4> tuple_entries; tuple_entries.reserve(while_init->shape().tuple_shapes_size()); for (auto i = 0; i < while_init->shape().tuple_shapes_size(); i++) { tuple_entries.push_back(whileop->parent()->AddInstruction( HloInstruction::CreateGetTupleElement(new_whileop, i))); } HloInstruction* new_whileop_result = whileop->parent()->AddInstruction( HloInstruction::CreateTuple(tuple_entries)); TF_RETURN_IF_ERROR( whileop->parent()->ReplaceInstruction(whileop, new_whileop_result)); } else { TF_RETURN_IF_ERROR(whileop->parent()->ReplaceInstructionWithDifferentShape( whileop, new_whileop)); } while_cond->ReplaceParameter(0, HloInstruction::CreateParameter( 0, new_while_init->shape(), "new_param")); HloInstruction* new_body_param = while_body->ReplaceParameter( 0, HloInstruction::CreateParameter(0, new_while_init->shape(), "new_param")); HloInstruction* gte = while_body->AddInstruction( HloInstruction::CreateGetTupleElement( new_body_param, new_while_init->shape().tuple_shapes_size() - 1), "loop_iteration_count"); HloInstruction* c1 = while_body->AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0(indvar_type, 1)), "one"); HloInstruction* add = while_body->AddInstruction( HloInstruction::CreateBinary(gte->shape(), HloOpcode::kAdd, gte, c1), "updated_loop_iteration_count"); absl::InlinedVector<HloInstruction*, 2> old_return_tuple_operands = while_body->root_instruction()->operands(); std::vector<HloInstruction*> new_return_tuple_operands( old_return_tuple_operands.begin(), old_return_tuple_operands.end()); new_return_tuple_operands.push_back(add); HloInstruction* new_return_tuple = while_body->AddInstruction( HloInstruction::CreateTuple(new_return_tuple_operands)); while_body->set_root_instruction(new_return_tuple, true); return gte; } std::unique_ptr<HloInstruction> GetAsConstantInstruction( const std::vector<Literal>& offset_values) { if (offset_values.empty()) return nullptr; std::unique_ptr<HloInstruction> value = primitive_util::PrimitiveTypeSwitch<std::unique_ptr<HloInstruction>>( [&offset_values]( auto primitive_type_constant) -> std::unique_ptr<HloInstruction> { if constexpr (primitive_util::IsIntegralType( primitive_type_constant)) { using NativeT = typename primitive_util::PrimitiveTypeToNative< 
primitive_type_constant>::type; Array<NativeT> constantLiterals({(int64_t)offset_values.size()}); std::vector<NativeT> valuesAsTy; valuesAsTy.reserve(offset_values.size()); for (auto& i : offset_values) { valuesAsTy.push_back( static_cast<NativeT>(i.data<NativeT>()[0])); } constantLiterals.SetValues(valuesAsTy); return HloInstruction::CreateConstant( LiteralUtil::CreateFromArray(constantLiterals)); } return nullptr; }, offset_values[0].shape().element_type()); return value; } bool PopulateOffsetValueMap(const HloInstruction* matched_instr, OffsetValueMap& value_map) { OffsetValueMap local_value_map; if (auto dyn_idx_op = DynCast<HloDynamicIndexInstruction>(matched_instr); dyn_idx_op) { for (auto indexop : dyn_idx_op->index_operands()) { if (indexop->IsConstant()) continue; if (local_value_map.contains(indexop) || value_map.contains(indexop)) continue; std::optional<std::vector<Literal>> values = GetValues(indexop); if (values == std::nullopt) return false; if (values->empty() || !primitive_util::IsIntegralType( values->at(0).shape().element_type())) { return false; } std::transform(values->begin(), values->end(), std::back_inserter(local_value_map[indexop]), [](Literal& l) { return std::move(l); }); } } for (auto& [op, values] : local_value_map) { std::transform(values.begin(), values.end(), std::back_inserter(value_map[op]), [](Literal& l) { return std::move(l); }); } VLOG(2) << "Received " << local_value_map.size() << " new offsets."; return true; } absl::Status ReplaceOffsetCalculationWithArrayAccess( PtrVec<HloInstruction*> fusions, OffsetValueMap& value_map) { absl::flat_hash_map<HloComputation*, HloInstruction*> loop_iteration_param; for (auto& [instr, _] : value_map) { VLOG(2) << "Handling " << instr->name(); if (!instr->parent()->IsWhileBodyComputation()) { VLOG(2) << "It is not a while body computation"; return absl::InternalError( absl::StrFormat("%s is expected to be a while computation.", instr->parent()->name())); } if (loop_iteration_param.find(instr->parent()) != loop_iteration_param.end()) { VLOG(2) << "This was already handled"; continue; } VLOG(2) << "Adding loop iteration param for " << instr->parent()->name(); TF_ASSIGN_OR_RETURN( loop_iteration_param[instr->parent()], AddLoopIterationParam(instr->parent()->WhileCallInstruction())); } for (auto fusion_instr : fusions) { for (auto maybe_offset : fusion_instr->operands()) { if (value_map.find(maybe_offset) == value_map.end()) continue; HloInstruction* loop_counter = loop_iteration_param[fusion_instr->parent()]; HloComputation* fusion = fusion_instr->fused_instructions_computation(); loop_iteration_param[fusion] = fusion_instr->AddFusionOperand(loop_counter); break; } } for (auto fusion_instr : fusions) { absl::flat_hash_map<HloInstruction*, HloInstruction*> param_replacement_map; absl::InlinedVector<HloInstruction*, 4> parameters; HloComputation* fusion_comp = fusion_instr->fused_instructions_computation(); for (auto [idx, maybe_offset] : llvm::enumerate(fusion_instr->operands())) { HloInstruction* offset_param = fusion_instr->fused_instructions_computation()->parameter_instruction( idx); if (value_map.find(maybe_offset) == value_map.end() || param_replacement_map.contains(offset_param)) continue; std::vector<Literal>& values = value_map.at(maybe_offset); std::unique_ptr<HloInstruction> values_as_const_instruction = GetAsConstantInstruction(values); if (values_as_const_instruction == nullptr) { return absl::InternalError( "Unable to convert offsets into constant array."); } HloInstruction* array = 
fusion_comp->AddInstruction( std::move(values_as_const_instruction), "offset_values"); HloInstruction* ds = fusion_comp->AddInstruction(HloInstruction::CreateDynamicSlice( ShapeUtil::MakeShape(offset_param->shape().element_type(), {1}), array, {loop_iteration_param[fusion_comp]}, {1})); HloInstruction* offset = fusion_comp->AddInstruction( HloInstruction::CreateReshape(offset_param->shape(), ds), "offset"); param_replacement_map[offset_param] = offset; parameters.push_back(offset_param); } for (auto param = parameters.rbegin(); param != parameters.rend(); param++) { auto offset = param_replacement_map[*param]; TF_RETURN_IF_ERROR(fusion_comp->ReplaceInstruction(*param, offset)); } } return absl::OkStatus(); } UseDefDataflowPaths GetSlicedOperandPaths(const HloInstruction* instr, OffsetValueMap& value_map) { UseDefDataflowPaths sliced_operand_paths; InstructionSet processed_instrs; std::vector<std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>> aliasing_pairs; if (instr->opcode() == HloOpcode::kCustomCall) { aliasing_pairs = Cast<HloCustomCallInstruction>(instr)->output_to_operand_aliasing(); } absl::flat_hash_set<int64_t> aliased_operands; for (const auto& pair : aliasing_pairs) { aliased_operands.insert(pair.second.first); } for (const auto* operand : instr->operands()) { if (aliased_operands.contains(instr->operand_index(operand))) continue; UseDefDataflowPath maybe_sliced_operand_path; bool slice_found = false; auto maybe_slice_instr = HloBfsFindIf({operand}, [&](const HloInstruction* cur) { if (processed_instrs.contains(cur)) return true; maybe_sliced_operand_path.push_back(const_cast<HloInstruction*>(cur)); if (IsOpcodeAnyOf<HloOpcode::kDynamicSlice, HloOpcode::kSlice>(cur)) { if (IsAlignedSlice(cur)) { slice_found = true; return slice_found; } } return !IsNoOp(cur); }); if (maybe_slice_instr == std::nullopt) continue; bool valid_slice_status = PopulateOffsetValueMap(*maybe_slice_instr, value_map); if ((valid_slice_status && slice_found) || processed_instrs.contains(maybe_slice_instr.value())) { sliced_operand_paths.insert(sliced_operand_paths.end(), maybe_sliced_operand_path.rbegin(), maybe_sliced_operand_path.rend()); processed_instrs.insert(maybe_sliced_operand_path.begin(), maybe_sliced_operand_path.end()); } } sliced_operand_paths.push_back(const_cast<HloInstruction*>(instr)); return sliced_operand_paths; } DefUseDataflowPaths GetSlicedUserPaths(const HloInstruction* instr, OffsetValueMap& value_map) { DefUseDataflowPaths sliced_user_paths; InstructionSet processed_instrs; auto traverse_hlo_and_collect = [&](HloInstruction* start) { DefUseDataflowPath maybe_sliced_user_path; bool dus_found = false; auto maybe_dus_instr = HloBfsFindIf( {start}, [&](const HloInstruction* cur) { if (processed_instrs.contains(cur)) return true; maybe_sliced_user_path.push_back(const_cast<HloInstruction*>(cur)); if (const auto slice_instr = DynCast<HloDynamicUpdateSliceInstruction>(cur)) { if (IsAlignedSlice(slice_instr)) { dus_found = true; return true; } } return cur->user_count() > 1 || !IsNoOp(cur); }, false); if (maybe_dus_instr == std::nullopt) return; bool valid_slice_status = PopulateOffsetValueMap(*maybe_dus_instr, value_map); if ((valid_slice_status && dus_found) || processed_instrs.contains(maybe_dus_instr.value())) { processed_instrs.insert(maybe_sliced_user_path.begin(), maybe_sliced_user_path.end()); sliced_user_paths.push_back(std::move(maybe_sliced_user_path)); } }; if (instr->shape().IsTuple()) { for (auto* user : instr->users()) { if (DynCast<HloGetTupleElementInstruction>(user)) { 
traverse_hlo_and_collect(user); } } } else { if (instr->user_count() == 1) { traverse_hlo_and_collect(instr->users().front()); } } return sliced_user_paths; } absl::InlinedVector<HloInstruction*, 4> GetPatternCaptures( DataflowPathView matches) { absl::InlinedVector<HloInstruction*, 4> captures; InstructionSet matched_instrs(matches.begin(), matches.end()); for (HloInstruction* instr : matches) { for (HloInstruction* operand : instr->operands()) { if (!matched_instrs.contains(operand) && absl::c_find(captures, operand) == captures.end()) { captures.emplace_back(operand); } } } return captures; } absl::Status CreateRootTuple( HloInstruction* hero, HloComputation::Builder& builder, DataflowPathsView sliced_user_paths, absl::flat_hash_map<const HloInstruction*, HloInstruction*>& instr_mapping) { unsigned tuple_size = hero->shape().tuple_shapes_size(); std::vector<HloInstruction*> sliced_elems(tuple_size, nullptr); for (auto& sliced_user_path : sliced_user_paths) { auto gte = Cast<HloGetTupleElementInstruction>(sliced_user_path.front()); sliced_elems[gte->tuple_index()] = sliced_user_path.back(); } std::vector<HloInstruction*> elements; for (size_t i = 0; i < tuple_size; ++i) { if (sliced_elems[i] != nullptr) { elements.push_back(instr_mapping[sliced_elems[i]]); continue; } auto* gte = builder.AddInstruction( HloInstruction::CreateGetTupleElement(instr_mapping[hero], i)); if (hero->shape().tuple_shapes(i).IsTuple()) { instr_mapping[gte] = gte; TF_RETURN_IF_ERROR(CreateRootTuple(gte, builder, {}, instr_mapping)); elements.push_back(builder.last_added_instruction()); } else { elements.push_back(gte); } } if (elements.size() > 1) builder.AddInstruction(HloInstruction::CreateTuple(elements)); return absl::OkStatus(); } absl::StatusOr<HloComputation*> CreateFusionBody( HloModule* module, DataflowPathView sliced_operand_paths, DataflowPathsView sliced_user_paths, DataflowPathView captures) { HloComputation::Builder builder("dynamic-slice-fusion"); absl::flat_hash_map<const HloInstruction*, HloInstruction*> instr_mapping; auto mapped_operands = [&](HloInstruction* instr) { absl::InlinedVector<HloInstruction*, 4> operands; for (HloInstruction* operand : instr->operands()) { operands.push_back(instr_mapping.at(operand)); } return operands; }; for (const HloInstruction* capture : captures) { int64_t index = instr_mapping.size(); instr_mapping[capture] = builder.AddInstruction(HloInstruction::CreateParameter( index, capture->shape(), absl::StrCat("p", index))); } HloInstruction* hero; for (HloInstruction* instr : sliced_operand_paths) { instr_mapping[instr] = builder.AddInstruction( instr->CloneWithNewOperands(instr->shape(), mapped_operands(instr))); hero = instr; } for (auto& sliced_user_path : sliced_user_paths) { for (HloInstruction* instr : sliced_user_path) { instr_mapping[instr] = builder.AddInstruction( instr->CloneWithNewOperands(instr->shape(), mapped_operands(instr))); } } if (hero->shape().IsTuple() && hero->shape().tuple_shapes_size() > 0) { TF_RETURN_IF_ERROR( CreateRootTuple(hero, builder, sliced_user_paths, instr_mapping)); } return module->AddComputationAndUnifyNamesAndIds(builder.Build(), false); } absl::StatusOr<HloInstruction*> CreateFusionInstruction( HloModule* module, HloInstruction* orig, DataflowPathView captures, HloComputation* body, bool dynamic) { HloComputation* parent = orig->parent(); HloInstruction* fusion = parent->AddInstruction(HloInstruction::CreateFusion( body->root_instruction()->shape(), HloInstruction::FusionKind::kCustom, captures, body)); 
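// Note (added comment): the statements below rename the newly created fusion to
// "address_computation" and attach a "__custom_fusion" GpuBackendConfig whose
// CustomFusionConfig name is "dynamic_address_computation" when dynamic slices were
// matched and "address_computation" otherwise.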
module->SetAndUniquifyInstrName(fusion, "address_computation"); GpuBackendConfig gpu_config; FusionBackendConfig& backend_config = *gpu_config.mutable_fusion_backend_config(); backend_config.set_kind("__custom_fusion"); CustomFusionConfig config; config.set_name(dynamic ? "dynamic_address_computation" : "address_computation"); *backend_config.mutable_custom_fusion_config() = config; TF_RETURN_IF_ERROR(fusion->set_backend_config(std::move(gpu_config))); return fusion; } } absl::StatusOr<bool> DynamicSliceFusionRewriter::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { absl::flat_hash_map<HloInstruction*, std::pair<UseDefDataflowPaths, DefUseDataflowPaths>> matches_kv; std::vector<HloInstruction*> matches; OffsetValueMap value_map; for (HloComputation* computation : module->computations()) { if (computation->IsFusionComputation()) continue; for (HloInstruction* instr : computation->instructions()) { if ((instr->opcode() == HloOpcode::kReduceScatter && instr->shape().IsArray()) || IsLegacyCublasMatmul(*instr) || IsCustomCall(instr, platform_name_)) { UseDefDataflowPaths sliced_operand_paths = GetSlicedOperandPaths(instr, value_map); VLOG(1) << "For operation: " << instr->name() << ", operands: " << absl::StrJoin( sliced_operand_paths, ",", [](std::string* out, const HloInstruction* inst) { out->append(inst->name()); }); bool has_sliced_operand_paths = sliced_operand_paths.size() > 1; DefUseDataflowPaths sliced_user_paths = GetSlicedUserPaths(instr, value_map); VLOG(1) << "For operation: " << instr->name() << ", users: " << absl::StrJoin( sliced_user_paths, ",", [](std::string* out, const DefUseDataflowPath& path) { out->append( "{" + absl::StrJoin(path, ",", [](std::string* out, const HloInstruction* inst) { out->append(inst->name()); }) + "}"); }); bool has_sliced_user_paths = absl::c_any_of( sliced_user_paths, [&](auto& sliced_user_path) { return !sliced_user_path.empty(); }); if (absl::c_any_of(sliced_user_paths, [&](auto& sliced_user_path) { return DynCast<HloDynamicUpdateSliceInstruction>( sliced_user_path.back()) == nullptr; })) { return absl::InternalError( "Expect sliced user path to end with a DUS."); } if (has_sliced_operand_paths || has_sliced_user_paths) { matches_kv[instr] = std::make_pair(std::move(sliced_operand_paths), std::move(sliced_user_paths)); matches.push_back(instr); } } } } if (matches.empty()) return false; PtrVec<HloInstruction*> fusions; for (HloInstruction* hero : matches) { auto& paths = matches_kv[hero]; auto& [sliced_operand_paths, sliced_user_paths] = paths; std::vector<HloInstruction*> matched_instrs; absl::c_copy(sliced_operand_paths, std::back_inserter(matched_instrs)); std::vector<DataflowPathView> sliced_user_paths_view; for (auto& sliced_user_path : sliced_user_paths) { absl::c_copy(sliced_user_path, std::back_inserter(matched_instrs)); DataflowPathView sliced_user_path_view{&sliced_user_path.front(), sliced_user_path.size()}; sliced_user_paths_view.push_back(std::move(sliced_user_path_view)); } auto captures = GetPatternCaptures(matched_instrs); TF_ASSIGN_OR_RETURN( HloComputation * fusion_body, CreateFusionBody(module, sliced_operand_paths, DataflowPathsView(sliced_user_paths_view), captures)); bool has_dynamic_slices = absl::c_any_of(matched_instrs, [&](auto* instr) { return DynCast<HloDynamicIndexInstruction>(instr) != nullptr; }); TF_ASSIGN_OR_RETURN( HloInstruction * fusion, CreateFusionInstruction(module, hero, captures, fusion_body, has_dynamic_slices)); fusions.push_back(fusion); HloComputation* parent = 
hero->parent(); if (fusion->shape().IsTuple()) { TF_RETURN_IF_ERROR(parent->ReplaceInstructionWithDifferentShape( const_cast<HloInstruction*>(hero), fusion)); for (auto& sliced_user_path : sliced_user_paths) { auto old_gte = Cast<HloGetTupleElementInstruction>(sliced_user_path.front()); HloInstruction* gte = parent->AddInstruction(HloInstruction::CreateGetTupleElement( fusion, old_gte->tuple_index())); TF_RETURN_IF_ERROR( parent->ReplaceInstruction(sliced_user_path.back(), gte)); } } else { auto* instr_to_be_replaced = const_cast<HloInstruction*>(hero); if (sliced_user_paths.empty()) { if (hero->shape().IsTuple()) { if (hero->user_count() != 1 || !DynCast<HloGetTupleElementInstruction>(hero->users().front())) { return absl::InternalError( "Expect a single get-tuple-element user of the original " "tuple-shaped hero op when address computation fusion does " "not return a tuple"); } instr_to_be_replaced = hero->users().front(); } } else { instr_to_be_replaced = sliced_user_paths.front().back(); } TF_RETURN_IF_ERROR( parent->ReplaceInstruction(instr_to_be_replaced, fusion)); if (hero->parent()) { TF_RETURN_IF_ERROR(hero->parent()->RemoveInstruction(hero)); } } } TF_RETURN_IF_ERROR( ReplaceOffsetCalculationWithArrayAccess(fusions, value_map)); return true; } } }
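// Usage sketch (added for illustration; not part of the original file): besides the
// RunAndFilecheckHloRewrite helper used throughout the tests below, the Run overload
// defined above can be invoked directly on a module. The platform name "gpu" matches
// what the tests pass to the constructor; "module" is assumed to be an existing
// std::unique_ptr<HloModule>.
//
//   DynamicSliceFusionRewriter rewriter("gpu");
//   absl::StatusOr<bool> changed =
//       rewriter.Run(module.get(), /*execution_threads=*/{});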
#include "xla/service/gpu/transforms/dynamic_slice_fusion_rewriter.h" #include <cstddef> #include <optional> #include "absl/status/status.h" #include "xla/client/lib/constants.h" #include "xla/client/xla_builder.h" #include "xla/ffi/ffi.h" #include "xla/ffi/ffi_api.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_schedule.h" #include "xla/service/custom_call_target_registry.h" #include "xla/service/gpu/gpu_device_info_for_tests.h" #include "xla/service/hlo_module_config.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/stream_executor/device_memory.h" #include "xla/stream_executor/gpu/gpu_types.h" #include "xla/stream_executor/stream.h" #include "xla/tests/filecheck.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/statusor.h" #include "tsl/platform/test.h" namespace xla::gpu { class DynamicSliceFusionRewriterTest : public HloTestBase {}; TEST_F(DynamicSliceFusionRewriterTest, SimpleGemm) { const char* hlo = R"( HloModule test ENTRY %main.9 { %p0 = f16[2,8,8]{2,1,0} parameter(0) %p1 = f16[2,8,8]{2,1,0} parameter(1) %slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[1:2], [0:8], [0:8]} %bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13) %slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]} %bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14) ROOT %custom-call.1 = f16[8,8]{1,0} custom-call(%bitcast.41, %bitcast.42), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} } )"; const char* expected = R"( ; CHECK: %dynamic-slice-fusion{{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1) ; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]]) ; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]]) ; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]), ; CHECK: custom_call_target="__cublas$gemm" ; CHECK: } ; CHECK: ENTRY %main{{.*}} { ; CHECK: ROOT [[FUSION:%[^ ]+]] = f16[8,8]{1,0} fusion ; CHECK: kind=kCustom, calls=%dynamic-slice-fusion, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"address_computation","kernel_index":0} ; CHECK: } ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected); } TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmWithWorkspace) { const char* hlo = R"( HloModule test ENTRY %main.9 { %p0 = f16[2,8,8]{2,1,0} parameter(0) %p1 = f16[2,8,8]{2,1,0} parameter(1) %slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[1:2], [0:8], [0:8]} %bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13) %slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]} %bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14) ROOT %custom-call.1 = (f16[8,8]{1,0}, s8[256]{0}) custom-call(%bitcast.41, %bitcast.42), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], 
"rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} } )"; const char* expected = R"( ; CHECK: %dynamic-slice-fusion{{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1) ; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]]) ; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]]) ; CHECK: [[CC:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0}) custom-call([[B0]], [[B1]]), ; CHECK: custom_call_target="__cublas$gemm" ; CHECK: [[DOT:%[^ ]+]] = f16[8,8]{1,0} get-tuple-element([[CC]]), index=0 ; CHECK: [[WORKSPACE:%[^ ]+]] = s8[256]{0} get-tuple-element([[CC]]), index=1 ; CHECK: ROOT [[TUPLE:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0}) ; CHECK: tuple([[DOT]], [[WORKSPACE]]) ; CHECK: } ; CHECK: ENTRY %main{{.*}} { ; CHECK: ROOT [[FUSION:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0}) fusion ; CHECK: kind=kCustom, calls=%dynamic-slice-fusion, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"address_computation","kernel_index":0} ; CHECK: } ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected); } TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmWorkspaceIgnored) { const char* hlo = R"( HloModule test ENTRY %main.9 { %p0 = f16[2,8,8]{2,1,0} parameter(0) %p1 = f16[2,8,8]{2,1,0} parameter(1) %slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[1:2], [0:8], [0:8]} %bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13) %slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]} %bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14) %custom-call.1 = (f16[8,8]{1,0}, s8[256]{0}) custom-call(%bitcast.41, %bitcast.42), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} ROOT %get-tuple-element.0 = f16[8,8]{1,0} get-tuple-element(%custom-call.1), index=0 } )"; const char* expected = R"( ; CHECK: %dynamic-slice-fusion{{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1) ; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]]) ; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]]) ; CHECK: [[CC:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0}) custom-call([[B0]], [[B1]]), ; CHECK: custom_call_target="__cublas$gemm" ; CHECK: [[DOT:%[^ ]+]] = f16[8,8]{1,0} get-tuple-element([[CC]]), index=0 ; CHECK: [[WORKSPACE:%[^ ]+]] = s8[256]{0} get-tuple-element([[CC]]), index=1 ; CHECK: ROOT [[TUPLE:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0}) ; CHECK: tuple([[DOT]], [[WORKSPACE]]) ; CHECK: } ; CHECK: ENTRY %main{{.*}} { ; CHECK: 
[[FUSION:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0}) fusion ; CHECK: kind=kCustom, calls=%dynamic-slice-fusion, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"address_computation","kernel_index":0} ; CHECK: } ; CHECK: ROOT [[DOT_MAIN:%[^ ]+]] = f16[8,8]{1,0} get-tuple-element([[FUSION]]), index=0 ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected); } TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmNotRoot) { const char* hlo = R"( HloModule test ENTRY %main.9 { %p0 = f16[2,8,8]{2,1,0} parameter(0) %p1 = f16[2,8,8]{2,1,0} parameter(1) %slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[1:2], [0:8], [0:8]} %bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13) %slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]} %bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14) %custom-call.1 = f16[8,8]{1,0} custom-call(%bitcast.41, %bitcast.42), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} ROOT %res = f16[8,8]{1,0} add(%custom-call.1, %custom-call.1) } )"; const char* expected = R"( ; CHECK: %dynamic-slice-fusion{{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1) ; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]]) ; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]]) ; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]), ; CHECK: custom_call_target="__cublas$gemm" ; CHECK: } ; CHECK: ENTRY %main{{.*}} { ; CHECK: [[FUSION:%[^ ]+]] = f16[8,8]{1,0} fusion ; CHECK: kind=kCustom, calls=%dynamic-slice-fusion, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"address_computation","kernel_index":0} ; CHECK: } ; CHECK: ROOT {{.*}} = f16[8,8]{1,0} add([[FUSION]], [[FUSION]]) ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected); } TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmOperandHasMultipleUsers) { const char* hlo = R"( HloModule test ENTRY %main.9 { %p0 = f16[2,8,8]{2,1,0} parameter(0) %p1 = f16[4,8,8]{2,1,0} parameter(1) %slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[1:2], [0:8], [0:8]} %bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13) %slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[2:3], [0:8], [0:8]} %bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14) %custom-call.1 = f16[8,8]{1,0} custom-call(%bitcast.41, %bitcast.42), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} ROOT %res = 
f16[8,8]{1,0} add(%custom-call.1, %bitcast.41) } )"; const char* expected = R"( ; CHECK: %dynamic-slice-fusion{{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f16[4,8,8]{2,1,0} parameter(1) ; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]]) ; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[2:3], [0:8], [0:8]} ; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]]) ; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]), ; CHECK: custom_call_target="__cublas$gemm" ; CHECK: } ; CHECK: ENTRY %main{{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f16[4,8,8]{2,1,0} parameter(1) ; CHECK-DAG: [[FUSION:%[^ ]+]] = f16[8,8]{1,0} fusion([[P0]], [[P1]]) ; CHECK-DAG: kind=kCustom, calls=%dynamic-slice-fusion, ; CHECK-DAG: backend_config={ ; CHECK-DAG: "kind":"__custom_fusion", ; CHECK-DAG: "custom_fusion_config":{"name":"address_computation","kernel_index":0} ; CHECK-DAG: } ; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]]) ; CHECK: ROOT {{.*}} = f16[8,8]{1,0} add([[FUSION]], [[B0]]) ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected); } TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmOperandsHaveMultipleUsers) { const char* hlo = R"( HloModule test ENTRY %main.9 { %p0 = f16[2,8,8]{2,1,0} parameter(0) %p1 = f16[2,8,8]{2,1,0} parameter(1) %slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[1:2], [0:8], [0:8]} %bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13) %slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]} %bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14) %custom-call.0 = f16[8,8]{1,0} custom-call(%bitcast.41, %bitcast.42), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} ROOT %custom-call.1 = f16[8,8]{1,0} custom-call(%bitcast.42, %bitcast.41), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} } )"; const char* expected = R"( ; CHECK: %dynamic-slice-fusion{{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1) ; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]]) ; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]]) ; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]), ; CHECK: custom_call_target="__cublas$gemm" ; CHECK: } ; CHECK: 
%dynamic-slice-fusion{{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1) ; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]]) ; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]]) ; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]), ; CHECK: custom_call_target="__cublas$gemm" ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected); } TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmSlicingNotParameter) { const char* hlo = R"( HloModule test ENTRY %main.9 { %p0 = f16[4,8,8]{2,1,0} parameter(0) %p1 = f16[2,8,8]{2,1,0} parameter(1) %slice.12 = f16[2,8,8]{2,1,0} slice(%p0), slice={[0:2], [0:8], [0:8]} %slice.13 = f16[1,8,8]{2,1,0} slice(%slice.12), slice={[1:2], [0:8], [0:8]} %bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13) %slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]} %bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14) %custom-call.1 = f16[8,8]{1,0} custom-call(%bitcast.41, %bitcast.42), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} ROOT %res = f16[8,8]{1,0} add(%custom-call.1, %custom-call.1) } )"; const char* expected = R"( ; CHECK: %dynamic-slice-fusion{{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1) ; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]]) ; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]]) ; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]), ; CHECK: custom_call_target="__cublas$gemm" ; CHECK: } ; CHECK: ENTRY %main{{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[4,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[S0:%[^ ]+]] = f16[2,8,8]{2,1,0} slice([[P0]]), slice={[0:2], [0:8], [0:8]} ; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1) ; CHECK: [[FUSION:%[^ ]+]] = f16[8,8]{1,0} fusion([[S0]], [[P1]]) ; CHECK: kind=kCustom, calls=%dynamic-slice-fusion, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"address_computation","kernel_index":0} ; CHECK: } ; CHECK: ROOT {{.*}} = f16[8,8]{1,0} add([[FUSION]], [[FUSION]]) ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected); } TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmNotContiguousSlice) { const char* hlo = R"( HloModule test ENTRY %main.9 { %p0 = f16[2,8,8]{2,1,0} parameter(0) %p1 = f16[2,8,8]{2,1,0} parameter(1) %slice.13 = f16[1,4,6]{2,1,0} slice(%p0), slice={[1:2], [0:4], [0:6]} %bitcast.41 = f16[4,6]{1,0} bitcast(%slice.13) %slice.14 = f16[1,6,4]{2,1,0} slice(%p1), slice={[1:2], [0:6], [0:4]} %bitcast.42 = f16[6,4]{1,0} 
bitcast(%slice.14) ROOT %custom-call.1 = f16[4,4]{1,0} custom-call(%bitcast.41, %bitcast.42), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), std::nullopt); } TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmNonNoOpInSliceChain) { const char* hlo = R"( HloModule test ENTRY %main.9 { %p0 = f16[2,8,8]{2,1,0} parameter(0) %p1 = f16[2,8,8]{2,1,0} parameter(1) %slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[0:1], [0:8], [0:8]} %slice.14 = f16[1,8,8]{2,1,0} slice(%p0), slice={[1:2], [0:8], [0:8]} %add.0 = f16[1,8,8]{2,1,0} add(%slice.13, %slice.14) %bitcast.41 = f16[8,8]{1,0} bitcast(%add.0) %slice.15 = f16[1,8,8]{2,1,0} slice(%p1), slice={[0:1], [0:8], [0:8]} %slice.16 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]} %add.1 = f16[1,8,8]{2,1,0} add(%slice.15, %slice.16) %bitcast.42 = f16[8,8]{1,0} bitcast(%add.1) ROOT %custom-call.1 = f16[8,8]{1,0} custom-call(%bitcast.41, %bitcast.42), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), std::nullopt); } TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmDuplicateOperand) { const char* hlo = R"( HloModule test ENTRY %main { %p0 = (f32[100,100]{1,0}, f32[100,100]{1,0}) parameter(0) %get-tuple-element.240 = f32[100,100]{1,0} get-tuple-element(%p0), index=0 %get-tuple-element.241 = f32[100,100]{1,0} get-tuple-element(%p0), index=1 %concatenate.10 = f32[200,100]{1,0} concatenate(%get-tuple-element.240, %get-tuple-element.241), dimensions={0} %custom-call.16 = (f32[200,100]{1,0}, s8[120000]{0}) custom-call(%concatenate.10, %get-tuple-element.240), custom_call_target="__cublas$gemm", backend_config={ "gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["HIGHEST","HIGHEST"]}, "epilogue":"DEFAULT", "lhs_stride":"20000", "rhs_stride":"10000", "grad_x":false, "grad_y":false } } %get-tuple-element.97 = f32[200,100]{1,0} get-tuple-element(%custom-call.16), index=0 %slice.26 = f32[100,100]{1,0} slice(%get-tuple-element.97), slice={[0:100], [0:100]} ROOT %custom-call.17 = (f32[100,100]{1,0}, s8[80000]{0}) custom-call(%slice.26, %slice.26), custom_call_target="__cublas$gemm", backend_config={ "gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, 
"precision_config":{"operand_precision":["HIGHEST","HIGHEST"]}, "epilogue":"DEFAULT", "lhs_stride":"10000", "rhs_stride":"10000", "grad_x":false, "grad_y":false } } })"; const char* expected = R"( ; CHECK: %dynamic-slice-fusion{{.*}} { ; CHECK: [[P0:%[^ ]+]] = f32[200,100]{1,0} parameter(0) ; CHECK: [[S0:%[^ ]+]] = f32[100,100]{1,0} slice([[P0]]), slice={[0:100], [0:100]} ; CHECK-NOT: slice ; CHECK: [[CC:%[^ ]+]] = (f32[100,100]{1,0}, s8[80000]{0}) custom-call([[S0]], [[S0]]), ; CHECK: custom_call_target="__cublas$gemm" ; CHECK: } ; CHECK: ENTRY %main{{.*}} { ; CHECK: ROOT [[FUSION:%[^ ]+]] = (f32[100,100]{1,0}, s8[80000]{0}) fusion ; CHECK: kind=kCustom, calls=%dynamic-slice-fusion, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"address_computation","kernel_index":0} ; CHECK: } ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected); } TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmReverseOperandOrder) { const char* hlo = R"( HloModule test ENTRY %main.9 { %p0 = f16[2,8,8]{2,1,0} parameter(1) %slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[0:1], [0:8], [0:8]} %bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13) %p1 = f16[2,8,8]{2,1,0} parameter(0) %slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]} %bitcast.42 = f16[8,8]{1,0} bitcast(%slice.14) ROOT %custom-call.1 = f16[8,8]{1,0} custom-call(%bitcast.41, %bitcast.42), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} } )"; const char* expected = R"( ; CHECK: %dynamic-slice-fusion{{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1) ; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[0:1], [0:8], [0:8]} ; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]]) ; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]]) ; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]), ; CHECK: custom_call_target="__cublas$gemm" ; CHECK: } ; CHECK: ENTRY %main{{.*}} { ; CHECK-DAG: [[A0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1) ; CHECK-DAG: [[A1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK: ROOT [[FUSION:%[^ ]+]] = f16[8,8]{1,0} fusion([[A0]], [[A1]]) ; CHECK: kind=kCustom, calls=%dynamic-slice-fusion, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"address_computation","kernel_index":0} ; CHECK: } ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected); } TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmReverseOperandOrder2) { const char* hlo = R"( HloModule test ENTRY %main.9 { %p0 = f16[2,8,8]{2,1,0} parameter(0) %slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[0:1], [0:8], [0:8]} %bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13) %p1 = f16[2,8,8]{2,1,0} parameter(1) %slice.14 = f16[1,8,8]{2,1,0} slice(%p1), slice={[1:2], [0:8], [0:8]} %bitcast.42 = f16[8,8]{1,0} 
bitcast(%slice.14) ROOT %custom-call.1 = f16[8,8]{1,0} custom-call(%bitcast.42, %bitcast.41), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} } )"; const char* expected = R"( ; CHECK: %dynamic-slice-fusion{{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1) ; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[1:2], [0:8], [0:8]} ; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]]) ; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P1]]), slice={[0:1], [0:8], [0:8]} ; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]]) ; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]), ; CHECK: custom_call_target="__cublas$gemm" ; CHECK: } ; CHECK: ENTRY %main{{.*}} { ; CHECK-DAG: [[A0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(1) ; CHECK-DAG: [[A1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK: ROOT [[FUSION:%[^ ]+]] = f16[8,8]{1,0} fusion([[A0]], [[A1]]) ; CHECK: kind=kCustom, calls=%dynamic-slice-fusion, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"address_computation","kernel_index":0} ; CHECK: } ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected); } TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmOperandAliasingOutput) { const char* hlo = R"( HloModule test ENTRY %main.9 { %p0 = (f32[100,100]{1,0}, f32[100,100]{1,0}) parameter(0) %get-tuple-element.287 = f32[100,100]{1,0} get-tuple-element(%p0), index=0 %get-tuple-element.288 = f32[100,100]{1,0} get-tuple-element(%p0), index=1 %concatenate.12 = f32[200,100]{1,0} concatenate(%get-tuple-element.287, %get-tuple-element.288), dimensions={0} %slice.30 = f32[100,100]{1,0} slice(%concatenate.12), slice={[16:116], [0:100]} %slice.34 = f32[100,100]{1,0} slice(%concatenate.12), slice={[99:199], [0:100]} ROOT %cublas-gemm.15 = (f32[100,100]{1,0}, s8[120000]{0}) custom-call(%get-tuple-element.287, %slice.30, %slice.34), custom_call_target="__cublas$gemm", output_to_operand_aliasing={{0}: (2, {})}, backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":1, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["HIGHEST","HIGHEST"]}, "epilogue":"DEFAULT", "lhs_stride":"10000", "rhs_stride":"10000", "grad_x":false, "grad_y":false }} } )"; const char* expected = R"( ; CHECK: %dynamic-slice-fusion{{.*}} { ; CHECK-DAG: [[P2:%[^ ]+]] = f32[100,100]{1,0} parameter(2) ; CHECK-DAG: [[P1:%[^ ]+]] = f32[100,100]{1,0} parameter(1) ; CHECK-DAG: [[P0:%[^ ]+]] = f32[200,100]{1,0} parameter(0) ; CHECK-DAG: [[S1:%[^ ]+]] = f32[100,100]{1,0} slice([[P0]]), slice={[16:116], [0:100]} ; CHECK: [[CC:%[^ ]+]] = (f32[100,100]{1,0}, s8[120000]{0}) custom-call([[P1]], [[S1]], [[P2]]), ; CHECK: custom_call_target="__cublas$gemm" ; CHECK: } ; CHECK: ENTRY %main{{.*}} { ; CHECK: [[P:%[^ ]+]] = (f32[100,100]{1,0}, f32[100,100]{1,0}) parameter(0) ; CHECK: [[GTE0:%[^ ]+]] = 
f32[100,100]{1,0} get-tuple-element([[P]]), index=0 ; CHECK: [[GTE1:%[^ ]+]] = f32[100,100]{1,0} get-tuple-element([[P]]), index=1 ; CHECK: [[CONCAT:%[^ ]+]] = f32[200,100]{1,0} concatenate([[GTE0]], [[GTE1]]), dimensions={0} ; CHECK: [[S:%[^ ]+]] = f32[100,100]{1,0} slice([[CONCAT]]), slice={[99:199], [0:100]} ; CHECK: ROOT [[FUSION:%[^ ]+]] = (f32[100,100]{1,0}, s8[120000]{0}) fusion([[CONCAT]], [[GTE0]], [[S]]) ; CHECK: kind=kCustom, calls=%dynamic-slice-fusion, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"address_computation","kernel_index":0} ; CHECK: } ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected); } TEST_F(DynamicSliceFusionRewriterTest, SimpleGemmOperandsFromSameSlice) { const char* hlo = R"( HloModule test ENTRY %main.9 { %p0 = f16[2,8,8]{2,1,0} parameter(0) %slice.13 = f16[1,8,8]{2,1,0} slice(%p0), slice={[0:1], [0:8], [0:8]} %bitcast.41 = f16[8,8]{1,0} bitcast(%slice.13) %bitcast.42 = f16[8,8]{0,1} bitcast(%slice.13) ROOT %custom-call.1 = f16[8,8]{1,0} custom-call(%bitcast.41, %bitcast.42), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} } )"; const char* expected = R"( ; CHECK: %dynamic-slice-fusion{{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} slice([[P0]]), slice={[0:1], [0:8], [0:8]} ; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]]) ; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{0,1} bitcast([[S0]]) ; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]), ; CHECK: custom_call_target="__cublas$gemm" ; CHECK: } ; CHECK: ENTRY %main{{.*}} { ; CHECK-DAG: [[A0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK: ROOT [[FUSION:%[^ ]+]] = f16[8,8]{1,0} fusion([[A0]]) ; CHECK: kind=kCustom, calls=%dynamic-slice-fusion, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"address_computation","kernel_index":0} ; CHECK: } ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected); } static absl::Status Memcpy(se::Stream* stream, ffi::AnyBuffer src, ffi::AnyBuffer dst) { se::DeviceMemoryBase dst_mem = dst.device_memory(); se::DeviceMemoryBase src_mem = src.device_memory(); return stream->MemcpyD2D(&dst_mem, src_mem, src_mem.size()); } XLA_FFI_DEFINE_HANDLER(kMemcpy, Memcpy, ffi::Ffi::Bind() .Ctx<ffi::Stream>() .Arg<ffi::AnyBuffer>() .Arg<ffi::AnyBuffer>() ); XLA_FFI_REGISTER_HANDLER(ffi::GetXlaFfiApi(), "__xla_test$$memcpy", "gpu", kMemcpy); TEST_F(DynamicSliceFusionRewriterTest, SimpleCustomCall) { XlaBuilder b(TestName()); CustomCall(&b, "__xla_test$$memcpy", {Slice(Broadcast(ConstantR0WithType(&b, F32, 42.0), {256}), {0}, {128}, {1})}, ShapeUtil::MakeShape(F32, {128}), "", false, {}, nullptr, CustomCallSchedule::SCHEDULE_NONE, CustomCallApiVersion::API_VERSION_TYPED_FFI); TF_ASSERT_OK_AND_ASSIGN(auto computation, b.Build()); xla::HloModuleConfig hlo_config( xla::ProgramShape(computation.proto().host_program_shape()), false); DebugOptions 
debug_options = GetDebugOptionsForTest(); debug_options.set_xla_gpu_enable_dynamic_slice_fusion(false); hlo_config.set_debug_options(debug_options); TF_ASSERT_OK_AND_ASSIGN(auto hlo, xla::HloModule::CreateFromProto( computation.proto(), hlo_config)); const char* expected = R"( ; CHECK: %dynamic-slice-fusion{{.*}} { ; CHECK: [[P0:%[^ ]+]] = f32[256]{0} parameter(0) ; CHECK: [[S0:%[^ ]+]] = f32[128]{0} slice([[P0]]), slice={[0:128]} ; CHECK: ROOT [[CC:%[^ ]+]] = f32[128]{0} custom-call([[S0]]), ; CHECK: custom_call_target="__xla_test$$memcpy", ; CHECK: api_version=API_VERSION_TYPED_FFI ; CHECK: } ; CHECK: ENTRY %{{.*}} { ; CHECK: [[C0:%[^ ]+]] = f32[] constant(42) ; CHECK: [[BC:%[^ ]+]] = f32[256]{0} broadcast([[C0]]) ; CHECK: ROOT [[FUSION:%[^ ]+]] = f32[128]{0} fusion([[BC]]) ; CHECK: kind=kCustom, calls=%dynamic-slice-fusion, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"address_computation","kernel_index":0} ; CHECK: } ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo->ToString(), DynamicSliceFusionRewriter("gpu"), expected); } void Callback_Void(se::gpu::GpuStreamHandle stream, void** buffers, const char* , size_t ) {} XLA_REGISTER_CUSTOM_CALL_TARGET(Callback_Void, "gpu"); TEST_F(DynamicSliceFusionRewriterTest, SimpleCustomCallLegacy) { XlaBuilder b(TestName()); CustomCall(&b, "Callback_Void", {Slice(Broadcast(ConstantR0WithType(&b, F32, 42.0), {256}), {0}, {128}, {1})}, ShapeUtil::MakeShape(F32, {128}), ""); TF_ASSERT_OK_AND_ASSIGN(auto computation, b.Build()); xla::HloModuleConfig hlo_config( xla::ProgramShape(computation.proto().host_program_shape()), false); DebugOptions debug_options = GetDebugOptionsForTest(); debug_options.set_xla_gpu_enable_dynamic_slice_fusion(false); hlo_config.set_debug_options(debug_options); TF_ASSERT_OK_AND_ASSIGN(auto hlo, xla::HloModule::CreateFromProto( computation.proto(), hlo_config)); const char* expected = R"( ; CHECK: %dynamic-slice-fusion{{.*}} { ; CHECK: [[P0:%[^ ]+]] = f32[256]{0} parameter(0) ; CHECK: [[S0:%[^ ]+]] = f32[128]{0} slice([[P0]]), slice={[0:128]} ; CHECK: ROOT [[CC:%[^ ]+]] = f32[128]{0} custom-call([[S0]]), ; CHECK: custom_call_target="Callback_Void" ; CHECK: } ; CHECK: ENTRY %{{.*}} { ; CHECK: [[C0:%[^ ]+]] = f32[] constant(42) ; CHECK: [[BC:%[^ ]+]] = f32[256]{0} broadcast([[C0]]) ; CHECK: ROOT [[FUSION:%[^ ]+]] = f32[128]{0} fusion([[BC]]) ; CHECK: kind=kCustom, calls=%dynamic-slice-fusion, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"address_computation","kernel_index":0} ; CHECK: } ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo->ToString(), DynamicSliceFusionRewriter("gpu"), expected); } TEST_F(DynamicSliceFusionRewriterTest, TupleSliceCustomCallLegacy) { XlaBuilder b(TestName()); CustomCall( &b, "Callback_Void", { Tuple(&b, { Slice(Broadcast(ConstantR0WithType(&b, F32, 5), {8, 8}), {0, 0}, {4, 8}, {1, 1}), Broadcast(ConstantR0WithType(&b, F32, 2), {256}), }), Tuple(&b, { Broadcast(ConstantR0WithType(&b, F32, 3), {1024}), Broadcast(ConstantR0WithType(&b, F32, 4), {8}), }), }, ShapeUtil::MakeShape(F32, {128}), ""); TF_ASSERT_OK_AND_ASSIGN(auto computation, b.Build()); xla::HloModuleConfig hlo_config( xla::ProgramShape(computation.proto().host_program_shape()), false); DebugOptions debug_options = GetDebugOptionsForTest(); debug_options.set_xla_gpu_enable_dynamic_slice_fusion(false); 
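// As in the other XlaBuilder-based tests, the xla_gpu_enable_dynamic_slice_fusion
// flag is switched off in the module config and the rewriter under test is applied
// explicitly via RunAndFilecheckHloRewrite.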
hlo_config.set_debug_options(debug_options); TF_ASSERT_OK_AND_ASSIGN(auto hlo, xla::HloModule::CreateFromProto( computation.proto(), hlo_config)); const char* expected = R"( ; CHECK: %dynamic-slice-fusion{{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f32[8,8]{1,0} parameter(0) ; CHECK-DAG: [[S0:%[^ ]+]] = f32[4,8]{1,0} slice([[P0]]), slice={[0:4], [0:8]} ; CHECK-DAG: [[P1:%[^ ]+]] = f32[256]{0} parameter(1) ; CHECK-DAG: [[T0:%[^ ]+]] = (f32[4,8]{1,0}, f32[256]{0}) tuple([[S0]], [[P1]]) ; CHECK-DAG: [[P2:%[^ ]+]] = (f32[1024]{0}, f32[8]{0}) parameter(2) ; CHECK: ROOT [[CC:%[^ ]+]] = f32[128]{0} custom-call([[T0]], [[P2]]), ; CHECK: custom_call_target="Callback_Void" ; CHECK: } ; CHECK: ENTRY %{{.*}} { ; CHECK: ROOT [[FUSION:%[^ ]+]] = f32[128]{0} fusion( ; CHECK: kind=kCustom, calls=%dynamic-slice-fusion, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"address_computation","kernel_index":0} ; CHECK: } ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo->ToString(), DynamicSliceFusionRewriter("gpu"), expected); } TEST_F(DynamicSliceFusionRewriterTest, TupledOutputCustomCallLegacy) { XlaBuilder b(TestName()); auto custom_call = CustomCall( &b, "Callback_Void", { Tuple(&b, { Slice(Broadcast(ConstantR0WithType(&b, F32, 5), {8, 8}), {0, 0}, {4, 8}, {1, 1}), Broadcast(ConstantR0WithType(&b, F32, 2), {256}), }), Tuple(&b, { Broadcast(ConstantR0WithType(&b, F32, 3), {1024}), Broadcast(ConstantR0WithType(&b, F32, 4), {8}), }), }, ShapeUtil::MakeTupleShape({ ShapeUtil::MakeShape(F32, {8}), ShapeUtil::MakeTupleShape({ ShapeUtil::MakeShape(F32, {128}), ShapeUtil::MakeShape(F32, {256}), }), ShapeUtil::MakeShape(F32, {1024}), ShapeUtil::MakeShape(F32, {4, 8}), }), ""); Tuple(&b, {GetTupleElement(GetTupleElement(custom_call, 1), 0), GetTupleElement(custom_call, 2)}); TF_ASSERT_OK_AND_ASSIGN(auto computation, b.Build()); xla::HloModuleConfig hlo_config( xla::ProgramShape(computation.proto().host_program_shape()), false); DebugOptions debug_options = GetDebugOptionsForTest(); debug_options.set_xla_gpu_enable_dynamic_slice_fusion(false); hlo_config.set_debug_options(debug_options); TF_ASSERT_OK_AND_ASSIGN(auto hlo, xla::HloModule::CreateFromProto( computation.proto(), hlo_config)); const char* expected = R"( ; CHECK: %dynamic-slice-fusion{{.*}} { ; CHECK-DAG: [[P2:%[^ ]+]] = (f32[1024]{0}, f32[8]{0}) parameter(2) ; CHECK-DAG: [[P1:%[^ ]+]] = f32[256]{0} parameter(1) ; CHECK-DAG: [[P0:%[^ ]+]] = f32[8,8]{1,0} parameter(0) ; CHECK-DAG: [[S0:%[^ ]+]] = f32[4,8]{1,0} slice([[P0]]), slice={[0:4], [0:8]} ; CHECK-DAG: [[T0:%[^ ]+]] = (f32[4,8]{1,0}, f32[256]{0}) tuple([[S0]], [[P1]]) ; CHECK: [[CC:%[^ ]+]] = (f32[8]{0}, (f32[128]{0}, f32[256]{0}), f32[1024]{0}, f32[4,8]{1,0}) custom-call([[T0]], [[P2]]), ; CHECK: custom_call_target="Callback_Void" ; CHECK-DAG: [[GTE0:%[^ ]+]] = f32[8]{0} get-tuple-element([[CC]]), index=0 ; CHECK-DAG: [[GTE1:%[^ ]+]] = (f32[128]{0}, f32[256]{0}) get-tuple-element([[CC]]), index=1 ; CHECK-DAG: [[GTE2:%[^ ]+]] = f32[128]{0} get-tuple-element([[GTE1]]), index=0 ; CHECK-DAG: [[GTE3:%[^ ]+]] = f32[256]{0} get-tuple-element([[GTE1]]), index=1 ; CHECK-DAG: [[T1:%[^ ]+]] = (f32[128]{0}, f32[256]{0}) tuple([[GTE2]], [[GTE3]]) ; CHECK-DAG: [[GTE4:%[^ ]+]] = f32[1024]{0} get-tuple-element([[CC]]), index=2 ; CHECK-DAG: [[GTE5:%[^ ]+]] = f32[4,8]{1,0} get-tuple-element([[CC]]), index=3 ; CHECK: ROOT {{.*}} = (f32[8]{0}, (f32[128]{0}, f32[256]{0}), f32[1024]{0}, f32[4,8]{1,0}) tuple([[GTE0]], [[T1]], 
[[GTE4]], [[GTE5]]) ; CHECK: } ; CHECK: ENTRY %{{.*}} { ; CHECK: [[FUSION:%[^ ]+]] = (f32[8]{0}, (f32[128]{0}, f32[256]{0}), f32[1024]{0}, f32[4,8]{1,0}) fusion ; CHECK: kind=kCustom, calls=%dynamic-slice-fusion, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"address_computation","kernel_index":0} ; CHECK: } ; CHECK-DAG: [[GTE6:%[^ ]+]] = f32[1024]{0} get-tuple-element([[FUSION]]), index=2 ; CHECK-DAG: [[GTE7:%[^ ]+]] = (f32[128]{0}, f32[256]{0}) get-tuple-element([[FUSION]]), index=1 ; CHECK-DAG: [[GTE8:%[^ ]+]] = f32[128]{0} get-tuple-element([[GTE7]]), index=0 ; CHECK: ROOT {{.*}} = (f32[128]{0}, f32[1024]{0}) tuple([[GTE8]], [[GTE6]]) ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo->ToString(), DynamicSliceFusionRewriter("gpu"), expected); } TEST_F(DynamicSliceFusionRewriterTest, UnalignedSlice) { XlaBuilder b(TestName()); CustomCall( &b, "Callback_Void", {Slice(Broadcast(ConstantR0WithType(&b, S32, 42), {17}), {1}, {17}, {1})}, ShapeUtil::MakeShape(S32, {16}), ""); TF_ASSERT_OK_AND_ASSIGN(auto computation, b.Build()); xla::HloModuleConfig hlo_config( xla::ProgramShape(computation.proto().host_program_shape()), false); DebugOptions debug_options = GetDebugOptionsForTest(); debug_options.set_xla_gpu_enable_dynamic_slice_fusion(false); hlo_config.set_debug_options(debug_options); TF_ASSERT_OK_AND_ASSIGN(auto hlo, xla::HloModule::CreateFromProto( computation.proto(), hlo_config)); auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo->ToString(), DynamicSliceFusionRewriter("gpu"), std::nullopt); } TEST_F(DynamicSliceFusionRewriterTest, DynamicSimpleGemm) { const char* hlo = R"( HloModule test ENTRY main.9 { p0 = f16[2,8,8]{2,1,0} parameter(0) p1 = f16[2,8,8]{2,1,0} parameter(1) c1_s32 = s32[] constant(1) c0_s32 = s32[] constant(0) slice.13 = f16[1,8,8]{2,1,0} dynamic-slice(p0, c1_s32, c0_s32, c0_s32), dynamic_slice_sizes={1,8,8} bitcast.41 = f16[8,8]{1,0} bitcast(slice.13) slice.14 = f16[1,8,8]{2,1,0} dynamic-slice(p1, c1_s32, c0_s32, c0_s32), dynamic_slice_sizes={1,8,8} bitcast.42 = f16[8,8]{1,0} bitcast(slice.14) ROOT custom-call.1 = f16[8,8]{1,0} custom-call(bitcast.41, bitcast.42), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} } )"; const char* expected = R"( ; CHECK: dynamic-slice-fusion{{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(3) ; CHECK-DAG: [[C1:%[^ ]+]] = s32[] parameter(1) ; CHECK-DAG: [[C0:%[^ ]+]] = s32[] parameter(2) ; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} dynamic-slice([[P0]], [[C1]], [[C0]], [[C0]]), dynamic_slice_sizes={1,8,8} ; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]]) ; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} dynamic-slice([[P1]], [[C1]], [[C0]], [[C0]]), dynamic_slice_sizes={1,8,8} ; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]]) ; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]), ; CHECK: custom_call_target="__cublas$gemm" ; CHECK: } ; CHECK: ENTRY %main{{.*}} { ; CHECK: ROOT [[FUSION:%[^ ]+]] = f16[8,8]{1,0} fusion ; 
CHECK: kind=kCustom, calls=%dynamic-slice-fusion, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"dynamic_address_computation","kernel_index":0} ; CHECK: } ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected); } TEST_F(DynamicSliceFusionRewriterTest, DynamicSimpleGemmWithWorkspace) { const char* hlo = R"( HloModule test ENTRY main.9 { p0 = f16[2,8,8]{2,1,0} parameter(0) p1 = f16[2,8,8]{2,1,0} parameter(1) c1_s32 = s32[] constant(1) c0_s32 = s32[] constant(0) slice.13 = f16[1,8,8]{2,1,0} dynamic-slice(p0, c1_s32, c0_s32, c0_s32), dynamic_slice_sizes={1,8,8} bitcast.41 = f16[8,8]{1,0} bitcast(slice.13) slice.14 = f16[1,8,8]{2,1,0} dynamic-slice(p1, c1_s32, c0_s32, c0_s32), dynamic_slice_sizes={1,8,8} bitcast.42 = f16[8,8]{1,0} bitcast(slice.14) ROOT custom-call.1 = (f16[8,8]{1,0}, s8[256]{0}) custom-call(bitcast.41, bitcast.42), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} } )"; const char* expected = R"( ; CHECK: dynamic-slice-fusion{{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(3) ; CHECK-DAG: [[C1:%[^ ]+]] = s32[] parameter(1) ; CHECK-DAG: [[C0:%[^ ]+]] = s32[] parameter(2) ; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} dynamic-slice([[P0]], [[C1]], [[C0]], [[C0]]), dynamic_slice_sizes={1,8,8} ; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]]) ; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} dynamic-slice([[P1]], [[C1]], [[C0]], [[C0]]), dynamic_slice_sizes={1,8,8} ; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]]) ; CHECK: [[CC:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0}) custom-call([[B0]], [[B1]]), ; CHECK: custom_call_target="__cublas$gemm" ; CHECK: [[DOT:%[^ ]+]] = f16[8,8]{1,0} get-tuple-element([[CC]]), index=0 ; CHECK: [[WORKSPACE:%[^ ]+]] = s8[256]{0} get-tuple-element([[CC]]), index=1 ; CHECK: ROOT [[TUPLE:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0}) ; CHECK: tuple([[DOT]], [[WORKSPACE]]) ; CHECK: } ; CHECK: ENTRY %main{{.*}} { ; CHECK: ROOT [[FUSION:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0}) fusion ; CHECK: kind=kCustom, calls=%dynamic-slice-fusion, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"dynamic_address_computation","kernel_index":0} ; CHECK: } ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected); } TEST_F(DynamicSliceFusionRewriterTest, DynamicSimpleGemmWorkspaceIgnored) { const char* hlo = R"( HloModule test ENTRY main.9 { p0 = f16[2,8,8]{2,1,0} parameter(0) p1 = f16[2,8,8]{2,1,0} parameter(1) c1_s32 = s32[] constant(1) c0_s32 = s32[] constant(0) slice.13 = f16[1,8,8]{2,1,0} dynamic-slice(p0, c1_s32, c0_s32, c0_s32), dynamic_slice_sizes={1,8,8} bitcast.41 = f16[8,8]{1,0} bitcast(slice.13) slice.14 = f16[1,8,8]{2,1,0} dynamic-slice(p1, c1_s32, c0_s32, c0_s32), dynamic_slice_sizes={1,8,8} bitcast.42 = f16[8,8]{1,0} bitcast(slice.14) custom-call.1 = (f16[8,8]{1,0}, s8[256]{0}) custom-call(bitcast.41, bitcast.42), 
custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} ROOT get-tuple-element.0 = f16[8,8]{1,0} get-tuple-element(custom-call.1), index=0 } )"; const char* expected = R"( ; CHECK: dynamic-slice-fusion{{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(3) ; CHECK-DAG: [[C1:%[^ ]+]] = s32[] parameter(1) ; CHECK-DAG: [[C0:%[^ ]+]] = s32[] parameter(2) ; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} dynamic-slice([[P0]], [[C1]], [[C0]], [[C0]]), dynamic_slice_sizes={1,8,8} ; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]]) ; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} dynamic-slice([[P1]], [[C1]], [[C0]], [[C0]]), dynamic_slice_sizes={1,8,8} ; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]]) ; CHECK: [[CC:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0}) custom-call([[B0]], [[B1]]), ; CHECK: custom_call_target="__cublas$gemm" ; CHECK: [[DOT:%[^ ]+]] = f16[8,8]{1,0} get-tuple-element([[CC]]), index=0 ; CHECK: [[WORKSPACE:%[^ ]+]] = s8[256]{0} get-tuple-element([[CC]]), index=1 ; CHECK: ROOT [[TUPLE:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0}) ; CHECK: tuple([[DOT]], [[WORKSPACE]]) ; CHECK: } ; CHECK: ENTRY %main{{.*}} { ; CHECK: [[FUSION:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0}) fusion ; CHECK: kind=kCustom, calls=%dynamic-slice-fusion, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"dynamic_address_computation","kernel_index":0} ; CHECK: } ; CHECK: ROOT [[DOT_MAIN:%[^ ]+]] = f16[8,8]{1,0} get-tuple-element([[FUSION]]), index=0 ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected); } TEST_F(DynamicSliceFusionRewriterTest, DynamicSimpleGemmNotRoot) { const char* hlo = R"( HloModule test ENTRY main.9 { p0 = f16[2,8,8]{2,1,0} parameter(0) p1 = f16[2,8,8]{2,1,0} parameter(1) c1_s32 = s32[] constant(1) c0_s32 = s32[] constant(0) slice.13 = f16[1,8,8]{2,1,0} dynamic-slice(p0, c1_s32, c0_s32, c0_s32), dynamic_slice_sizes={1,8,8} bitcast.41 = f16[8,8]{1,0} bitcast(slice.13) slice.14 = f16[1,8,8]{2,1,0} dynamic-slice(p1, c1_s32, c0_s32, c0_s32), dynamic_slice_sizes={1,8,8} bitcast.42 = f16[8,8]{1,0} bitcast(slice.14) custom-call.1 = f16[8,8]{1,0} custom-call(bitcast.41, bitcast.42), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} ROOT res = f16[8,8]{1,0} add(custom-call.1, custom-call.1) } )"; const char* expected = R"( ; CHECK: dynamic-slice-fusion{{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(3) ; CHECK-DAG: [[C1:%[^ ]+]] = s32[] parameter(1) ; CHECK-DAG: [[C0:%[^ ]+]] = s32[] parameter(2) ; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} dynamic-slice([[P0]], [[C1]], 
[[C0]], [[C0]]), dynamic_slice_sizes={1,8,8} ; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]]) ; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} dynamic-slice([[P1]], [[C1]], [[C0]], [[C0]]), dynamic_slice_sizes={1,8,8} ; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]]) ; CHECK: ROOT [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]), ; CHECK: custom_call_target="__cublas$gemm" ; CHECK: } ; CHECK: ENTRY %main{{.*}} { ; CHECK: [[FUSION:%[^ ]+]] = f16[8,8]{1,0} fusion ; CHECK: kind=kCustom, calls=%dynamic-slice-fusion, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"dynamic_address_computation","kernel_index":0} ; CHECK: } ; CHECK: ROOT {{.*}} = f16[8,8]{1,0} add([[FUSION]], [[FUSION]]) ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected); } TEST_F(DynamicSliceFusionRewriterTest, DUSSimpleGemm) { const char* hlo = R"( HloModule test ENTRY main.9 { p0 = f16[1,8,8]{2,1,0} parameter(0) p1 = f16[1,8,8]{2,1,0} parameter(1) p2 = f16[4,8,8]{2,1,0} parameter(2) c1_s32 = s32[] constant(1) c0_s32 = s32[] constant(0) bitcast.41 = f16[8,8]{1,0} bitcast(p0) bitcast.42 = f16[8,8]{1,0} bitcast(p1) custom-call.1 = f16[8,8]{1,0} custom-call(bitcast.41, bitcast.42), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} bitcast.43 = f16[1,8,8]{2,1,0} bitcast(custom-call.1) ROOT dus = f16[4,8,8]{2,1,0} dynamic-update-slice(p2, bitcast.43, c1_s32, c0_s32, c0_s32) } )"; const char* expected = R"( ; CHECK-DAG: [[P0:%[^ ]+]] = f16[8,8]{1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f16[8,8]{1,0} parameter(1) ; CHECK-DAG: [[P2:%[^ ]+]] = f16[4,8,8]{2,1,0} parameter(2) ; CHECK-DAG: [[C1:%[^ ]+]] = s32[] parameter(3) ; CHECK-DAG: [[C0:%[^ ]+]] = s32[] parameter(4) ; CHECK-DAG: [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[P0]], [[P1]]), ; CHECK-DAG: custom_call_target="__cublas$gemm" ; CHECK-DAG: [[BC:%[^ ]+]] = f16[1,8,8]{2,1,0} bitcast([[CC]]) ; CHECK: ROOT {{.*}} = f16[4,8,8]{2,1,0} dynamic-update-slice([[P2]], [[BC]], [[C1]], [[C0]], [[C0]]) ; CHECK: } ; CHECK: ENTRY %main{{.*}} { ; CHECK: ROOT [[FUSION:%[^ ]+]] = f16[4,8,8]{2,1,0} fusion ; CHECK: kind=kCustom, calls=%dynamic-slice-fusion, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"dynamic_address_computation","kernel_index":0} ; CHECK: } ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected); } TEST_F(DynamicSliceFusionRewriterTest, DUSSimpleGemmNotRoot) { const char* hlo = R"( HloModule test ENTRY main.9 { p0 = f16[2,8,8]{2,1,0} parameter(0) p1 = f16[2,8,8]{2,1,0} parameter(1) p2 = f16[4,8,8]{2,1,0} parameter(2) c1_s32 = s32[] constant(1) c0_s32 = s32[] constant(0) slice.13 = f16[1,8,8]{2,1,0} dynamic-slice(p0, c1_s32, c0_s32, c0_s32), dynamic_slice_sizes={1,8,8} bitcast.41 = f16[8,8]{1,0} bitcast(slice.13) slice.14 = f16[1,8,8]{2,1,0} dynamic-slice(p1, c1_s32, c0_s32, c0_s32), dynamic_slice_sizes={1,8,8} bitcast.42 = f16[8,8]{1,0} bitcast(slice.14) custom-call.1 = f16[8,8]{1,0} 
custom-call(bitcast.41, bitcast.42), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} bitcast.43 = f16[1,8,8]{2,1,0} bitcast(custom-call.1) dus = f16[4,8,8]{2,1,0} dynamic-update-slice(p2, bitcast.43, c1_s32, c0_s32, c0_s32) ROOT res = f16[4,8,8]{2,1,0} log(dus) } )"; const char* expected = R"( ; CHECK: dynamic-slice-fusion{{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(3) ; CHECK-DAG: [[P2:%[^ ]+]] = f16[4,8,8]{2,1,0} parameter(4) ; CHECK-DAG: [[C1:%[^ ]+]] = s32[] parameter(1) ; CHECK-DAG: [[C0:%[^ ]+]] = s32[] parameter(2) ; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} dynamic-slice([[P0]], [[C1]], [[C0]], [[C0]]), dynamic_slice_sizes={1,8,8} ; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]]) ; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} dynamic-slice([[P1]], [[C1]], [[C0]], [[C0]]), dynamic_slice_sizes={1,8,8} ; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]]) ; CHECK-DAG: [[CC:%[^ ]+]] = f16[8,8]{1,0} custom-call([[B0]], [[B1]]), ; CHECK-DAG: custom_call_target="__cublas$gemm" ; CHECK-DAG: [[BC:%[^ ]+]] = f16[1,8,8]{2,1,0} bitcast([[CC]]) ; CHECK: ROOT {{.*}} = f16[4,8,8]{2,1,0} dynamic-update-slice([[P2]], [[BC]], [[C1]], [[C0]], [[C0]]) ; CHECK: } ; CHECK: ENTRY %main{{.*}} { ; CHECK: [[FUSION:%[^ ]+]] = f16[4,8,8]{2,1,0} fusion ; CHECK: kind=kCustom, calls=%dynamic-slice-fusion, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"dynamic_address_computation","kernel_index":0} ; CHECK: } ; CHECK: ROOT {{.*}} = f16[4,8,8]{2,1,0} log([[FUSION]]) ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected); } TEST_F(DynamicSliceFusionRewriterTest, DUSSimpleGemmWithWorkspace) { const char* hlo = R"( HloModule test ENTRY main.9 { p0 = f16[2,8,8]{2,1,0} parameter(0) p1 = f16[2,8,8]{2,1,0} parameter(1) p2 = f16[4,8,8]{2,1,0} parameter(2) c1_s32 = s32[] constant(1) c0_s32 = s32[] constant(0) slice.13 = f16[1,8,8]{2,1,0} dynamic-slice(p0, c1_s32, c0_s32, c0_s32), dynamic_slice_sizes={1,8,8} bitcast.41 = f16[8,8]{1,0} bitcast(slice.13) slice.14 = f16[1,8,8]{2,1,0} dynamic-slice(p1, c1_s32, c0_s32, c0_s32), dynamic_slice_sizes={1,8,8} bitcast.42 = f16[8,8]{1,0} bitcast(slice.14) custom-call.1 = (f16[8,8]{1,0}, s8[256]{0}) custom-call(bitcast.41, bitcast.42), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} get-tuple-element.0 = f16[8,8]{1,0} get-tuple-element(custom-call.1), index=0 bitcast.43 = f16[1,8,8]{2,1,0} bitcast(get-tuple-element.0) dus = f16[4,8,8]{2,1,0} dynamic-update-slice(p2, bitcast.43, c1_s32, c0_s32, c0_s32) get-tuple-element.1 = s8[256]{0} get-tuple-element(custom-call.1), index=1 ROOT tuple = 
(f16[4,8,8]{2,1,0}, s8[256]{0}) tuple(dus, get-tuple-element.1) } )"; const char* expected = R"( ; CHECK: dynamic-slice-fusion{{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f16[2,8,8]{2,1,0} parameter(3) ; CHECK-DAG: [[P2:%[^ ]+]] = f16[4,8,8]{2,1,0} parameter(4) ; CHECK-DAG: [[C1:%[^ ]+]] = s32[] parameter(1) ; CHECK-DAG: [[C0:%[^ ]+]] = s32[] parameter(2) ; CHECK-DAG: [[S0:%[^ ]+]] = f16[1,8,8]{2,1,0} dynamic-slice([[P0]], [[C1]], [[C0]], [[C0]]), dynamic_slice_sizes={1,8,8} ; CHECK-DAG: [[B0:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S0]]) ; CHECK-DAG: [[S1:%[^ ]+]] = f16[1,8,8]{2,1,0} dynamic-slice([[P1]], [[C1]], [[C0]], [[C0]]), dynamic_slice_sizes={1,8,8} ; CHECK-DAG: [[B1:%[^ ]+]] = f16[8,8]{1,0} bitcast([[S1]]) ; CHECK: [[CC:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0}) custom-call([[B0]], [[B1]]), ; CHECK: custom_call_target="__cublas$gemm" ; CHECK: [[DOT:%[^ ]+]] = f16[8,8]{1,0} get-tuple-element([[CC]]), index=0 ; CHECK: [[BC:%[^ ]+]] = f16[1,8,8]{2,1,0} bitcast([[DOT]]) ; CHECK: [[DUS:%[^ ]+]] = f16[4,8,8]{2,1,0} dynamic-update-slice([[P2]], [[BC]], [[C1]], [[C0]], [[C0]]) ; CHECK: [[WORKSPACE:%[^ ]+]] = s8[256]{0} get-tuple-element([[CC]]), index=1 ; CHECK: ROOT [[TUPLE:%[^ ]+]] = (f16[4,8,8]{2,1,0}, s8[256]{0}) ; CHECK: tuple([[DUS]], [[WORKSPACE]]) ; CHECK: } ; CHECK: ENTRY %main{{.*}} { ; CHECK: [[FUSION:%[^ ]+]] = (f16[4,8,8]{2,1,0}, s8[256]{0}) fusion ; CHECK: kind=kCustom, calls=%dynamic-slice-fusion, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"dynamic_address_computation","kernel_index":0} ; CHECK: } ; CHECK: [[DUS_MAIN:%[^ ]+]] = f16[4,8,8]{2,1,0} get-tuple-element([[FUSION]]), index=0 ; CHECK: [[WORKSPACE_MAIN:%[^ ]+]] = s8[256]{0} get-tuple-element([[FUSION]]), index=1 ; CHECK: ROOT {{.*}} = (f16[4,8,8]{2,1,0}, s8[256]{0}) ; CHECK: tuple([[DUS_MAIN]], [[WORKSPACE_MAIN]]) ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected); } TEST_F(DynamicSliceFusionRewriterTest, DUSSimpleGemmWorkspaceIgnored) { const char* hlo = R"( HloModule test ENTRY %main.9 { %p0 = f16[8,8]{1,0} parameter(0) %p1 = f16[8,8]{1,0} parameter(1) %p2 = f16[4,8,8]{2,1,0} parameter(2) %c1_s32 = s32[] constant(1) %c0_s32 = s32[] constant(0) %custom-call.1 = (f16[8,8]{1,0}, s8[256]{0}) custom-call(%p0, %p1), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} %get-tuple-element.0 = f16[8,8]{1,0} get-tuple-element(%custom-call.1), index=0 %bitcast.43 = f16[1,8,8]{2,1,0} bitcast(%get-tuple-element.0) ROOT %dus = f16[4,8,8]{2,1,0} dynamic-update-slice(%p2, %bitcast.43, %c1_s32, %c0_s32, %c0_s32) })"; const char* expected = R"( ; CHECK: dynamic-slice-fusion{{.*}} { ; CHECK-DAG: [[P0:%[^ ]+]] = f16[8,8]{1,0} parameter(0) ; CHECK-DAG: [[P1:%[^ ]+]] = f16[8,8]{1,0} parameter(1) ; CHECK-DAG: [[P2:%[^ ]+]] = f16[4,8,8]{2,1,0} parameter(2) ; CHECK-DAG: [[C1:%[^ ]+]] = s32[] parameter(3) ; CHECK-DAG: [[C0:%[^ ]+]] = s32[] parameter(4) ; CHECK-DAG: [[CC:%[^ ]+]] = (f16[8,8]{1,0}, s8[256]{0}) custom-call([[P0]], [[P1]]), ; CHECK-DAG: 
custom_call_target="__cublas$gemm" ; CHECK-DAG: [[DOT:%[^ ]+]] = f16[8,8]{1,0} get-tuple-element([[CC]]), index=0 ; CHECK-DAG: [[BC:%[^ ]+]] = f16[1,8,8]{2,1,0} bitcast([[DOT]]) ; CHECK-DAG: [[DUS:%[^ ]+]] = f16[4,8,8]{2,1,0} dynamic-update-slice([[P2]], [[BC]], [[C1]], [[C0]], [[C0]]) ; CHECK-DAG: [[WORKSPACE:%[^ ]+]] = s8[256]{0} get-tuple-element([[CC]]), index=1 ; CHECK: ROOT [[TUPLE:%[^ ]+]] = (f16[4,8,8]{2,1,0}, s8[256]{0}) ; CHECK: tuple([[DUS]], [[WORKSPACE]]) ; CHECK: } ; CHECK: ENTRY %main{{.*}} { ; CHECK: [[FUSION:%[^ ]+]] = (f16[4,8,8]{2,1,0}, s8[256]{0}) fusion ; CHECK: kind=kCustom, calls=%dynamic-slice-fusion, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"dynamic_address_computation","kernel_index":0} ; CHECK: } ; CHECK: ROOT [[DOT_MAIN:%[^ ]+]] = f16[4,8,8]{2,1,0} get-tuple-element([[FUSION]]), index=0 ; CHECK: } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected); } TEST_F(DynamicSliceFusionRewriterTest, ReduceScatterDUSConstantOffset) { const char* hlo = R"( HloModule test, replica_count=2 add { param_0 = f16[] parameter(0) param_1 = f16[] parameter(1) ROOT add.1 = f16[] add(param_0, param_1) } ENTRY main.9 { param_0 = f16[128,128]{1,0} parameter(0) param_1 = f16[128,128]{1,0} parameter(1) constant_20 = u32[] constant(20) constant_0 = u32[] constant(0) reduce-scatter = f16[64,128]{1,0} reduce-scatter(param_0), channel_id=64, replica_groups={{0,1}}, use_global_device_ids=true, dimensions={0}, to_apply=add ROOT loop_dynamic_update_slice_fusion = f16[128,128]{1,0} dynamic-update-slice(param_1, reduce-scatter, constant_20, constant_0) } )"; const char* expected = R"( )"; RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected); } TEST_F(DynamicSliceFusionRewriterTest, ReduceScatterDUSParameterOffset) { const char* hlo = R"( HloModule test, replica_count=2 add.clone { x.1 = f16[] parameter(0) y.1 = f16[] parameter(1) ROOT add.462 = f16[] add(x.1, y.1) } ENTRY %main.9 { param_0 = f16[128,128]{1,0} parameter(0) param_1 = f16[128,128]{1,0} parameter(1) param_2 = u32[] parameter(2) constant_0 = u32[] constant(0) reduce-scatter = f16[64,128]{1,0} reduce-scatter(param_0), channel_id=64, replica_groups={{0,1}}, use_global_device_ids=true, dimensions={0}, to_apply=add.clone ROOT dynamic-update-slice = f16[128,128]{1,0} dynamic-update-slice(param_1, reduce-scatter, param_2, constant_0) })"; RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), std::nullopt); } TEST_F(DynamicSliceFusionRewriterTest, ReduceScatterDUSLoopIterationOffset) { const char* hlo = R"( HloModule jit_scan, replica_count=2 add { param_0 = f32[] parameter(0) param_1 = f32[] parameter(1) ROOT add.6 = f32[] add(param_0, param_1) } Body { arg_tuple.1 = (s32[], f32[128,128]{1,0}, f32[128,128,128]{2,1,0}, f32[128,128]{1,0}) parameter(0) get-tuple-element.5 = s32[] get-tuple-element(arg_tuple.1), index=0 constant.1 = s32[] constant(1) add.7 = s32[] add(get-tuple-element.5, constant.1) get-tuple-element.6 = f32[128,128]{1,0} get-tuple-element(arg_tuple.1), index=3 get-tuple-element.7 = f32[128,128,128]{2,1,0} get-tuple-element(arg_tuple.1), index=2 reduce-scatter.0 = f32[64,128]{1,0} reduce-scatter(get-tuple-element.6), channel_id=64, replica_groups={{0,1}}, use_global_device_ids=true, dimensions={0}, to_apply=add bitcast.63 = f32[1,64,128]{2,1,0} bitcast(reduce-scatter.0) constant.2 = s32[] constant(0) compare.4 = pred[] compare(get-tuple-element.5, 
constant.2), direction=LT constant.3 = s32[] constant(128) add.8 = s32[] add(get-tuple-element.5, constant.3) select.2 = s32[] select(compare.4, add.8, get-tuple-element.5) dynamic-update-slice.2 = f32[128,128,128]{2,1,0} dynamic-update-slice(get-tuple-element.7, bitcast.63, select.2, constant.2, constant.2) ROOT tuple.1 = tuple(add.7, get-tuple-element.6, dynamic-update-slice.2, get-tuple-element.6) } Cond { arg_tuple.0 = (s32[], f32[128,128]{1,0}, f32[128,128,128]{2,1,0}, f32[128,128]{1,0}) parameter(0) get-tuple-element.4 = s32[] get-tuple-element(arg_tuple.0), index=0 constant.0 = s32[] constant(128) ROOT compare.5 = pred[] compare(get-tuple-element.4, constant.0), direction=LT } ENTRY main.55 { Arg_2.3 = f32[128,128,128]{2,1,0} parameter(2) constant.4 = s32[] constant(0) Arg_1.2 = f32[128,128]{1,0} parameter(1) constant.5 = f32[] constant(0) broadcast.1 = f32[128,128,128]{2,1,0} broadcast(constant.5), dimensions={} Arg_0.1 = f32[128,128]{1,0} parameter(0) tuple = tuple(constant.4, Arg_1.2, broadcast.1, Arg_0.1) while = while(tuple), condition=Cond, body=Body, backend_config={"known_trip_count":{"n":"128"}} get-tuple-element.50 = f32[128,128]{1,0} get-tuple-element(while), index=1 get-tuple-element.51 = f32[128,128,128]{2,1,0} get-tuple-element(while), index=2 ROOT tuple.54 = (f32[128,128]{1,0}, f32[128,128,128]{2,1,0}) tuple(get-tuple-element.50, get-tuple-element.51) })"; const char* expected = R"( )"; RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected); } TEST_F(DynamicSliceFusionRewriterTest, DUSSimpleGemmLoopIteration) { const char* hlo = R"( HloModule test %Body { param = (f16[1,8,8]{2,1,0}, f16[1,8,8]{2,1,0}, f16[4,8,8]{2,1,0}, u32[]) parameter(0) p0 = get-tuple-element(param), index=0 p1 = get-tuple-element(param), index=1 p2 = get-tuple-element(param), index=2 loop_iter = get-tuple-element(param), index=3 bitcast.41 = f16[8,8]{1,0} bitcast(p0) bitcast.42 = f16[8,8]{1,0} bitcast(p1) custom-call.1 = f16[8,8]{1,0} custom-call(bitcast.41, bitcast.42), custom_call_target="__cublas$gemm" bitcast.43 = f16[1,8,8]{2,1,0} bitcast(custom-call.1) c0 = u32[] constant(0) c_trip_count = u32[] constant(8) compare = pred[] compare(loop_iter, c0), direction=LT add = u32[] add(loop_iter, c_trip_count) offset = u32[] select(compare, add, loop_iter) dus = f16[4,8,8]{2,1,0} dynamic-update-slice(p2, bitcast.43, offset, c0, c0) c1 = u32[] constant(1) add2 = u32[] add(loop_iter, c1) ROOT tuple = tuple(p0, p1, dus, u32[] add2) } %Cond { %param.1 = (f16[1,8,8]{2,1,0}, f16[1,8,8]{2,1,0}, f16[4,8,8]{2,1,0}, u32[]) parameter(0) %i.1 = u32[] get-tuple-element(%param.1), index=3 %trip_count = u32[] constant(8) ROOT %done = pred[] compare(u32[] %i.1, u32[] %trip_count), direction=LT } ENTRY %test { %p0.1 = f16[1,8,8]{2,1,0} parameter(0) %p1.1 = f16[1,8,8]{2,1,0} parameter(1) %p2.1 = f16[4,8,8]{2,1,0} parameter(2) %c0.1 = u32[] constant(0) %initial_tuple = tuple(%p0.1, %p1.1, %p2.1, u32[] %c0.1) ROOT %while = while(%initial_tuple), condition=%Cond, body=%Body, backend_config={"known_trip_count":{"n":"8"}} })"; const char* expected = R"( } )"; auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected); } TEST_F(DynamicSliceFusionRewriterTest, DUSSimpleGemmParameterOffset) { const char* hlo = R"( HloModule test ENTRY main.9 { p0 = f16[1,8,8]{2,1,0} parameter(0) p1 = f16[1,8,8]{2,1,0} parameter(1) p2 = f16[4,8,8]{2,1,0} parameter(2) p3 = s32[] parameter(3) c1_s32 = s32[] constant(1) c0_s32 = s32[] constant(0) 
bitcast.41 = f16[8,8]{1,0} bitcast(p0) bitcast.42 = f16[8,8]{1,0} bitcast(p1) custom-call.1 = f16[8,8]{1,0} custom-call(bitcast.41, bitcast.42), custom_call_target="__cublas$gemm", backend_config={"gemm_backend_config":{ "alpha_real":1, "beta":0, "dot_dimension_numbers":{ "lhs_contracting_dimensions":["1"], "rhs_contracting_dimensions":["0"], "lhs_batch_dimensions":[], "rhs_batch_dimensions":[] }, "alpha_imag":0, "precision_config":{"operand_precision":["DEFAULT","DEFAULT"]}, "epilogue":"DEFAULT", "lhs_stride":"64", "rhs_stride":"64", "grad_x":false, "grad_y":false }} bitcast.43 = f16[1,8,8]{2,1,0} bitcast(custom-call.1) ROOT dus = f16[4,8,8]{2,1,0} dynamic-update-slice(p2, bitcast.43, p3, c0_s32, c0_s32) })"; RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), std::nullopt); } TEST_F(DynamicSliceFusionRewriterTest, DUSOffsetAsFunctionOfLoopIteration) { const char* hlo = R"( HloModule test_module, replica_count=2 add { a = s64[] parameter(0) b = s64[] parameter(1) ROOT add = s64[] add(a, b) } Body { param = (s64[], s64[16, 32], s64[8, 32]) parameter(0) i = s64[] get-tuple-element(param), index=0 dest = s64[16,32] get-tuple-element(param), index=1 src = s64[8,32] get-tuple-element(param), index=2 eight = s64[] constant(8) zero = s64[] constant(0) thirty_two = s64[] constant(32) add = s64[] add(eight, i) add.2 = s64[] subtract(add, thirty_two) compare = pred[] compare(add, thirty_two), direction=LT offset = s64[] select(compare, add, add.2) rs = s64[4,32] reduce-scatter(src), channel_id=1, replica_groups={{0,1}}, use_global_device_ids=true, dimensions={0}, to_apply=add fusion = s64[16,32] dynamic-update-slice(dest, rs, offset, zero) one = s64[] constant(1) i_plus_one = s64[] add(i, one) ROOT tuple = tuple(i_plus_one, fusion, src) } Cond { param = (s64[], s64[16,32], s64[8,32]) parameter(0) loop_iter = s64[] get-tuple-element(param), index=0 c16 = s64[] constant(16) ROOT compare = pred[] compare(loop_iter, c16), direction=LT } ENTRY main { zero = s64[] constant(0) dest = s64[16,32] parameter(0) src = s64[8,32] parameter(1) tuple = tuple(zero, dest, src) ROOT while = while(tuple), body=Body, condition=Cond } )"; const char* expected = R"( )"; TF_ASSERT_OK_AND_ASSIGN(auto module, GetOptimizedModule(hlo)); TF_ASSERT_OK_AND_ASSIGN( auto changed, RunHloPass(DynamicSliceFusionRewriter("gpu"), module.get())); EXPECT_TRUE(changed); std::vector<const HloComputation*> fusions; for (auto computation : module->computations()) { if (computation->IsFusionComputation()) { fusions.push_back(computation); } } ASSERT_EQ(fusions.size(), 1); const HloComputation* dynamic_slice_fusion = fusions[0]; TF_ASSERT_OK_AND_ASSIGN( auto filecheck_match, RunFileCheck(dynamic_slice_fusion->ToString( HloPrintOptions{}.set_print_large_constants(true)), expected)); EXPECT_TRUE(filecheck_match); } TEST_F(DynamicSliceFusionRewriterTest, DUSSimpleGemmLaxScan) { const char* hlo = R"( HloModule lax_scan Body { arg_tuple.15 = (s32[], f32[8,8]{1,0}, f32[8,8,8]{2,1,0}, f32[8,8,8]{2,1,0}, f32[8,8]{1,0}) parameter(0) get-tuple-element.16 = s32[] get-tuple-element(arg_tuple.15), index=0 constant.21 = s32[] constant(1) add.2 = s32[] add(get-tuple-element.16, constant.21) get-tuple-element.30 = get-tuple-element(arg_tuple.15), index=4 get-tuple-element.18 = get-tuple-element(arg_tuple.15), index=2 get-tuple-element.19 = get-tuple-element(arg_tuple.15), index=3 constant.23 = s32[] constant(0) compare.2 = pred[] compare(get-tuple-element.16, constant.23), direction=LT constant.22 = s32[] constant(8) add.3 = s32[] 
add(get-tuple-element.16, constant.22) select.1 = s32[] select(compare.2, add.3, get-tuple-element.16) dynamic-slice.1 = f32[1,8,8]{2,1,0} dynamic-slice(get-tuple-element.19, select.1, constant.23, constant.23), dynamic_slice_sizes={1,8,8} bitcast.72 = f32[8,8]{1,0} bitcast(dynamic-slice.1) get-tuple-element.17 = f32[8,8]{1,0} get-tuple-element(arg_tuple.15), index=1 custom-call.1 = (f32[8,8]{1,0}, s8[131072]{0}) custom-call(bitcast.72, get-tuple-element.17), custom_call_target="__cublas$gemm" get-tuple-element = f32[8,8]{1,0} get-tuple-element(custom-call.1), index=0 bitcast.77 = f32[1,8,8]{2,1,0} bitcast(get-tuple-element) dynamic-update-slice.1 = f32[8,8,8]{2,1,0} dynamic-update-slice(get-tuple-element.18, bitcast.77, select.1, constant.23, constant.23) ROOT tuple.38 = tuple(add.2, get-tuple-element.30, dynamic-update-slice.1, get-tuple-element.19, get-tuple-element.30) } Cond { arg_tuple.40 = (s32[], f32[8,8]{1,0}, f32[8,8,8]{2,1,0}, f32[8,8,8]{2,1,0}, f32[8,8]{1,0}) parameter(0) get-tuple-element.41 = s32[] get-tuple-element(arg_tuple.40), index=0 constant.46 = s32[] constant(8) ROOT compare.3 = pred[] compare(get-tuple-element.41, constant.46), direction=LT } ENTRY main { constant.4 = s32[] constant(0) Arg_1.2 = f32[8,8]{1,0} parameter(1) constant.5 = f32[] constant(0) broadcast.1 = f32[8,8,8]{2,1,0} broadcast(constant.5), dimensions={} Arg_2.3 = f32[8,8,8]{2,1,0} parameter(2) Arg_0.1 = f32[8,8]{1,0} parameter(0) tuple.7 = tuple(constant.4, Arg_1.2, broadcast.1, Arg_2.3, Arg_0.1) while.48 = while(tuple.7), condition=Cond, body=Body, backend_config={"known_trip_count":{"n":"8"}} get-tuple-element.50 = get-tuple-element(while.48), index=1 get-tuple-element.51 = get-tuple-element(while.48), index=2 ROOT tuple.54 = tuple(get-tuple-element.50, get-tuple-element.51) } )"; const char* expected = R"( )"; RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected); } TEST_F(DynamicSliceFusionRewriterTest, DUSReduceScatterTupleNoTransform) { const char* hlo = R"( HloModule test, replica_count=2 add { param_0 = f16[] parameter(0) param_1 = f16[] parameter(1) ROOT add.1 = f16[] add(param_0, param_1) } ENTRY main.9 { param_0 = f16[128,128]{1,0} parameter(0) param_1 = f16[128,128]{1,0} parameter(1) param_2 = f16[128,128]{1,0} parameter(2) constant_20 = u32[] constant(20) constant_0 = u32[] constant(0) reduce-scatter = (f16[64,128]{1,0}, f16[64,128]{1,0}) reduce-scatter(param_0, param_2), channel_id=64, replica_groups={{0,1}}, use_global_device_ids=true, dimensions={0}, to_apply=add rs1 = get-tuple-element(reduce-scatter), index=0 ROOT loop_dynamic_update_slice_fusion = f16[128,128]{1,0} dynamic-update-slice(param_1, rs1, constant_20, constant_0) })"; RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), std::nullopt); } TEST_F(DynamicSliceFusionRewriterTest, ReduceScatterSlice) { const char* hlo = R"( HloModule jit_slice, replica_count=2 add { a = s32[] parameter(0) b = s32[] parameter(1) ROOT add = add(a,b) } ENTRY %main.9 { p0 = s32[2,8,32]{2,1,0} parameter(0) slice = s32[1,8,32]{2,1,0} slice(%p0), slice={[1:2], [0:8], [0:32]} bc = s32[8,32]{1,0} bitcast(%slice) ROOT rs = s32[4,32] reduce-scatter(bc), channel_id=64, replica_groups={{0,1}}, use_global_device_ids=true, dimensions={0}, to_apply=add })"; const char* expected = R"( )"; RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected); } TEST_F(DynamicSliceFusionRewriterTest, ReduceScatterDynamicSlice) { const char* hlo = R"( HloModule jit_slice, replica_count=2 add { a = s32[] parameter(0) b = 
s32[] parameter(1) ROOT add = add(a,b) } ENTRY %main.9 { p0 = s32[2,8,32]{2,1,0} parameter(0) c0 = s32[] constant(0) c1 = s32[] constant(1) slice = s32[1,8,32]{2,1,0} dynamic-slice(p0, c1, c0, c0), dynamic_slice_sizes={1,8,32} bc = s32[8,32]{1,0} bitcast(%slice) ROOT rs = s32[4,32] reduce-scatter(bc), channel_id=64, replica_groups={{0,1}}, use_global_device_ids=true, dimensions={0}, to_apply=add })"; const char* expected = R"( )"; RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected); } TEST_F(DynamicSliceFusionRewriterTest, ReplicaIdAndPartitionIdAsOffset) { const char* hlo = R"( HloModule test_module, replica_count=2, num_partitions=2 ENTRY main { p0 = s32[32,32] parameter(0) p1 = s32[32,32] parameter(1) p2 = s32[64,32] parameter(2) c10 = u32[] constant(10) c0 = u32[] constant(0) call1 = s32[32,32] custom-call(p0, p1), custom_call_target="__cublas$gemm" dus1 = s32[64,32] dynamic-update-slice(p2, call1, c10, c0) replica = u32[] replica-id() call2 = s32[32,32] custom-call(p0, p1), custom_call_target="__cublas$gemm" dus2 = s32[64,32] dynamic-update-slice(p2, call2, replica, c0) partition = u32[] partition-id() call3 = s32[32,32] custom-call(p0, p1), custom_call_target="__cublas$gemm" dus3 = s32[64,32] dynamic-update-slice(p2, call3, partition, c0) ROOT tuple = tuple(dus1, dus2, dus3) } )"; const char* expected = R"( )"; RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), expected); } TEST_F(DynamicSliceFusionRewriterTest, ParameterOffsetThroughWhileLoop) { const char* hlo = R"( HloModule test Body { p = (s32[], s32[32,32], s32[32,32], s32[64,32], s32[]) parameter(0) i = get-tuple-element(p), index=0 p0 = get-tuple-element(p), index=1 p1 = get-tuple-element(p), index=2 p2 = s32[64,32] get-tuple-element(p), index=3 offset = s32[] get-tuple-element(p), index=4 c0 = s32[] constant(0) call = s32[32,32] custom-call(p0, p1), custom_call_target="__cublas$gemm" dus = s32[64,32] dynamic-update-slice(p2, call, offset, c0) c1 = s32[] constant(1) i_plus_one = add(i, c1) ROOT tuple = tuple(i_plus_one, p1, p0, dus, offset) } Cond { p = (s32[], s32[32,32], s32[32,32], s32[64,32], s32[]) parameter(0) i = get-tuple-element(p), index=0 c4 = s32[] constant(4) ROOT compare = compare(i, c4), direction=LT } ENTRY main { offset = s32[] parameter(0) p0 = s32[32,32] parameter(1) p1 = s32[32,32] parameter(2) p2 = s32[64,32] parameter(3) c0 = s32[] constant(0) tuple = tuple(c0, p0, p1, p2, offset) ROOT while = while(tuple), body=Body, condition=Cond } )"; RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"), std::nullopt); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/dynamic_slice_fusion_rewriter.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/dynamic_slice_fusion_rewriter_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
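A minimal sketch in the same style as the tests in the record above (hypothetical test name and HLO; it assumes, as the no-change cases such as UnalignedSlice and DUSSimpleGemmParameterOffset suggest, that passing std::nullopt to RunAndFilecheckHloRewrite asserts the rewriter leaves the module unchanged): a cuBLAS GEMM whose operands are plain parameters, with no slice or dynamic-slice feeding it and no dynamic-update-slice consuming it, should not be rewritten.

TEST_F(DynamicSliceFusionRewriterTest, NoSliceNoRewriteSketch) {
  // Hypothetical negative case: no (dynamic-)slice feeds the custom call and
  // no dynamic-update-slice consumes it, so there is no address computation
  // for the rewriter to offload into a dynamic-slice fusion.
  const char* hlo = R"(
    HloModule test

    ENTRY main {
      p0 = f16[8,8]{1,0} parameter(0)
      p1 = f16[8,8]{1,0} parameter(1)
      ROOT gemm = f16[8,8]{1,0} custom-call(p0, p1),
        custom_call_target="__cublas$gemm"
    })";
  // std::nullopt = expect DynamicSliceFusionRewriter to report no change.
  RunAndFilecheckHloRewrite(hlo, DynamicSliceFusionRewriter("gpu"),
                            std::nullopt);
}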
572bf3bf-5817-49f1-84e8-2c61e8d34dfe
cpp
tensorflow/tensorflow
cudnn_custom_call_converter
third_party/xla/xla/service/gpu/transforms/cudnn_custom_call_converter.cc
third_party/xla/xla/service/gpu/transforms/cudnn_custom_call_converter_test.cc
#include "xla/service/gpu/transforms/cudnn_custom_call_converter.h" #include "absl/container/flat_hash_set.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/dfs_hlo_visitor_with_default.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/ir_emission_utils.h" #include "tsl/platform/errors.h" namespace xla { namespace gpu { namespace { class CustomCallVisitor : public DfsHloRewriteVisitor { public: absl::Status HandleCustomCall(HloInstruction *hlo) override { if (hlo->custom_call_target() != kCuDnnFusionKind) { return absl::OkStatus(); } HloComputation *computation = hlo->GetModule()->AddEmbeddedComputation( hlo->called_computations()[0]->Clone()); HloInstruction *fusion = hlo->parent()->AddInstruction(HloInstruction::CreateFusion( hlo->shape(), HloInstruction::FusionKind::kCustom, hlo->operands(), computation)); GpuBackendConfig gpu_config; FusionBackendConfig &backend_config = *gpu_config.mutable_fusion_backend_config(); backend_config.set_kind(hlo->custom_call_target()); TF_RETURN_IF_ERROR(fusion->set_backend_config(gpu_config)); TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, fusion)); return absl::OkStatus(); } }; } absl::StatusOr<bool> CuDnnCustomCallConverter::Run( HloModule *module, const absl::flat_hash_set<absl::string_view> &execution_threads) { return CustomCallVisitor().RunOnModule(module, execution_threads); } } }
#include "xla/service/gpu/transforms/cudnn_custom_call_converter.h" #include <gtest/gtest.h> #include "xla/tests/hlo_test_base.h" namespace xla { namespace gpu { namespace { using ConverterTest = HloTestBase; TEST_F(ConverterTest, CustomCallGetsConvertedToCustomFusion) { RunAndFilecheckHloRewrite(R"( f { a = s8[] parameter(0) ROOT r = s8[] add(a, a) } ENTRY e { b = s8[] parameter(0) ROOT c = s8[] custom-call(b), custom_call_target="__cudnn$fusion", called_computations={f} })", CuDnnCustomCallConverter(), R"( ; CHECK: ROOT %fusion = s8[] fusion(%b), kind=kCustom, calls=%f ; CHECK-SAME: "fusion_backend_config":{"kind":"__cudnn$fusion"} )"); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/cudnn_custom_call_converter.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/cudnn_custom_call_converter_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
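A minimal sketch of driving the converter without FileCheck (hypothetical test name; it assumes the pass header path mirrors the .cc path above and that the HloTestBase helpers ParseAndReturnVerifiedModule and RunHloPass used elsewhere in these records are available): after the pass runs, the entry root should be a kCustom fusion in place of the original __cudnn$fusion custom call.

TEST_F(ConverterTest, RootBecomesCustomFusionSketch) {
  const char* hlo = R"(
    f {
      a = s8[] parameter(0)
      ROOT r = s8[] add(a, a)
    }
    ENTRY e {
      b = s8[] parameter(0)
      ROOT c = s8[] custom-call(b), custom_call_target="__cudnn$fusion",
        called_computations={f}
    })";
  TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          RunHloPass(CuDnnCustomCallConverter(), module.get()));
  EXPECT_TRUE(changed);
  // The custom call is replaced by a fusion wrapping a clone of computation f.
  const HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_EQ(root->opcode(), HloOpcode::kFusion);
  EXPECT_EQ(root->fusion_kind(), HloInstruction::FusionKind::kCustom);
}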
a3c2a240-90d4-4864-80a2-208148d1b716
cpp
tensorflow/tensorflow
horizontal_loop_fusion
third_party/xla/xla/service/gpu/transforms/horizontal_loop_fusion.cc
third_party/xla/xla/service/gpu/transforms/horizontal_loop_fusion_test.cc
#include "xla/service/gpu/transforms/horizontal_loop_fusion.h" #include <algorithm> #include <cstddef> #include <cstdint> #include <memory> #include <string> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/layout_util.h" #include "xla/service/gpu/gpu_fusible.h" #include "xla/service/hlo_creation_utils.h" #include "xla/service/sub_byte_normalization.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { PrimitiveType GetUniqueOutputTypeOfFusible(const HloInstruction& fusible) { auto outputs = GetOutputsOfFusible(fusible); CHECK(!outputs.empty()); PrimitiveType first_output_type = outputs[0]->shape().element_type(); for (size_t i = 1; i < outputs.size(); ++i) { PrimitiveType cur_output_type = outputs[i]->shape().element_type(); CHECK(first_output_type == cur_output_type) << "Output types are expected to be unique, but see " << PrimitiveType_Name(first_output_type) << " and " << PrimitiveType_Name(cur_output_type); } return first_output_type; } class HorizontalLoopFusionImpl { public: explicit HorizontalLoopFusionImpl(HloComputation* computation, absl::string_view prefix) : computation_(computation), prefix_(prefix) {} ~HorizontalLoopFusionImpl() = default; absl::StatusOr<bool> Run(); private: absl::Status Fuse(absl::Span<HloInstruction*> fused_fusion_instrs, bool sliced_input_fusion, std::vector<HloInstruction*>& to_fuse_candidates); absl::Status CreateFusedComputation( absl::Span<HloInstruction*> fused_fusion_instrs, std::unique_ptr<HloComputation>* uniq_computation, std::vector<HloInstruction*>* bound_operands, bool sliced_input_fusion); absl::StatusOr<bool> FuseConsumerOperands( HloInstruction* consumer, bool sliced_input_fusion, std::vector<HloInstruction*>& to_fuse_candidates); class FusionCandidates { public: explicit FusionCandidates(HloInstruction* consumer, bool sliced_input_fusion) : fusible_instrs_(), pos_(0), sliced_input_fusion_(sliced_input_fusion) { Initialize(consumer); } absl::Span<HloInstruction*> GetNextSpanOfFusions(); private: void Initialize(HloInstruction*); std::vector<HloInstruction*> fusible_instrs_; size_t pos_; bool sliced_input_fusion_; }; HloComputation* computation_; std::string prefix_; }; bool IsFusibleCandidate(const HloInstruction& instr) { if (!instr.control_successors().empty() || !instr.control_predecessors().empty()) { return false; } if (IsNestableVariadicReduction(instr)) { return false; } if (instr.IsElementwise() && instr.operand_count() > 0) { return true; } if (!instr.IsLoopFusion()) { return false; } auto outputs = GetOutputsOfFusible(instr); CHECK(!outputs.empty()); const HloInstruction* first_output = outputs[0]; for (size_t i = 1; i < outputs.size(); ++i) { if (first_output->shape().element_type() != outputs[i]->shape().element_type()) { return false; } } return true; } bool IsProfitableFusionCandidate(const HloInstruction& instr, bool sliced_input_fusion) { const int64_t kShapeThreshold = 
sliced_input_fusion ? 128 * 2048 : 8192 * 8192; const int64_t kInstrCountThreshold = sliced_input_fusion ? 30 : 128; const HloInstruction* root = (instr.opcode() == HloOpcode::kFusion) ? instr.fused_expression_root() : &instr; if (root->opcode() == HloOpcode::kTuple) { Shape shape = root->operand(0)->shape(); if (ShapeUtil::ElementsIn(shape) > kShapeThreshold) { VLOG(2) << "Profitable check failed due to element count with " "sliced_input_fusion=" << sliced_input_fusion; return false; } } else { Shape shape = root->shape(); if (ShapeUtil::ElementsIn(shape) > kShapeThreshold) { VLOG(2) << "Profiltable check failed due to element size with " "sliced_input_fusion=" << sliced_input_fusion; return false; } } if (instr.opcode() == HloOpcode::kFusion && instr.fused_instruction_count() > kInstrCountThreshold) { return false; } return true; } bool HasOnlyRowMajorLayout(const HloInstruction& instr) { if (instr.opcode() != HloOpcode::kFusion) { return LayoutUtil::IsMonotonicWithDim0Major(instr.shape().layout()); } auto fused_instrs = instr.fused_instructions_computation()->instructions(); for (HloInstruction* i : fused_instrs) { if (!LayoutUtil::IsDenseArray(i->shape())) { continue; } if (!LayoutUtil::IsMonotonicWithDim0Major(i->shape().layout())) { return false; } } return true; } bool AnyOpndIsParamSharedAmongFusions( const HloInstruction* instr, const absl::flat_hash_set<HloInstruction*>& fusion_instrs) { return absl::c_any_of(instr->operands(), [&](const HloInstruction* opnd) { return opnd->opcode() == HloOpcode::kParameter && absl::c_any_of(opnd->users(), [&](const HloInstruction* user) { return user != instr && fusion_instrs.contains(user); }); }); } void HorizontalLoopFusionImpl::FusionCandidates::Initialize( HloInstruction* consumer) { absl::flat_hash_set<HloInstruction*> fusible_candidates; std::vector<HloInstruction*> ordered_fusible_candidates; for (HloInstruction* opnd : consumer->operands()) { HloInstruction* predecessor = opnd->LatestNonGteAncestor(); if (IsFusibleCandidate(*predecessor)) { if (fusible_candidates.insert(predecessor).second) { ordered_fusible_candidates.push_back(predecessor); } } } for (HloInstruction* instr : ordered_fusible_candidates) { if (!IsConsumerTheOnlyNonRootUser(*instr, *consumer)) { VLOG(2) << "sliced_input_fusion=" << sliced_input_fusion_ << " rejects maybe illegal instr " << instr->ToString() << "; including it may create cycles in HLO."; continue; } else if (!IsProfitableFusionCandidate(*instr, sliced_input_fusion_)) { VLOG(2) << "sliced_input_fusion=" << sliced_input_fusion_ << " rejects may-not-be profitable fusion instr" << instr->ToString(); continue; } else if (!HasOnlyRowMajorLayout(*instr)) { VLOG(2) << "sliced_input_fusion=" << sliced_input_fusion_ << " rejects non-row-major fusion instr " << instr->ToString(); continue; } else if (AnyOpndIsParamSharedAmongFusions(instr, fusible_candidates)) { VLOG(2) << "sliced_input_fusion=" << sliced_input_fusion_ << " rejects the fusion instr because it shares parameter with" << " other fusion candidates, instr: " << instr->ToString(); continue; } else { VLOG(2) << "Find a fusion candidate " << instr->ToString(); fusible_instrs_.push_back(instr); } } std::stable_sort( fusible_instrs_.begin(), fusible_instrs_.end(), [&](const HloInstruction* a, const HloInstruction* b) { if (GetUniqueOutputTypeOfFusible(*a) != GetUniqueOutputTypeOfFusible(*b)) { return GetUniqueOutputTypeOfFusible(*a) < GetUniqueOutputTypeOfFusible(*b); } else if (GetOutputSizeOfFusible(*a) != GetOutputSizeOfFusible(*b)) { return 
GetOutputSizeOfFusible(*a) < GetOutputSizeOfFusible(*b); } else if (GetInstrCountOfFusible(*a) != GetInstrCountOfFusible(*b)) { return GetInstrCountOfFusible(*a) < GetInstrCountOfFusible(*b); } else { return ShapeUtil::ElementsIn(GetOutputsOfFusible(*a)[0]->shape()) < ShapeUtil::ElementsIn(GetOutputsOfFusible(*b)[0]->shape()); } }); } absl::Span<HloInstruction*> HorizontalLoopFusionImpl::FusionCandidates::GetNextSpanOfFusions() { if (pos_ >= fusible_instrs_.size()) { return absl::Span<HloInstruction*>(); } const auto kMaxFusionBatchSize = [&]() -> int64_t { if (sliced_input_fusion_) { return 32; } else { if (fusible_instrs_[pos_]->opcode() == HloOpcode::kFusion) { return 32; } else { return 64; } } }(); size_t left = pos_; size_t right = pos_ + 1; size_t first_output_size = GetOutputSizeOfFusible(*fusible_instrs_[left]); PrimitiveType first_output_type = GetUniqueOutputTypeOfFusible(*fusible_instrs_[left]); constexpr int64_t kMaxCudaParamSize = 4000; size_t accum_io_size = 0; size_t accum_num_outputs = 0; for (; right < fusible_instrs_.size(); ++right) { PrimitiveType cur_output_type = GetUniqueOutputTypeOfFusible(*fusible_instrs_[right]); if (first_output_type != cur_output_type) { break; } if (first_output_size != GetOutputSizeOfFusible(*fusible_instrs_[right])) { break; } if (GetInstrCountOfFusible(*fusible_instrs_[left]) != GetInstrCountOfFusible(*fusible_instrs_[right])) { break; } if (!sliced_input_fusion_ && !ShapeUtil::EqualIgnoringElementType( GetOutputsOfFusible(*fusible_instrs_[left])[0]->shape(), GetOutputsOfFusible(*fusible_instrs_[right])[0]->shape())) { break; } size_t num_outputs = GetOutputSizeOfFusible(*fusible_instrs_[right]); accum_num_outputs += num_outputs; if (accum_num_outputs >= kMaxFusionBatchSize) { break; } accum_io_size += fusible_instrs_.at(right)->operand_count() + num_outputs; if (accum_io_size * 8 >= kMaxCudaParamSize) { break; } } VLOG(2) << "horizontal fuse get instruction span with " << (right - left) << " instructions for sliced_input_fusion=" << sliced_input_fusion_ << " fusion"; pos_ = right; return absl::MakeSpan(fusible_instrs_).subspan(left, right - left); } absl::StatusOr<bool> HorizontalLoopFusionImpl::FuseConsumerOperands( HloInstruction* consumer, bool sliced_input_fusion, std::vector<HloInstruction*>& to_fuse_candidates) { bool changed = false; FusionCandidates loop_fusion_candidates(consumer, sliced_input_fusion); while (true) { auto fusibles = loop_fusion_candidates.GetNextSpanOfFusions(); if (fusibles.empty()) { break; } else if (fusibles.size() == 1) { continue; } changed = true; std::vector<HloInstruction*> fusion_instrs; for (HloInstruction* instr : fusibles) { if (instr->opcode() == HloOpcode::kFusion) { fusion_instrs.push_back(instr); } else { TF_ASSIGN_OR_RETURN( HloInstruction * fusion_instr, MakeFusionInstruction(instr, HloInstruction::FusionKind::kLoop)); fusion_instrs.push_back(fusion_instr); } } TF_RETURN_IF_ERROR(Fuse(absl::MakeSpan(fusion_instrs), sliced_input_fusion, to_fuse_candidates)); } return changed; } absl::Status HorizontalLoopFusionImpl::CreateFusedComputation( absl::Span<HloInstruction*> fused_fusion_instrs, std::unique_ptr<HloComputation>* uniq_computation, std::vector<HloInstruction*>* bound_operands, bool sliced_input_fusion) { HloComputation::Builder b(prefix_ + "horizontally_fused_computation"); size_t fused_comp_param_id = 0; for (size_t i = 0; i < fused_fusion_instrs.size(); ++i) { auto old_params = fused_fusion_instrs[i]->fused_parameters(); for (size_t j = 0; j < old_params.size(); ++j) { HloInstruction* 
bound_opnd = fused_fusion_instrs[i]->mutable_operand(j); b.AddInstruction(HloInstruction::CreateParameter( fused_comp_param_id++, bound_opnd->shape(), absl::StrCat("param_", i, "_", j))); bound_operands->push_back(bound_opnd); } } HloInstruction* dummy_root = b.AddInstruction( HloInstruction::CreateTuple(std::vector<HloInstruction*>{})); *uniq_computation = b.Build(dummy_root); HloComputation* comp = uniq_computation->get(); absl::flat_hash_map<const HloInstruction*, HloInstruction*> clone_map; size_t new_param_id = 0; for (size_t i = 0; i < fused_fusion_instrs.size(); ++i) { auto old_params = fused_fusion_instrs[i]->fused_parameters(); for (size_t j = 0; j < old_params.size(); ++j) { HloInstruction* old_param = old_params[j]; HloInstruction* new_param = comp->parameter_instruction(new_param_id++); clone_map.insert({old_param, new_param}); } } const OpMetadata* metadata = nullptr; for (size_t i = 0; i < fused_fusion_instrs.size(); ++i) { auto def_to_use_order = fused_fusion_instrs[i] ->fused_instructions_computation() ->MakeInstructionPostOrder(); for (HloInstruction* old_instr : def_to_use_order) { if (old_instr->opcode() == HloOpcode::kParameter || (sliced_input_fusion && old_instr->opcode() == HloOpcode::kTuple && old_instr == fused_fusion_instrs[i]->fused_expression_root())) { continue; } std::vector<HloInstruction*> new_opnds; const auto& old_opnds = old_instr->operands(); new_opnds.reserve(old_opnds.size()); for (HloInstruction* old_opnd : old_opnds) { CHECK(clone_map.find(old_opnd) != clone_map.end()); new_opnds.push_back(clone_map[old_opnd]); } HloInstruction* new_instr = comp->AddInstruction( old_instr->CloneWithNewOperands(old_instr->shape(), new_opnds)); clone_map.insert({old_instr, new_instr}); metadata = &old_instr->metadata(); } } size_t fused_instr_output_size = GetOutputSizeOfFusible(*fused_fusion_instrs[0]); if (sliced_input_fusion) { std::vector<HloInstruction*> concated_outputs; for (size_t i = 0; i < fused_instr_output_size; ++i) { std::vector<HloInstruction*> instr_outputs(fused_fusion_instrs.size()); for (size_t j = 0; j < fused_fusion_instrs.size(); ++j) { const HloInstruction* old_output = GetOutputsOfFusible(*fused_fusion_instrs[j])[i]; HloInstruction* new_output = clone_map[old_output]; if (new_output->shape().dimensions_size() == 1) { instr_outputs[j] = new_output; } else { Shape new_shape = ShapeUtil::MakeShapeWithDenseLayout( new_output->shape().element_type(), {ShapeUtil::ElementsIn(new_output->shape())}, std::vector<int64_t>(1, 0)); TF_ASSIGN_OR_RETURN(instr_outputs[j], MakeReshapeHlo(new_shape, new_output)); } } TF_ASSIGN_OR_RETURN(HloInstruction * concated_output, MakeConcatHlo(instr_outputs, 0)); concated_outputs.push_back(concated_output); } std::vector<HloInstruction*> output_slices(concated_outputs.size() * fused_fusion_instrs.size()); for (size_t i = 0; i < concated_outputs.size(); ++i) { HloInstruction* concated_output = concated_outputs[i]; int64_t slice_start = 0; for (size_t j = 0; j < fused_fusion_instrs.size(); ++j) { const HloInstruction* old_output = GetOutputsOfFusible(*fused_fusion_instrs[j])[i]; Shape shape = old_output->shape(); int64_t slice_limit = slice_start + ShapeUtil::ElementsIn(shape); TF_ASSIGN_OR_RETURN( output_slices[concated_outputs.size() * j + i], MakeSliceHlo(concated_output, {slice_start}, {slice_limit}, {1})); slice_start = slice_limit; } } HloInstruction* tuple = comp->AddInstruction( HloInstruction::CreateTuple(output_slices), metadata); comp->set_root_instruction(tuple, true); 
TF_RETURN_IF_ERROR(comp->RemoveInstruction(dummy_root)); } else { std::vector<HloInstruction*> tuple_operands(fused_instr_output_size * fused_fusion_instrs.size()); for (size_t i = 0; i < fused_instr_output_size; ++i) { for (size_t j = 0; j < fused_fusion_instrs.size(); ++j) { const HloInstruction* old_output = GetOutputsOfFusible(*fused_fusion_instrs[j])[i]; HloInstruction* new_output = clone_map[old_output]; tuple_operands[fused_instr_output_size * j + i] = new_output; } } HloInstruction* tuple = comp->AddInstruction(HloInstruction::CreateTuple(tuple_operands)); comp->set_root_instruction(tuple, true); TF_RETURN_IF_ERROR(comp->RemoveInstruction(dummy_root)); } return absl::OkStatus(); } absl::Status HorizontalLoopFusionImpl::Fuse( absl::Span<HloInstruction*> fused_fusion_instrs, bool sliced_input_fusion, std::vector<HloInstruction*>& to_fuse_candidates) { std::unique_ptr<HloComputation> uniq_computation; std::vector<HloInstruction*> bound_operands; TF_RETURN_IF_ERROR(CreateFusedComputation(fused_fusion_instrs, &uniq_computation, &bound_operands, sliced_input_fusion)); HloComputation* fused_comp = computation_->parent()->AddEmbeddedComputation( std::move(uniq_computation)); HloInstruction* hori_fusion_instr = computation_->AddInstruction( HloInstruction::CreateFusion(fused_comp->root_instruction()->shape(), sliced_input_fusion ? HloInstruction::FusionKind::kInput : HloInstruction::FusionKind::kLoop, bound_operands, fused_comp, prefix_), &fused_comp->root_instruction()->metadata()); fused_comp->SetFusionInstruction(hori_fusion_instr); to_fuse_candidates.push_back(hori_fusion_instr); size_t total_output_id = 0; for (size_t i = 0; i < fused_fusion_instrs.size(); ++i) { std::vector<HloInstruction*> bitcasts_or_gte; HloInstruction* fused_instr = fused_fusion_instrs[i]; size_t num_outputs = GetOutputSizeOfFusible(*fused_instr); for (size_t j = 0; j < num_outputs; ++j) { const HloInstruction* output = GetOutputsOfFusible(*fused_instr)[j]; TF_ASSIGN_OR_RETURN( HloInstruction * gep, MakeGetTupleElementHlo(hori_fusion_instr, total_output_id++)); if (output->shape().dimensions_size() == 1) { bitcasts_or_gte.push_back(gep); } else { bitcasts_or_gte.push_back(computation_->AddInstruction( HloInstruction::CreateBitcast(output->shape(), gep))); } } HloInstruction* bitcast_or_tuple = (bitcasts_or_gte.size() == 1) ? 
bitcasts_or_gte.at(0) : computation_->AddInstruction( HloInstruction::CreateTuple(bitcasts_or_gte)); HloComputation* old_computation = fused_instr->fused_instructions_computation(); HloModule* module = old_computation->parent(); TF_RETURN_IF_ERROR( computation_->ReplaceInstruction(fused_instr, bitcast_or_tuple)); TF_RETURN_IF_ERROR(module->RemoveEmbeddedComputation(old_computation)); } TF_RETURN_IF_ERROR(Cast<HloFusionInstruction>(hori_fusion_instr) ->DeduplicateFusionOperands()); VLOG(1) << "Fused " << fused_fusion_instrs.size() << " instructions into: " << hori_fusion_instr->ToString(); return absl::OkStatus(); } absl::StatusOr<bool> HorizontalLoopFusionImpl::Run() { bool changed = false; XLA_VLOG_LINES(3, computation_->ToString()); std::vector<HloInstruction*> to_fuse_candidates = computation_->MakeInstructionPostOrder(); while (!to_fuse_candidates.empty()) { HloInstruction* consumer = to_fuse_candidates.back(); to_fuse_candidates.pop_back(); if (consumer->IsDead()) { continue; } TF_ASSIGN_OR_RETURN( bool loop_fusion_changed, FuseConsumerOperands(consumer, false, to_fuse_candidates)); TF_ASSIGN_OR_RETURN( bool sliced_input_fusion_changed, FuseConsumerOperands(consumer, true, to_fuse_candidates)); changed = changed || loop_fusion_changed || sliced_input_fusion_changed; } return changed; } } absl::StatusOr<bool> HorizontalLoopFusion::RunOnComputation( HloComputation* computation) { HorizontalLoopFusionImpl horizontal_fusion_impl(computation, prefix_); return horizontal_fusion_impl.Run(); } absl::StatusOr<bool> HorizontalLoopFusion::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { VLOG(2) << "Run horizontal fusion."; TF_ASSIGN_OR_RETURN(bool changed, RunOnComputation(module->entry_computation())); if (changed) { TF_ASSIGN_OR_RETURN( [[maybe_unused]] bool unused, SubByteNormalization{SubByteNormalization::SET_ELEMENT_SIZE}.Run( module)); } return changed; } } }
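A minimal sketch, assuming the surrounding XLA build environment; it is not part of the original file. It shows how HorizontalLoopFusion is typically driven, either directly or to a fixed point inside an HloPassPipeline, exactly as the unit tests that follow do; the wrapper name RunHorizontalFusionSketch is illustrative only.

#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/pass/hlo_pass_fix.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/service/gpu/transforms/horizontal_loop_fusion.h"
#include "xla/service/hlo_dce.h"

namespace xla::gpu {

// Runs horizontal fusion to a fixed point, interleaving dead-code elimination,
// mirroring the IterativeHorizontalFusion test below.
inline absl::StatusOr<bool> RunHorizontalFusionSketch(HloModule* module) {
  HloPassFix<HloPassPipeline> pipeline("horizontal_fusion_sketch");
  pipeline.AddPass<HorizontalLoopFusion>();
  pipeline.AddPass<HloDCE>();
  return pipeline.Run(module);
}

}  // namespace xla::gpu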
#include "xla/service/gpu/transforms/horizontal_loop_fusion.h" #include <cstdint> #include <optional> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/log/log.h" #include "xla/error_spec.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/pass/hlo_pass_fix.h" #include "xla/hlo/pass/hlo_pass_pipeline.h" #include "xla/service/gpu/gpu_device_info_for_tests.h" #include "xla/service/gpu/transforms/instruction_fusion.h" #include "xla/service/hlo_dce.h" #include "xla/service/hlo_parser.h" #include "xla/service/pattern_matcher.h" #include "xla/service/pattern_matcher_gmock.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/stream_executor/device_description.h" #include "xla/test.h" #include "xla/tests/hlo_test_base.h" #include "xla/tsl/lib/core/status_test_util.h" namespace xla { namespace gpu { namespace { namespace m = ::xla::match; class HorizontalLoopFusionTest : public HloTestBase { public: static bool IsFusion(const HloInstruction* instr) { return instr->opcode() == HloOpcode::kFusion; } }; TEST_F(HorizontalLoopFusionTest, BasicTest) { auto module = ParseAndReturnVerifiedModule(R"( HloModule BasicTest fused_computation.1 { arg.1 = f16[1024]{0} parameter(0) arg.2 = f16[1024]{0} parameter(1) ROOT mul.1 = f16[1024]{0} multiply(arg.1, arg.2) } fused_computation.2 { arg.1 = f16[123]{0} parameter(0) arg.2 = f16[123]{0} parameter(1) ROOT add.1 = f16[123]{0} add(arg.1, arg.2) } ENTRY entry_computation { arg.1 = f16[1024]{0} parameter(0) arg.2 = f16[1024]{0} parameter(1) arg.3 = f16[123]{0} parameter(2) arg.4 = f16[123]{0} parameter(3) fusion.1 = f16[1024]{0} fusion(arg.1, arg.2), kind=kLoop, calls=fused_computation.1 fusion.2 = f16[123]{0} fusion(arg.3, arg.4), kind=kLoop, calls=fused_computation.2 ROOT tuple.1 = (f16[1024]{0}, f16[123]{0}) tuple(fusion.1, fusion.2) } )") .value(); EXPECT_TRUE(HorizontalLoopFusion().Run(module.get()).value()); TF_ASSERT_OK(verifier().Run(module.get()).status()); EXPECT_FALSE(HloDCE().Run(module.get()).value()); const HloInstruction* entry_root = module->entry_computation()->root_instruction(); const HloInstruction* fusion = nullptr; ASSERT_THAT(entry_root, GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)), m::GetTupleElement(m::Fusion())))); ASSERT_TRUE(fusion->IsMultiOutputFusion()); EXPECT_THAT(fusion->fused_expression_root(), GmockMatch(m::Tuple(m::Slice(m::Concatenate()), m::Slice(m::Concatenate())))); } TEST_F(HorizontalLoopFusionTest, NegativeTestForCycle) { auto module = ParseAndReturnVerifiedModule(R"( HloModule NegativeTestForCycle fused_computation.1 { arg.1 = f16[123]{0} parameter(0) arg.2 = f16[123]{0} parameter(1) ROOT mul.1 = f16[123]{0} multiply(arg.1, arg.2) } fused_computation.2 { arg.1 = f16[123]{0} parameter(0) arg.2 = f16[123]{0} parameter(1) ROOT add.1 = f16[123]{0} add(arg.1, arg.2) } ENTRY entry_computation { arg.1 = f16[123]{0} parameter(0) arg.2 = f16[123]{0} parameter(1) arg.3 = f16[123]{0} parameter(2) arg.4 = f16[123]{0} parameter(3) fusion.1 = f16[123]{0} fusion(arg.1, arg.2), kind=kLoop, calls=fused_computation.1 add.2 = f16[123]{0} add(fusion.1, arg.4) fusion.2 = f16[123]{0} fusion(add.2, arg.3), kind=kLoop, calls=fused_computation.2 ROOT tuple.1 = (f16[123]{0}, f16[123]{0}, f16[123]{0}) tuple(fusion.1, fusion.2, add.2) } )") .value(); EXPECT_FALSE(HorizontalLoopFusion().Run(module.get()).value()); } TEST_F(HorizontalLoopFusionTest, NegativeTestForIncompatibleTypes) { auto module = 
ParseAndReturnVerifiedModule(R"( HloModule NegativeTestForIncompatibleTypes fused_computation.1 { arg.1 = f16[1024]{0} parameter(0) arg.2 = f16[1024]{0} parameter(1) ROOT mul.1 = f16[1024]{0} multiply(arg.1, arg.2) } fused_computation.2 { arg.1 = s32[123]{0} parameter(0) arg.2 = s32[123]{0} parameter(1) ROOT add.1 = s32[123]{0} add(arg.1, arg.2) } ENTRY entry_computation { arg.1 = f16[1024]{0} parameter(0) arg.2 = f16[1024]{0} parameter(1) arg.3 = s32[123]{0} parameter(2) arg.4 = s32[123]{0} parameter(3) fusion.1 = f16[1024]{0} fusion(arg.1, arg.2), kind=kLoop, calls=fused_computation.1 fusion.2 = s32[123]{0} fusion(arg.3, arg.4), kind=kLoop, calls=fused_computation.2 ROOT tuple.1 = (f16[1024]{0}, s32[123]{0}) tuple(fusion.1, fusion.2) } )") .value(); EXPECT_FALSE(HorizontalLoopFusion().Run(module.get()).value()); } TEST_F(HorizontalLoopFusionTest, FusingIntoKLoopAndKInputTogether) { auto module = ParseAndReturnVerifiedModule(R"( HloModule FusingIntoKLoopAndKInputTogether fused_computation.1 { arg.1 = f16[129, 2048]{1, 0} parameter(0) arg.2 = f16[129, 2048]{1, 0} parameter(1) ROOT mul.1 = f16[129,2048]{1, 0} multiply(arg.1, arg.2) } fused_computation.2 { arg.1 = f16[129, 2048]{1, 0} parameter(0) arg.2 = f16[129, 2048]{1, 0} parameter(1) ROOT mul.1 = f16[129,2048]{1, 0} multiply(arg.1, arg.2) } fused_computation.3 { arg.1 = f16[130, 2048]{1, 0} parameter(0) arg.2 = f16[130, 2048]{1, 0} parameter(1) ROOT mul.1 = f16[130,2048]{1, 0} multiply(arg.1, arg.2) } fused_computation.4 { arg.1 = f16[130, 2048]{1, 0} parameter(0) arg.2 = f16[130, 2048]{1, 0} parameter(1) ROOT mul.1 = f16[130,2048]{1, 0} multiply(arg.1, arg.2) } fused_computation.5 { arg.1 = f16[123]{0} parameter(0) arg.2 = f16[123]{0} parameter(1) ROOT add.1 = f16[123]{0} add(arg.1, arg.2) } fused_computation.6 { arg.1 = f16[128]{0} parameter(0) arg.2 = f16[128]{0} parameter(1) ROOT add.1 = f16[128]{0} add(arg.1, arg.2) } ENTRY entry_computation { arg.1 = f16[129, 2048]{1, 0} parameter(0) arg.2 = f16[129, 2048]{1, 0} parameter(1) arg.3 = f16[129, 2048]{1, 0} parameter(2) arg.4 = f16[129, 2048]{1, 0} parameter(3) arg.5 = f16[130, 2048]{1, 0} parameter(4) arg.6 = f16[130, 2048]{1, 0} parameter(5) arg.7 = f16[130, 2048]{1, 0} parameter(6) arg.8 = f16[130, 2048]{1, 0} parameter(7) arg.9 = f16[123]{0} parameter(8) arg.10 = f16[123]{0} parameter(9) arg.11 = f16[128]{0} parameter(10) arg.12 = f16[128]{0} parameter(11) fusion.1 = f16[129,2048]{1, 0} fusion(arg.1, arg.2), kind=kLoop, calls=fused_computation.1 fusion.2 = f16[129,2048]{1, 0} fusion(arg.3, arg.4), kind=kLoop, calls=fused_computation.2 fusion.3 = f16[130,2048]{1, 0} fusion(arg.5, arg.6), kind=kLoop, calls=fused_computation.3 fusion.4 = f16[130,2048]{1, 0} fusion(arg.7, arg.8), kind=kLoop, calls=fused_computation.4 fusion.5 = f16[123]{0} fusion(arg.9, arg.10), kind=kLoop, calls=fused_computation.5 fusion.6 = f16[128]{0} fusion(arg.11, arg.12), kind=kLoop, calls=fused_computation.6 ROOT tuple.1 = (f16[129,2048]{1, 0}, f16[129,2048]{1, 0}, f16[130,2048]{1, 0}, f16[130,2048]{1, 0}, f16[123]{0}, f16[128]{0}) tuple(fusion.1, fusion.2, fusion.3, fusion.4, fusion.5, fusion.6) } )") .value(); EXPECT_TRUE(HorizontalLoopFusion().Run(module.get()).value()); int input_fusion_count = 0; int loop_fusion_count = 0; for (auto inst : module->entry_computation()->MakeInstructionPostOrder()) { if (inst->opcode() == HloOpcode::kFusion) { input_fusion_count += (inst->fusion_kind() == HloInstruction::FusionKind::kInput) ? 
1 : 0; loop_fusion_count += (inst->fusion_kind() == HloInstruction::FusionKind::kLoop) ? 1 : 0; } } EXPECT_EQ(input_fusion_count, 1); EXPECT_EQ(loop_fusion_count, 2); } TEST_F(HorizontalLoopFusionTest, HorizontalLoopFusionAfterVerticalFusion) { auto module = ParseAndReturnVerifiedModule(R"( HloModule MergeSharedFusionInstruction ENTRY MergeSharedFusionInstruction.Computation0 { param.1.1 = f32[4,1024]{1,0} parameter(0) param.1.2 = f32[4,1024]{1,0} parameter(1) param.1.3 = f32[4,1024]{1,0} parameter(2) param.2.1 = f32[321,5]{1,0} parameter(3) param.2.2 = f32[321,5]{1,0} parameter(4) param.2.3 = f32[321,5]{1,0} parameter(5) const.1 = f32[] constant(3) const.2 = f32[] constant(3) broadcast.1 = f32[4,1024]{1,0} broadcast(const.1), dimensions={} broadcast.2 = f32[321,5]{1,0} broadcast(const.2), dimensions={} mul.1.1 = f32[4,1024]{1,0} multiply(param.1.1, param.1.2) mul.1.2 = f32[4,1024]{1,0} multiply(param.1.3, broadcast.1) add.1 = f32[4,1024]{1,0} add(mul.1.1, mul.1.2) mul.2.1 = f32[321,5]{1,0} multiply(param.2.1, param.2.2) mul.2.2 = f32[321,5]{1,0} multiply(param.2.3, broadcast.2) add.2 = f32[321,5]{1,0} add(mul.2.1, mul.2.2) ROOT tuple = (f32[4,1024]{1,0}, f32[321,5]{1,0}) tuple(add.1, add.2) })") .value(); HloPassPipeline fusion("fusion"); const se::DeviceDescription device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo(); fusion.AddPass<xla::gpu::GpuInstructionFusion>(false, device_info); fusion.AddPass<xla::gpu::GpuInstructionFusion>(true, device_info); EXPECT_TRUE(fusion.Run(module.get()).value()); EXPECT_TRUE(HorizontalLoopFusion().Run(module.get()).value()); TF_ASSERT_OK(verifier().Run(module.get()).status()); VLOG(2) << "Dump after horizontal fusion:"; VLOG(2) << module->ToString(); const HloInstruction* entry_root = module->entry_computation()->root_instruction(); const HloInstruction* fusion_instr = nullptr; ASSERT_THAT(entry_root, GmockMatch(m::Tuple( m::Bitcast(m::GetTupleElement(m::Fusion(&fusion_instr))), m::Bitcast(m::GetTupleElement(m::Fusion()))))); ASSERT_TRUE(fusion_instr->IsMultiOutputFusion()); EXPECT_THAT(fusion_instr->fused_expression_root(), GmockMatch(m::Tuple( m::Slice(m::Concatenate(m::Reshape(), m::Reshape())), m::Slice(m::Concatenate(m::Reshape(), m::Reshape()))))); EXPECT_TRUE(RunAndCompareNoHloPasses(std::move(module), ErrorSpec{0, 0})); } TEST_F(HorizontalLoopFusionTest, GradientDescentOptimizerLike) { HloComputation::Builder builder(TestName()); std::vector<HloInstruction*> var_outs; for (int64_t i = 0; i < 128; ++i) { Shape shape = ShapeUtil::MakeShape(F32, {i + 1, 1024}); HloInstruction* param_var_in = builder.AddInstruction( HloInstruction::CreateParameter(i * 3 + 0, shape, "var.in")); HloInstruction* param_alpha = builder.AddInstruction(HloInstruction::CreateParameter( i * 3 + 1, ShapeUtil::MakeShape(F32, {}), "alpha")); HloInstruction* param_delta = builder.AddInstruction( HloInstruction::CreateParameter(i * 3 + 2, shape, "delta")); HloInstruction* alpha_broadcasted = builder.AddInstruction( HloInstruction::CreateBroadcast(shape, param_alpha, {})); HloInstruction* alpha_delta = builder.AddInstruction(HloInstruction::CreateBinary( shape, HloOpcode::kMultiply, alpha_broadcasted, param_delta)); HloInstruction* var_out = builder.AddInstruction(HloInstruction::CreateBinary( shape, HloOpcode::kSubtract, param_var_in, alpha_delta)); var_outs.push_back(var_out); } builder.AddInstruction(HloInstruction::CreateTuple(var_outs)); auto module = CreateNewVerifiedModule(); module->AddEntryComputation(builder.Build()); EXPECT_TRUE(RunAndCompare(std::move(module), 
ErrorSpec{0, 0})); } TEST_F(HorizontalLoopFusionTest, FusingDifferentOutputs) { auto module = ParseAndReturnVerifiedModule(R"( HloModule HeterogeneousMultiOutputFusions fused_computation.1 { arg.1 = f16[1024]{0} parameter(0) arg.2 = f16[1024]{0} parameter(1) arg.3 = f16[1024]{0} parameter(2) arg.4 = f16[1024]{0} parameter(3) mul.1 = f16[1024]{0} multiply(arg.1, arg.2) mul.2 = f16[1024]{0} multiply(arg.3, arg.4) add.1 = f16[1024]{0} add(mul.1, mul.2) ROOT tuple.1 = (f16[1024]{0}, f16[1024]{0}) tuple(add.1, mul.1) } fused_computation.2 { arg.1 = f16[123]{0} parameter(0) arg.2 = f16[123]{0} parameter(1) arg.3 = f16[123]{0} parameter(2) arg.4 = f16[123]{0} parameter(3) add.1 = f16[123]{0} add(arg.1, arg.2) add.2 = f16[123]{0} add(arg.3, arg.4) mul.1 = f16[123]{0} multiply(add.1, add.2) ROOT tuple.1 = (f16[123]{0}, f16[123]{0}) tuple(mul.1, add.1) } ENTRY entry_computation { arg.1 = f16[1024]{0} parameter(0) arg.2 = f16[1024]{0} parameter(1) arg.3 = f16[1024]{0} parameter(2) arg.4 = f16[1024]{0} parameter(3) arg.5 = f16[123]{0} parameter(4) arg.6 = f16[123]{0} parameter(5) arg.7 = f16[123]{0} parameter(6) arg.8 = f16[123]{0} parameter(7) fusion.1 = (f16[1024]{0}, f16[1024]{0}) fusion(arg.1, arg.2, arg.3, arg.4), kind=kLoop, calls=fused_computation.1 fusion.2 = (f16[123]{0}, f16[123]{0}) fusion(arg.5, arg.6, arg.7, arg.8), kind=kLoop, calls=fused_computation.2 gte.1 = f16[1024]{0} get-tuple-element(fusion.1), index=0 gte.2 = f16[1024]{0} get-tuple-element(fusion.1), index=1 gte.3 = f16[123]{0} get-tuple-element(fusion.2), index=0 gte.4 = f16[123]{0} get-tuple-element(fusion.2), index=1 ROOT tuple.1 = (f16[1024]{0}, f16[1024]{0}, f16[123]{0}, f16[123]{0}) tuple(gte.1, gte.2, gte.3, gte.4) } )") .value(); EXPECT_TRUE(HorizontalLoopFusion().Run(module.get()).value()); TF_ASSERT_OK(verifier().Run(module.get()).status()); EXPECT_FALSE(HloDCE().Run(module.get()).value()); VLOG(2) << "Dump after horizontal fusion:"; VLOG(2) << module->ToString(); EXPECT_TRUE(RunAndCompareNoHloPasses(std::move(module), ErrorSpec{0, 0})); } TEST_F(HorizontalLoopFusionTest, RMSPropLike) { HloComputation::Builder builder(TestName()); std::vector<HloInstruction*> all_outputs; for (int64_t i = 0; i < 48; ++i) { Shape shape = ShapeUtil::MakeShape(F32, {2, 1024 + i}); HloInstruction* grad = builder.AddInstruction( HloInstruction::CreateParameter(i * 9 + 0, shape, "grad")); HloInstruction* ms = builder.AddInstruction( HloInstruction::CreateParameter(i * 9 + 1, shape, "ms")); HloInstruction* rho = builder.AddInstruction(HloInstruction::CreateParameter( i * 9 + 2, ShapeUtil::MakeShape(F32, {}), "rho")); HloInstruction* one_minus_rho = builder.AddInstruction(HloInstruction::CreateParameter( i * 9 + 3, ShapeUtil::MakeShape(F32, {}), "one_minus_rho")); HloInstruction* rho_broadcasted = builder.AddInstruction(HloInstruction::CreateBroadcast(shape, rho, {})); HloInstruction* one_mins_rho_broadcasted = builder.AddInstruction( HloInstruction::CreateBroadcast(shape, one_minus_rho, {})); HloInstruction* grad_squared = builder.AddInstruction( HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, grad, grad)); HloInstruction* ms_1st_term = builder.AddInstruction( HloInstruction::CreateBinary(shape, HloOpcode::kMultiply, grad_squared, one_mins_rho_broadcasted)); HloInstruction* ms_2nd_term = builder.AddInstruction(HloInstruction::CreateBinary( shape, HloOpcode::kMultiply, ms, rho_broadcasted)); HloInstruction* ms_out = builder.AddInstruction(HloInstruction::CreateBinary( shape, HloOpcode::kAdd, ms_1st_term, ms_2nd_term)); HloInstruction* 
momentum = builder.AddInstruction( HloInstruction::CreateParameter(i * 9 + 4, shape, "momemtum")); HloInstruction* mom = builder.AddInstruction( HloInstruction::CreateParameter(i * 9 + 5, shape, "mom")); HloInstruction* lr = builder.AddInstruction(HloInstruction::CreateParameter( i * 9 + 6, ShapeUtil::MakeShape(F32, {}), "lr")); HloInstruction* epsilon = builder.AddInstruction(HloInstruction::CreateParameter( i * 9 + 7, ShapeUtil::MakeShape(F32, {}), "epsilon")); HloInstruction* lr_broadcasted = builder.AddInstruction(HloInstruction::CreateBroadcast(shape, lr, {})); HloInstruction* epsilon_broadcasted = builder.AddInstruction( HloInstruction::CreateBroadcast(shape, epsilon, {})); HloInstruction* mom_1st_term = builder.AddInstruction(HloInstruction::CreateBinary( shape, HloOpcode::kMultiply, momentum, mom)); HloInstruction* ms_eps = builder.AddInstruction(HloInstruction::CreateBinary( shape, HloOpcode::kAdd, ms_out, epsilon_broadcasted)); HloInstruction* ms_eps_rsq = builder.AddInstruction( HloInstruction::CreateUnary(shape, HloOpcode::kRsqrt, ms_eps)); HloInstruction* grad_ms_eps_rsq = builder.AddInstruction(HloInstruction::CreateBinary( shape, HloOpcode::kMultiply, grad, ms_eps_rsq)); HloInstruction* mom_2nd_term = builder.AddInstruction(HloInstruction::CreateBinary( shape, HloOpcode::kMultiply, lr_broadcasted, grad_ms_eps_rsq)); HloInstruction* mom_out = builder.AddInstruction(HloInstruction::CreateBinary( shape, HloOpcode::kAdd, mom_1st_term, mom_2nd_term)); HloInstruction* var = builder.AddInstruction( HloInstruction::CreateParameter(i * 9 + 8, shape, "var")); HloInstruction* var_out = builder.AddInstruction(HloInstruction::CreateBinary( shape, HloOpcode::kSubtract, var, mom_out)); all_outputs.push_back(ms_out); all_outputs.push_back(mom_out); all_outputs.push_back(var_out); } builder.AddInstruction(HloInstruction::CreateTuple(all_outputs)); auto module = CreateNewVerifiedModule(); module->AddEntryComputation(builder.Build()); EXPECT_TRUE(RunAndCompare(std::move(module), ErrorSpec{1.0e-5, 1.0e-5})); } TEST_F(HorizontalLoopFusionTest, DynamicUpdateSlice) { auto module = ParseAndReturnVerifiedModule(R"( HloModule NegativeTestForDynamicUpdateSlice fusion.1 { p.0 = f16[5,9,10]{2,1,0} parameter(0) p.1 = s32[] parameter(1) p.2 = f16[1,9,10]{2,1,0} parameter(2) c.0 = s32[] constant(0) ROOT %dynamic-update-slice = f16[5,9,10]{2,1,0} dynamic-update-slice(p.0, p.2, p.1, c.0, c.0) } fusion.2 { p.0 = f16[5,9,10]{2,1,0} parameter(0) p.1 = s32[] parameter(1) p.2 = f16[1,9,10]{2,1,0} parameter(2) c.0 = s32[] constant(0) ROOT %dynamic-update-slice = f16[5,9,10]{2,1,0} dynamic-update-slice(p.0, p.2, p.1, c.0, c.0) } ENTRY entry { p.00 = f16[5,9,10]{2,1,0} parameter(0) p.01 = f16[5,9,10]{2,1,0} parameter(1) p.10 = s32[] parameter(2) p.11 = s32[] parameter(3) p.20 = f16[1,9,10]{2,1,0} parameter(4) p.21 = f16[1,9,10]{2,1,0} parameter(5) f1 = f16[5,9,10] fusion(p.00, p.10, p.20), kind=kLoop, calls=fusion.1 f2 = f16[5,9,10] fusion(p.01, p.11, p.21), kind=kLoop, calls=fusion.2 ROOT tuple = (f16[5,9,10],f16[5,9,10]) tuple(f1, f2) })") .value(); EXPECT_TRUE(HorizontalLoopFusion().Run(module.get()).value()); TF_ASSERT_OK(verifier().Run(module.get()).status()); EXPECT_FALSE(HloDCE().Run(module.get()).value()); VLOG(2) << "Dump after horizontal fusion:"; VLOG(2) << module->ToString(); EXPECT_TRUE(RunAndCompareNoHloPasses(std::move(module), ErrorSpec{0, 0})); } TEST_F(HorizontalLoopFusionTest, NegativeTestForSharedParam) { auto module = ParseAndReturnVerifiedModule(R"( HloModule BasicTest fused_computation.1 { 
arg.1 = f16[123]{0} parameter(0) arg.2 = f16[123]{0} parameter(1) ROOT mul.1 = f16[123]{0} multiply(arg.1, arg.2) } fused_computation.2 { arg.1 = f16[123]{0} parameter(0) arg.2 = f16[123]{0} parameter(1) ROOT add.1 = f16[123]{0} add(arg.1, arg.2) } ENTRY entry_computation { arg.1 = f16[123]{0} parameter(0) arg.2 = f16[123]{0} parameter(1) arg.3 = f16[123]{0} parameter(2) fusion.1 = f16[123]{0} fusion(arg.1, arg.2), kind=kLoop, calls=fused_computation.1 fusion.2 = f16[123]{0} fusion(arg.3, arg.2), kind=kLoop, calls=fused_computation.2 ROOT tuple.1 = (f16[123]{0}, f16[123]{0}) tuple(fusion.1, fusion.2) } )") .value(); EXPECT_FALSE(HorizontalLoopFusion().Run(module.get()).value()); } TEST_F(HorizontalLoopFusionTest, IterativeHorizontalFusion) { auto module = ParseAndReturnVerifiedModule(R"( HloModule NonfusionInstrs fused_computation.0 { arg.0 = f16[] parameter(0) arg.1 = f16[123]{0} parameter(1) broadcast.0 = f16[123]{0} broadcast(arg.0), dimensions={} ROOT mul.1 = f16[123]{0} multiply(broadcast.0, arg.1) } fused_computation.1 { arg.0 = f16[] parameter(0) arg.1 = f16[456]{0} parameter(1) broadcast.0 = f16[456]{0} broadcast(arg.0), dimensions={} ROOT add.1 = f16[456]{0} add(broadcast.0, arg.1) } ENTRY entry_computation { arg.0 = f16[] parameter(0) arg.1 = f16[] parameter(1) arg.2 = f16[123]{0} parameter(2) arg.3 = f16[456]{0} parameter(3) sqrt.0 = f16[] sqrt(arg.0) sqrt.1 = f16[] sqrt(arg.1) fusion.0 = f16[123]{0} fusion(sqrt.0, arg.2), kind=kLoop, calls=fused_computation.0 fusion.1 = f16[456]{0} fusion(sqrt.1, arg.3), kind=kLoop, calls=fused_computation.1 ROOT tuple.1 = (f16[123]{0}, f16[456]{0}) tuple(fusion.0, fusion.1) } )") .value(); HloPassFix<HloPassPipeline> iterative_h_fusion("iterative_h_fusion"); iterative_h_fusion.AddPass<HorizontalLoopFusion>(); iterative_h_fusion.AddPass<HloDCE>(); EXPECT_TRUE(iterative_h_fusion.Run(module.get()).value()); const HloInstruction* entry_root = module->entry_computation()->root_instruction(); const HloInstruction* fusion = nullptr; ASSERT_THAT(entry_root, GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)), m::GetTupleElement(m::Fusion())))); EXPECT_TRUE(fusion->IsMultiOutputFusion()); EXPECT_EQ( absl::c_count_if(module->entry_computation()->instructions(), IsFusion), 2); } TEST_F(HorizontalLoopFusionTest, TraversalOrder) { auto module = ParseAndReturnVerifiedModule(R"( HloModule cluster %fused_computation (param_0: f32[256,256], param_1: f32[], param_2: f32[]) -> f32[256,256] { %param_0 = f32[256,256]{1,0} parameter(0) %param_1 = f32[] parameter(1) %param_2 = f32[] parameter(2) %multiply.0 = f32[] multiply(f32[] %param_1, f32[] %param_2) %broadcast.0 = f32[256,256]{1,0} broadcast(f32[] %multiply.0), dimensions={} ROOT %multiply.1 = f32[256,256]{1,0} multiply(f32[256,256]{1,0} %param_0, f32[256,256]{1,0} %broadcast.0) } %fused_computation.1 (param_0: f32[256,256], param_1: f32[], param_2: f32[]) -> f32[256,256] { %param_0 = f32[256,256]{1,0} parameter(0) %param_1 = f32[] parameter(1) %param_2 = f32[] parameter(2) %multiply.0 = f32[] multiply(f32[] %param_1, f32[] %param_2) %broadcast.0 = f32[256,256]{1,0} broadcast(f32[] %multiply.0), dimensions={} ROOT %multiply.1 = f32[256,256]{1,0} multiply(f32[256,256]{1,0} %param_0, f32[256,256]{1,0} %broadcast.0) } ENTRY %entry_computation (arg0: f32[256,256], arg1: f32[256,256], arg2: f32[], arg3: f32[], arg4: f32[], arg5: f32[]) -> (f32[256,256], f32[256,256]) { %arg0 = f32[256,256]{1,0} parameter(0), parameter_replication={false} %arg1 = f32[256,256]{1,0} parameter(1), parameter_replication={false} 
%arg2 = f32[] parameter(2), parameter_replication={false} %arg3 = f32[] parameter(3), parameter_replication={false} %arg4 = f32[] parameter(4), parameter_replication={false} %arg5 = f32[] parameter(5), parameter_replication={false} %sqrt = f32[] sqrt(f32[] %arg2) %sqrt.1 = f32[] sqrt(f32[] %arg3) %fusion = f32[256,256]{1,0} fusion(f32[256,256]{1,0} %arg0, f32[] %sqrt, f32[] %sqrt.1), kind=kLoop, calls=%fused_computation %sqrt.2 = f32[] sqrt(f32[] %arg4) %sqrt.3 = f32[] sqrt(f32[] %arg5) %fusion.1 = f32[256,256]{1,0} fusion(f32[256,256]{1,0} %arg1, f32[] %sqrt.2, f32[] %sqrt.3), kind=kLoop, calls=%fused_computation.1 ROOT %tuple.163 = (f32[256,256]{1,0}, f32[256,256]{1,0}) tuple(f32[256,256]{1,0} %fusion.1, f32[256,256]{1,0} %fusion) } )") .value(); HloPassFix<HloPassPipeline> iterative_h_fusion("iterative_h_fusion"); iterative_h_fusion.AddPass<HorizontalLoopFusion>(); EXPECT_TRUE(iterative_h_fusion.Run(module.get()).value()); EXPECT_EQ( absl::c_count_if(module->entry_computation()->instructions(), IsFusion), 2); } TEST_F(HorizontalLoopFusionTest, NoBufferAliasingOfDuplicateParameter) { const char* hlo_text = R"( HloModule m branch_a { p0 = s32[] parameter(0) c0 = s32[] constant(1) c1 = s32[] constant(2) b0 = s32[4096] broadcast(c0), dimensions={} b1 = s32[4096] broadcast(c1), dimensions={} ROOT r = (s32[4096], s32[4096]) tuple(b0, b1) } branch_b { p0 = s32[] parameter(0) c0 = s32[] constant(1) c1 = s32[] constant(2) b0 = s32[4096] broadcast(c0), dimensions={} b1 = s32[4096] broadcast(c1), dimensions={} ROOT r = (s32[4096], s32[4096]) tuple(b0, b1) } ENTRY e { p0 = s32[] parameter(0) c0 = s32[] constant(0) cond = (s32[4096], s32[4096]) conditional(p0, c0, c0), branch_computations={branch_a, branch_b} p1 = s32[4096] parameter(1) gte0 = s32[4096] get-tuple-element(cond), index=0 gte1 = s32[4096] get-tuple-element(cond), index=1 a0 = s32[4096] add(gte1, gte0) m0 = s32[4096] multiply(gte1, gte0) ROOT r = (s32[4096], s32[4096]) tuple(m0, a0) } )"; EXPECT_TRUE(RunAndCompare(hlo_text, std::nullopt)); } TEST_F(HorizontalLoopFusionTest, CopyInsertionFusionControlFlow) { const char* hlo_text = R"( HloModule cluster ENTRY main { cst = f32[1]{0} constant({0}) cp1 = f32[1]{0} copy(cst) cp2 = f32[1]{0} copy(cst) cp3 = f32[1]{0} copy(cst) cp4 = f32[1]{0} copy(cst), control-predecessors={cp1} ROOT tuple_out = (f32[1]{0}, f32[1]{0}, f32[1]{0}, f32[1]{0}) tuple(cp1, cp2, cp3, cp4) } )"; auto module = ParseAndReturnUnverifiedModule(hlo_text).value(); EXPECT_TRUE(HorizontalLoopFusion().Run(module.get()).value()); VLOG(2) << module->ToString(); EXPECT_EQ( absl::c_count_if(module->entry_computation()->instructions(), IsFusion), 1); const HloInstruction* entry_root = module->entry_computation()->root_instruction(); EXPECT_THAT(entry_root, GmockMatch(m::Tuple(m::Copy(), m::GetTupleElement(m::Fusion()), m::GetTupleElement(m::Fusion()), m::Copy()))); } TEST_F(HorizontalLoopFusionTest, DoNotMergeVariadicReductions) { auto module = ParseAndReturnVerifiedModule(R"( HloModule m fused_computation.94 { tmp_0 = f32[] parameter(0) tmp_1 = f32[] parameter(1) tmp_2 = pred[] compare(tmp_0, tmp_1), direction=GE tmp_3 = f32[] select(tmp_2, tmp_0, tmp_1) tmp_4 = pred[] compare(tmp_0, tmp_1), direction=EQ tmp_5 = s32[] parameter(2) tmp_6 = s32[] parameter(3) tmp_7 = s32[] minimum(tmp_5, tmp_6) tmp_8 = s32[] select(tmp_2, tmp_5, tmp_6) tmp_9 = s32[] select(tmp_4, tmp_7, tmp_8) ROOT tmp_10 = (f32[], s32[]) tuple(tmp_3, tmp_9) } minmax_func.1536 { tmp_0 = f32[] parameter(0) tmp_1 = f32[] parameter(2) tmp_2 = s32[] parameter(1) tmp_3 
= s32[] parameter(3) ROOT tmp_4 = (f32[], s32[]) fusion(tmp_0, tmp_1, tmp_2, tmp_3), kind=kLoop, calls=fused_computation.94 } fused_computation { tmp_0 = f32[554112,10]{1,0} parameter(0) tmp_1 = s32[554112,10]{1,0} iota(), iota_dimension=1 tmp_2 = f32[] constant(-inf) tmp_3 = s32[] constant(0) ROOT tmp_4 = (f32[554112]{0}, s32[554112]{0}) reduce(tmp_0, tmp_1, tmp_2, tmp_3), dimensions={1}, to_apply=minmax_func.1536 } fused_computation2 { tmp_0 = f32[554112,10]{1,0} parameter(0) tmp_1 = s32[554112,10]{1,0} iota(), iota_dimension=1 tmp_2 = f32[] constant(inf) tmp_3 = s32[] constant(1) ROOT tmp_4 = (f32[554112]{0}, s32[554112]{0}) reduce(tmp_0, tmp_1, tmp_2, tmp_3), dimensions={1}, to_apply=minmax_func.1536 } ENTRY e { tmp_0 = f32[554112,10]{1,0} parameter(0) tmp_1 = (f32[554112]{0}, s32[554112]{0}) fusion(tmp_0), kind=kLoop, calls=fused_computation tmp_2 = s32[554112]{0} get-tuple-element(tmp_1), index=1 tmp_3 = f32[554112,10]{1,0} parameter(1) tmp_4 = (f32[554112]{0}, s32[554112]{0}) fusion(tmp_3), kind=kLoop, calls=fused_computation2 tmp_5 = s32[554112]{0} get-tuple-element(tmp_4), index=1 ROOT tmp_6 = s32[554112]{0} add(tmp_2, tmp_5) })") .value(); EXPECT_FALSE(HorizontalLoopFusion().Run(module.get()).value()); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/horizontal_loop_fusion.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/horizontal_loop_fusion_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
e782c79f-e9a8-483b-9862-b4e5938a0e81
cpp
tensorflow/tensorflow
reduction_splitter
third_party/xla/xla/service/gpu/transforms/reduction_splitter.cc
third_party/xla/xla/service/gpu/transforms/reduction_splitter_test.cc
#include "xla/service/gpu/transforms/reduction_splitter.h" #include <algorithm> #include <cstdint> #include <cstdlib> #include <memory> #include <utility> #include <vector> #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/dfs_hlo_visitor_with_default.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/layout_util.h" #include "xla/service/gpu/reduction_utils.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { class ReductionSplitterVisitor : public DfsHloRewriteVisitor { public: explicit ReductionSplitterVisitor(bool ignore_small_dims) : ignore_small_dims_(ignore_small_dims) {} absl::Status HandleReduce(HloInstruction *reduce) override { VLOG(4) << "Input: " << reduce->ToString(); if (IsReductionFromOrToContiguousDimensions(*reduce)) { VLOG(4) << "Reduction with contiguous dimensions. Return."; return absl::OkStatus(); } if (reduce->dimensions().size() < 2) { return absl::OkStatus(); } if (!reduce->shape().IsArray()) { return absl::OkStatus(); } HloInstruction *operand = reduce->mutable_operand(0); const Shape &shape = operand->shape(); CHECK(shape == LayoutUtil::GetWithDefaultLayout(shape)) << "Default layout should be enforced on reduction operand"; for (int64_t i = 0; i < reduce->dimensions().size(); ++i) { for (int64_t j = i + 1; j < reduce->dimensions().size(); ++j) { CHECK(abs(reduce->dimensions(i) - reduce->dimensions(j)) > 1) << "Reduction dimensions must not be consecutive"; } } int64_t max_shape_dim = 0; int64_t max_reduce_dim = 0; const auto &input_shape = reduce->operand(0)->shape(); for (int64_t i = 0; i < reduce->dimensions().size(); ++i) { if (input_shape.dimensions(reduce->dimensions(i)) > max_shape_dim) { max_reduce_dim = reduce->dimensions(i); max_shape_dim = input_shape.dimensions(max_reduce_dim); } } if (ignore_small_dims_ && max_shape_dim <= 8) { return absl::OkStatus(); } VLOG(3) << "Splitting reduction " << reduce->name() << " at dimension " << max_reduce_dim; std::vector<int64_t> pre_reduce_dims; pre_reduce_dims.push_back(max_reduce_dim); std::vector<int64_t> pre_reduce_shape_dims(input_shape.dimensions().begin(), input_shape.dimensions().end()); pre_reduce_shape_dims.erase(pre_reduce_shape_dims.begin() + max_reduce_dim); Shape pre_reduce_shape = ShapeUtil::MakeShape( reduce->shape().element_type(), pre_reduce_shape_dims); std::unique_ptr<HloInstruction> pre_reduce = HloInstruction::CreateReduce( pre_reduce_shape, reduce->mutable_operand(0), reduce->mutable_operand(1), pre_reduce_dims, reduce->to_apply()); pre_reduce->set_metadata(reduce->metadata()); std::vector<int64_t> final_reduce_dims(reduce->dimensions().begin(), reduce->dimensions().end()); final_reduce_dims.erase( std::remove(final_reduce_dims.begin(), final_reduce_dims.end(), max_reduce_dim), final_reduce_dims.end()); for (int64_t i = 0; i < final_reduce_dims.size(); ++i) { if (final_reduce_dims[i] > max_reduce_dim) { final_reduce_dims[i]--; } } std::unique_ptr<HloInstruction> final_reduce = HloInstruction::CreateReduce( reduce->shape(), reduce->parent()->AddInstruction(std::move(pre_reduce)), reduce->mutable_operand(1), final_reduce_dims, reduce->to_apply()); return ReplaceWithNewInstruction(reduce, std::move(final_reduce)); } private: bool ignore_small_dims_; }; absl::StatusOr<bool> ReductionSplitter::Run( HloModule *module, const 
absl::flat_hash_set<absl::string_view> &execution_threads) { TF_ASSIGN_OR_RETURN(bool changed, ReductionSplitterVisitor(ignore_small_dims_) .RunOnModule(module, execution_threads)); return changed; } } }
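A minimal sketch, assuming the surrounding XLA build environment; it is not part of the original file. It shows how ReductionSplitter is invoked and what the rewrite does, matching the tests that follow; the wrapper name SplitReductionsSketch is illustrative only.

#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/transforms/reduction_splitter.h"

namespace xla::gpu {

// Splits a reduce over several non-contiguous dimensions into two stacked
// reduces, largest reduced dimension first. For example, in the
// SplitReductionAtDimensionTwo test below, reduce(dimensions={0,2}) over
// f32[6,16,512,64] becomes reduce(dimensions={2}) followed by
// reduce(dimensions={0}).
inline absl::StatusOr<bool> SplitReductionsSketch(HloModule* module) {
  // ignore_small_dims=true skips the rewrite when the largest reduced
  // dimension is at most 8, per the check in HandleReduce above.
  return ReductionSplitter(/*ignore_small_dims=*/true).Run(module);
}

}  // namespace xla::gpu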
#include "xla/service/gpu/transforms/reduction_splitter.h" #include <cstdint> #include <vector> #include "xla/hlo/ir/hlo_instruction.h" #include "xla/service/hlo_parser.h" #include "xla/service/pattern_matcher.h" #include "xla/service/pattern_matcher_gmock.h" #include "xla/shape_util.h" #include "xla/test.h" #include "xla/tests/hlo_test_base.h" namespace xla { namespace gpu { namespace { namespace m = ::xla::match; class ReductionSplitterTest : public HloTestBase {}; TEST_F(ReductionSplitterTest, SplitReductionAtDimensionTwo) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test add_computation { x = f32[] parameter(0) y = f32[] parameter(1) ROOT add = f32[] add(x, y) } ENTRY entry_computation { param_0 = f16[6,16,512,64]{3,2,1,0} parameter(0) transpose.1781 = f16[6,512,16,64]{3,1,2,0} transpose(param_0), dimensions={0,2,1,3} convert.6986 = f32[6,512,16,64]{3,1,2,0} convert(transpose.1781) bitcast.2136 = f32[6,16,512,64]{3,2,1,0} bitcast(convert.6986) constant_11111 = f32[] constant(0) ROOT reduce.982 = f32[16,64]{1,0} reduce(bitcast.2136, constant_11111), dimensions={0,2}, to_apply=add_computation } )") .value(); ASSERT_TRUE( ReductionSplitter(true).Run(module.get()).value()); SCOPED_TRACE(module->ToString()); const HloInstruction* root_reduction = module->entry_computation()->root_instruction(); ASSERT_THAT(root_reduction, GmockMatch(m::Reduce(m::Reduce(), m::Constant()))); auto* pre_reduction = root_reduction->operand(0); EXPECT_THAT(pre_reduction->dimensions(), std::vector<int64_t>({2})); EXPECT_THAT(pre_reduction->shape(), ShapeUtil::MakeShape(F32, {6, 16, 64})); EXPECT_THAT(root_reduction->dimensions(), std::vector<int64_t>({0})); EXPECT_THAT(root_reduction->shape(), ShapeUtil::MakeShape(F32, {16, 64})); } TEST_F(ReductionSplitterTest, SplitReductionAtDimensionZero) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test add_computation { x = f32[] parameter(0) y = f32[] parameter(1) ROOT add = f32[] add(x, y) } ENTRY entry_computation { param_0 = f32[1024,16,512,64,128]{4,3,2,1,0} parameter(0) constant_11111 = f32[] constant(0) ROOT reduce.982 = f32[16,64]{1,0} reduce(param_0, constant_11111), dimensions={2,0,4}, to_apply=add_computation } )") .value(); ASSERT_TRUE( ReductionSplitter(false).Run(module.get()).value()); SCOPED_TRACE(module->ToString()); const HloInstruction* root_reduction = module->entry_computation()->root_instruction(); ASSERT_THAT(root_reduction, GmockMatch(m::Reduce(m::Reduce(), m::Constant()))); auto* pre_reduction = root_reduction->operand(0); EXPECT_THAT(pre_reduction->dimensions(), std::vector<int64_t>({0})); EXPECT_THAT(pre_reduction->shape(), ShapeUtil::MakeShape(F32, {16, 512, 64, 128})); EXPECT_THAT(root_reduction->dimensions(), std::vector<int64_t>({1, 3})); EXPECT_THAT(root_reduction->shape(), ShapeUtil::MakeShape(F32, {16, 64})); } TEST_F(ReductionSplitterTest, DontSplitReductionWithSmallDimensions) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test add_computation { x = f32[] parameter(0) y = f32[] parameter(1) ROOT add = f32[] add(x, y) } ENTRY entry_computation { param_0 = f32[16,8,1024,8]{3,2,1,0} parameter(0) constant_11111 = f32[] constant(0) ROOT reduce.982 = f32[16,1024]{1,0} reduce(param_0, constant_11111), dimensions={3,1}, to_apply=add_computation } )") .value(); EXPECT_FALSE( ReductionSplitter(true).Run(module.get()).value()); EXPECT_TRUE( ReductionSplitter(false).Run(module.get()).value()); } TEST_F(ReductionSplitterTest, DontSplitReductionsWithContiguousDimensions) { auto module = 
ParseAndReturnVerifiedModule(R"( HloModule test add_computation { x = f32[] parameter(0) y = f32[] parameter(1) ROOT add = f32[] add(x, y) } ENTRY entry_computation { param_0 = f32[128,128,64,128]{3,2,1,0} parameter(0) constant_11111 = f32[] constant(0) ROOT reduce.982 = f32[128,64]{1,0} reduce(param_0, constant_11111), dimensions={3,0}, to_apply=add_computation } )") .value(); EXPECT_FALSE( ReductionSplitter(false).Run(module.get()).value()); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/reduction_splitter.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/reduction_splitter_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
a0ee1f0c-a249-4365-b8f1-cd4ef7e24f54
cpp
tensorflow/tensorflow
cublas_pad_for_gemms
third_party/xla/xla/service/gpu/transforms/cublas_pad_for_gemms.cc
third_party/xla/xla/service/gpu/transforms/cublas_pad_for_gemms_test.cc
#include "xla/service/gpu/transforms/cublas_pad_for_gemms.h" #include <cstdint> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_set.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/literal_util.h" #include "xla/service/gpu/fusions/triton/triton_support_legacy.h" #include "xla/service/gpu/ir_emission_utils.h" #include "xla/service/gpu/transforms/gemm_fusion.h" #include "xla/shape.h" #include "xla/stream_executor/device_description.h" #include "xla/util.h" #include "tsl/platform/logging.h" #include "tsl/platform/status.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { static absl::StatusOr<bool> PadForGemm(HloDotInstruction* dot, PrimitiveType datatype, int pad_to_multiple_of) { auto* lhs = dot->mutable_operand(0); auto* rhs = dot->mutable_operand(1); Shape lshape = lhs->shape(); Shape rshape = rhs->shape(); Shape result_shape = dot->shape(); if (lshape.element_type() != datatype || rshape.element_type() != datatype) { return false; } auto pad_dim = [&](Shape& s, int dim) { s.set_dimensions(dim, RoundUpTo<int64_t>(s.dimensions(dim), pad_to_multiple_of)); }; auto pad_matrix_dims = [&pad_dim](Shape s) { pad_dim(s, s.rank() - 2); pad_dim(s, s.rank() - 1); return s; }; Shape new_lshape = pad_matrix_dims(lshape); Shape new_rshape = pad_matrix_dims(rshape); Shape new_result_shape = pad_matrix_dims(result_shape); if (new_lshape == lshape && new_rshape == rshape) { return false; } VLOG(3) << "old shape: " << lshape << " " << rshape << " " << result_shape; VLOG(3) << "new shape: " << new_lshape << " " << new_rshape << " " << new_result_shape; auto create_padding_config = [](Shape& shape, Shape& new_shape) { PaddingConfig padding_config; for (int i = 0; i < shape.rank(); ++i) { auto dimension = padding_config.add_dimensions(); dimension->set_edge_padding_high(new_shape.dimensions()[i] - shape.dimensions()[i]); dimension->set_edge_padding_low(0); dimension->set_interior_padding(0); } return padding_config; }; auto l_padding_config = create_padding_config(lshape, new_lshape); auto r_padding_config = create_padding_config(rshape, new_rshape); HloComputation* parent = dot->parent(); HloInstruction* zero_float = parent->AddInstruction( HloInstruction::CreateConstant(LiteralUtil::Zero(datatype))); zero_float->set_metadata(dot->metadata()); HloInstruction* lpad = parent->AddInstruction( HloInstruction::CreatePad(new_lshape, lhs, zero_float, l_padding_config)); lpad->set_metadata(dot->metadata()); HloInstruction* rpad = parent->AddInstruction( HloInstruction::CreatePad(new_rshape, rhs, zero_float, r_padding_config)); rpad->set_metadata(dot->metadata()); HloInstruction* new_dot = parent->AddInstruction( dot->CloneWithNewOperands(new_result_shape, {lpad, rpad})); std::vector<int64_t> start_indices(result_shape.rank(), 0); std::vector<int64_t> strides(result_shape.rank(), 1); HloInstruction* slice = parent->AddInstruction( HloInstruction::CreateSlice(result_shape, new_dot, start_indices, result_shape.dimensions(), strides)); slice->set_metadata(dot->metadata()); bool is_root = dot->user_count() == 0; TF_CHECK_OK(parent->ReplaceInstruction(dot, slice)); if (is_root) { parent->set_root_instruction(slice); } return true; } namespace { bool CheckCanonical(HloDotInstruction* dot) { const auto& dimension_numbers = dot->dot_dimension_numbers(); if (dimension_numbers.lhs_batch_dimensions_size() + 2 != 
dot->operand(0)->shape().rank() || dimension_numbers.rhs_batch_dimensions_size() + 2 != dot->operand(1)->shape().rank()) { VLOG(2) << dot->ToString() << " is not canonical: Expected all dimensions but 2 to be " "batch_dimensions. Hence, this dot is not a candidate for padding."; return false; } std::vector<int64_t> canonical_batch_dims( dimension_numbers.lhs_batch_dimensions_size()); absl::c_iota(canonical_batch_dims, 0); if (!absl::c_equal(dimension_numbers.lhs_batch_dimensions(), canonical_batch_dims) || !absl::c_equal(dimension_numbers.rhs_batch_dimensions(), canonical_batch_dims)) { VLOG(2) << dot->ToString() << " is not canonical: Expected batch dimensions to be all " "dimensions except for the last 2 ones. Hence, this dot is not a " "candidate for padding."; return false; } return true; } } static std::vector<HloDotInstruction*> GetRelevantDots( const se::GpuComputeCapability& gpu_compute_capability, HloComputation* comp, PrimitiveType datatype) { std::vector<HloDotInstruction*> gemms; for (HloInstruction* instr : comp->instructions()) { if (IsMatrixMultiplication(*instr)) { HloDotInstruction* dot = Cast<HloDotInstruction>(instr); if (instr->operand(0)->shape().element_type() == datatype && CheckCanonical(dot) && !(instr->GetModule() ->config() .debug_options() .xla_gpu_enable_triton_gemm() && legacy_triton::IsTritonSupportedInstruction( *dot, gpu_compute_capability) && ShouldTritonHandleGEMM(*dot, gpu_compute_capability))) { gemms.push_back(dot); } } } return gemms; } absl::StatusOr<bool> CublasPadForGemms::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { bool changed = false; for (HloComputation* comp : module->MakeNonfusionComputations(execution_threads)) { for (HloDotInstruction* dot : GetRelevantDots(gpu_compute_capability_, comp, datatype_)) { TF_ASSIGN_OR_RETURN(bool result, PadForGemm(dot, datatype_, pad_to_multiple_of_)); changed |= result; } } return changed; } } }
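A minimal sketch, assuming the surrounding XLA build environment; it is not part of the original file. It shows how CublasPadForGemms is configured, mirroring the PadForF16Gemms helper in the tests that follow; the wrapper name PadF16GemmsSketch is illustrative only.

#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/transforms/cublas_pad_for_gemms.h"
#include "xla/stream_executor/device_description.h"
#include "xla/xla_data.pb.h"

namespace xla::gpu {

// Zero-pads the two minor-most dimensions of eligible f16 dot operands up to a
// multiple of 8 and slices the product back to its original shape, so cuBLAS
// can select tensor-core kernels on an sm_70 (Volta) device.
inline absl::StatusOr<bool> PadF16GemmsSketch(HloModule* module) {
  return CublasPadForGemms(se::CudaComputeCapability(7, 0), PrimitiveType::F16,
                           /*pad_to_multiple_of=*/8)
      .Run(module);
}

}  // namespace xla::gpu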
#include "xla/service/gpu/transforms/cublas_pad_for_gemms.h" #include <gmock/gmock.h> #include <gtest/gtest.h> #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/service/pattern_matcher.h" #include "xla/service/pattern_matcher_gmock.h" #include "xla/stream_executor/device_description.h" #include "xla/tests/hlo_test_base.h" namespace m = ::xla::match; namespace xla { namespace gpu { namespace { class CublasGemmPadForTensorCoresTest : public HloTestBase { protected: bool PadForF16Gemms(HloModule* module) { return CublasPadForGemms(se::CudaComputeCapability(7, 0), PrimitiveType::F16, 8) .Run(module) .value(); } DebugOptions GetDebugOptionsForTest() override { DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest(); debug_options.set_xla_gpu_triton_gemm_any(false); return debug_options; } }; TEST_F(CublasGemmPadForTensorCoresTest, OneDotRootComputation) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { %param1 = f16[2048,1024] parameter(0) %param2 = f16[1024,33708] parameter(1) ROOT %dot.2309 = f16[2048,33708]{1,0} dot(f16[2048,1024]{1,0} %param1, f16[1024,33708]{0,1} %param2), lhs_contracting_dims={1}, rhs_contracting_dims={0} })") .value(); EXPECT_TRUE(PadForF16Gemms(module.get())); SCOPED_TRACE(module->ToString()); auto* root = module->entry_computation()->root_instruction(); EXPECT_THAT( root, GmockMatch( m::Slice(m::Dot(m::Pad(m::Parameter().WithShape(F16, {2048, 1024}), m::Constant().WithShape(F16, {})) .WithShape(F16, {2048, 1024}), m::Pad(m::Parameter().WithShape(F16, {1024, 33708}), m::Constant().WithShape(F16, {})) .WithShape(F16, {1024, 33712})) .WithShape(F16, {2048, 33712}) .WithContractingDims({1}, {0})) .WithShape(F16, {2048, 33708}))); } TEST_F(CublasGemmPadForTensorCoresTest, OneDotS8RootComputation) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { %param1 = s8[2047,1023] parameter(0) %param2 = s8[1023,33707] parameter(1) ROOT %dot.2309 = s32[2047,33707]{1,0} dot(s8[2047,1023]{1,0} %param1, s8[1023,33707]{0,1} %param2), lhs_contracting_dims={1}, rhs_contracting_dims={0} })") .value(); EXPECT_TRUE( CublasPadForGemms(se::CudaComputeCapability(7, 0), PrimitiveType::S8, 4) .Run(module.get()) .value()); SCOPED_TRACE(module->ToString()); auto* root = module->entry_computation()->root_instruction(); EXPECT_THAT( root, GmockMatch( m::Slice(m::Dot(m::Pad(m::Parameter().WithShape(S8, {2047, 1023}), m::Constant().WithShape(S8, {})) .WithShape(S8, {2048, 1024}), m::Pad(m::Parameter().WithShape(S8, {1023, 33707}), m::Constant().WithShape(S8, {})) .WithShape(S8, {1024, 33708})) .WithShape(S32, {2048, 33708}) .WithContractingDims({1}, {0})) .WithShape(S32, {2047, 33707}))); } TEST_F(CublasGemmPadForTensorCoresTest, TwoDotsComputation) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { %param1 = f16[2048, 1024] parameter(0) %param2 = f16[1024, 33708] parameter(1) %param3 = f16[33708, 1] parameter(2) %dot1 = f16[2048, 33708]{1,0} dot(f16[2048, 1024]{1,0} %param1, f16[1024, 33708]{0,1} %param2), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT %dot2 = f16[2048, 1]{1,0} dot(f16[2048, 33708]{1,0} %dot1, f16[33708, 1]{0,1} %param3), lhs_contracting_dims={1}, rhs_contracting_dims={0} })") .value(); EXPECT_TRUE(PadForF16Gemms(module.get())); SCOPED_TRACE(module->ToString()); auto* root = module->entry_computation()->root_instruction(); const HloInstruction* dot2 = nullptr; ASSERT_THAT( root, GmockMatch( m::Slice( 
m::Dot( m::Pad(m::Slice(m::Dot(&dot2, m::Pad().WithShape(F16, {2048, 1024}), m::Pad().WithShape(F16, {1024, 33712})) .WithContractingDims( {1}, {0}) .WithShape(F16, {2048, 33712})) .WithShape(F16, {2048, 33708}), m::Constant().WithShape(F16, {})) .WithShape(F16, {2048, 33712}), m::Pad(m::Parameter().WithShape(F16, {33708, 1}), m::Constant().WithShape(F16, {})) .WithShape(F16, {33712, 8})) .WithShape(F16, {2048, 8}) .WithContractingDims({1}, {0})) .WithShape(F16, {2048, 1}))); EXPECT_THAT( dot2, GmockMatch(m::Dot(m::Pad(m::Parameter().WithShape(F16, {2048, 1024}), m::Constant().WithShape(F16, {})) .WithShape(F16, {2048, 1024}), m::Pad(m::Parameter().WithShape(F16, {1024, 33708}), m::Constant().WithShape(F16, {})) .WithShape(F16, {1024, 33712})) .WithContractingDims({1}, {0}))); } TEST_F(CublasGemmPadForTensorCoresTest, DotWithBatchDimensions) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { %param1 = f16[3, 5, 2048, 1024] parameter(0) %param2 = f16[3, 5, 1024, 33708] parameter(1) ROOT %dot.2309 = f16[3, 5, 2048, 33708]{3, 2, 1,0} dot(f16[3, 5, 2048, 1024]{3, 2, 1,0} %param1, f16[3, 5, 1024, 33708]{2, 3, 0,1} %param2), lhs_batch_dims={0, 1}, rhs_batch_dims={0, 1}, lhs_contracting_dims={3}, rhs_contracting_dims={2}})") .value(); EXPECT_TRUE(PadForF16Gemms(module.get())); SCOPED_TRACE(module->ToString()); auto* root = module->entry_computation()->root_instruction(); EXPECT_THAT( root, GmockMatch( m::Slice( m::Dot(m::Pad(m::Parameter().WithShape(F16, {3, 5, 2048, 1024}), m::Constant().WithShape(F16, {})) .WithShape(F16, {3, 5, 2048, 1024}), m::Pad(m::Parameter().WithShape(F16, {3, 5, 1024, 33708}), m::Constant().WithShape(F16, {})) .WithShape(F16, {3, 5, 1024, 33712})) .WithShape(F16, {3, 5, 2048, 33712}) .WithContractingDims({3}, {2})) .WithShape(F16, {3, 5, 2048, 33708}))); } TEST_F(CublasGemmPadForTensorCoresTest, NoDotComputation) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { %x = f32[] parameter(0) %y = f32[] parameter(1) ROOT %maximum = f32[] maximum(f32[] %x, f32[] %y) })") .value(); EXPECT_FALSE(PadForF16Gemms(module.get())); } TEST_F(CublasGemmPadForTensorCoresTest, F32DotComputation) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { %param1 = f32[2048,1024] parameter(0) %param2 = f32[1024,33708] parameter(1) ROOT %dot.2309 = f32[2048,33708]{1,0} dot(f32[2048,1024]{1,0} %param1, f32[1024,33708]{0,1} %param2), lhs_contracting_dims={1}, rhs_contracting_dims={0}})") .value(); EXPECT_FALSE(PadForF16Gemms(module.get())); } TEST_F(CublasGemmPadForTensorCoresTest, F64DotComputation) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { %param1 = f64[2048,1024] parameter(0) %param2 = f64[1024,33708] parameter(1) ROOT %dot.2309 = f64[2048,33708]{1,0} dot(f64[2048,1024]{1,0} %param1, f64[1024,33708]{0,1} %param2), lhs_contracting_dims={1}, rhs_contracting_dims={0}})") .value(); EXPECT_FALSE(PadForF16Gemms(module.get())); } TEST_F(CublasGemmPadForTensorCoresTest, MultiplesOf8DotComputation) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { %param1 = f16[2048,1024] parameter(0) %param2 = f16[1024,33712] parameter(1) ROOT %dot.2309 = f16[2048,33712]{1,0} dot(f16[2048,1024]{1,0} %param1, f16[1024,33712]{0,1} %param2), lhs_contracting_dims={1}, rhs_contracting_dims={0}})") .value(); EXPECT_FALSE(PadForF16Gemms(module.get())); } TEST_F(CublasGemmPadForTensorCoresTest, 
CheckSavingMetadata) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { %param1 = f16[2048,1024] parameter(0) %param2 = f16[1024,33708] parameter(1) ROOT %dot.2309 = f16[2048,33708]{1,0} dot(f16[2048,1024]{1,0} %param1, f16[1024,33708]{0,1} %param2), lhs_contracting_dims={1}, rhs_contracting_dims={0}, metadata={op_type="MatMul" op_name="transformer_v2/Transformer/decode/embedding_shared_weights_1/presoftmax_linear/MatMul"} })") .value(); SCOPED_TRACE(module->ToString()); EXPECT_TRUE(PadForF16Gemms(module.get())); auto metadata = module->entry_computation()->root_instruction()->metadata(); EXPECT_EQ("MatMul", metadata.op_type()); EXPECT_EQ( "transformer_v2/Transformer/decode/embedding_shared_weights_1/" "presoftmax_linear/MatMul", metadata.op_name()); } TEST_F(CublasGemmPadForTensorCoresTest, NotCanonicalizedDot) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { %param1 = f16[3, 5, 2048, 1024] parameter(0) %param2 = f16[3, 5, 1024, 33708] parameter(1) ROOT %dot.2309 = f16[3,2048, 33708]{2, 1, 0} dot(f16[3, 5, 2048, 1024]{3, 2, 1, 0} %param1, f16[3, 5, 1024, 33708]{3, 2, 1, 0} %param2), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={3, 1}, rhs_contracting_dims={2, 1}})") .value(); EXPECT_FALSE(PadForF16Gemms(module.get())); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/cublas_pad_for_gemms.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/cublas_pad_for_gemms_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
9a537f92-18a4-4555-9980-088c5e663ff9
cpp
tensorflow/tensorflow
dot_sparsity_rewriter
third_party/xla/xla/service/gpu/transforms/dot_sparsity_rewriter.cc
third_party/xla/xla/service/gpu/transforms/dot_sparsity_rewriter_test.cc
#include "xla/service/gpu/transforms/dot_sparsity_rewriter.h" #include <utility> #include "absl/container/flat_hash_set.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/dfs_hlo_visitor_with_default.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/service/hlo_creation_utils.h" #include "xla/util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { class SparseDotRewriterImpl : public DfsHloRewriteVisitor { public: absl::Status HandleDot(HloInstruction* instr) override { HloDotInstruction* dot = Cast<HloDotInstruction>(instr); if (dot->sparse_operands() != 1 || dot->sparsity().front().index() != 1) { return absl::OkStatus(); } HloInstruction* lhs = dot->mutable_operand(0); HloInstruction* rhs = dot->mutable_operand(1); HloInstruction* meta = dot->mutable_operand(2); DotDimensionNumbers dnums = dot->dot_dimension_numbers(); std::swap(*dnums.mutable_lhs_batch_dimensions(), *dnums.mutable_rhs_batch_dimensions()); std::swap(*dnums.mutable_lhs_contracting_dimensions(), *dnums.mutable_rhs_contracting_dimensions()); PrecisionConfig precision_config = dot->precision_config(); std::swap(precision_config.mutable_operand_precision()->at(0), precision_config.mutable_operand_precision()->at(1)); SparsityDescriptor sparsity = dot->sparsity().front(); sparsity.set_index(0); TF_ASSIGN_OR_RETURN( HloInstruction * new_dot, MakeDotHlo(rhs, lhs, dnums, precision_config, dot->shape().element_type(), {std::move(sparsity)}, {meta})); dot->SetupDerivedInstruction(new_dot); int batch_dims = dnums.lhs_batch_dimensions().size(); int new_lhs_noncontracting = rhs->shape().rank() - batch_dims - dnums.lhs_contracting_dimensions().size(); int new_rhs_noncontracting = lhs->shape().rank() - batch_dims - dnums.rhs_contracting_dimensions().size(); int rank = dot->shape().rank(); DimensionVector dimensions(rank); for (int i = 0; i < batch_dims; ++i) { dimensions[i] = i; } for (int i = 0; i < new_lhs_noncontracting; ++i) { dimensions[i + batch_dims] = i + batch_dims + new_rhs_noncontracting; } for (int i = 0; i < new_rhs_noncontracting; ++i) { dimensions[i + batch_dims + new_lhs_noncontracting] = i + batch_dims; } TF_ASSIGN_OR_RETURN(HloInstruction * transpose, MakeTransposeHlo(new_dot, dimensions)); transpose->set_metadata(dot->metadata()); *transpose->mutable_shape()->mutable_layout() = dot->shape().layout(); return ReplaceInstruction(dot, transpose); } }; } absl::StatusOr<bool> DotSparsityRewriter::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { return SparseDotRewriterImpl().RunOnModule(module, execution_threads); } } }
#include "xla/service/gpu/transforms/dot_sparsity_rewriter.h" #include <memory> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/tests/hlo_test_base.h" #include "xla/xla_data.pb.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { using ::testing::ElementsAre; class DotSparsityRewriterTest : public HloTestBase { public: DotSparsityRewriterTest() : HloTestBase(true) {} }; TEST_F(DotSparsityRewriterTest, SparseDotRhsToLhs) { const char* module_string = R"( HloModule m ENTRY e { lhs = f16[4,2,16,8,64] parameter(0) rhs = f16[2,4,8,32,128] parameter(1) meta = u16[2,4,8,4,128] parameter(2) ROOT dot = f16[4,2,16,128] dot(lhs, rhs, meta), lhs_contracting_dims={3,4}, rhs_contracting_dims={2,3}, lhs_batch_dims={0,1}, rhs_batch_dims={1,0}, sparsity=R.3@2:4 } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_string)); TF_ASSERT_OK_AND_ASSIGN(bool modified, DotSparsityRewriter().Run(module.get())); EXPECT_TRUE(modified); const HloTransposeInstruction* transpose = DynCast<HloTransposeInstruction>( module->entry_computation()->root_instruction()); ASSERT_TRUE(transpose != nullptr); EXPECT_THAT(transpose->dimensions(), ElementsAre(0, 1, 3, 2)); const HloDotInstruction* dot = DynCast<HloDotInstruction>(transpose->operand(0)); ASSERT_TRUE(dot != nullptr); const DotDimensionNumbers& dnums = dot->dot_dimension_numbers(); EXPECT_EQ(dnums.lhs_contracting_dimensions(0), 2); EXPECT_EQ(dnums.lhs_contracting_dimensions(1), 3); EXPECT_EQ(dnums.rhs_contracting_dimensions(0), 3); EXPECT_EQ(dnums.rhs_contracting_dimensions(1), 4); EXPECT_EQ(dnums.lhs_batch_dimensions(0), 1); EXPECT_EQ(dnums.lhs_batch_dimensions(1), 0); EXPECT_EQ(dnums.rhs_batch_dimensions(0), 0); EXPECT_EQ(dnums.rhs_batch_dimensions(1), 1); EXPECT_EQ(dot->sparse_operands(), 1); EXPECT_EQ(dot->sparsity().front().index(), 0); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/dot_sparsity_rewriter.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/dot_sparsity_rewriter_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
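For orientation, here is a minimal standalone sketch of driving the DotSparsityRewriter pass from this record outside the HloTestBase fixture. The HLO text is copied from the SparseDotRhsToLhs test above; the main() wrapper and the use of ParseAndReturnUnverifiedModule from xla/service/hlo_parser.h are illustrative assumptions, not code taken from the record.

#include <iostream>
#include <memory>

#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/transforms/dot_sparsity_rewriter.h"
#include "xla/service/hlo_parser.h"

int main() {
  // Sparse dot whose 2:4-sparse operand (and its metadata) sit on the RHS,
  // taken verbatim from the SparseDotRhsToLhs test in this record.
  constexpr char kHlo[] = R"(
HloModule m

ENTRY e {
  lhs = f16[4,2,16,8,64] parameter(0)
  rhs = f16[2,4,8,32,128] parameter(1)
  meta = u16[2,4,8,4,128] parameter(2)
  ROOT dot = f16[4,2,16,128] dot(lhs, rhs, meta),
    lhs_contracting_dims={3,4}, rhs_contracting_dims={2,3},
    lhs_batch_dims={0,1}, rhs_batch_dims={1,0}, sparsity=R.3@2:4
})";
  std::unique_ptr<xla::HloModule> module =
      xla::ParseAndReturnUnverifiedModule(kHlo).value();
  // The pass swaps the dot operands so the sparse side becomes the LHS and
  // appends a transpose that restores the original output dimension order.
  bool changed = xla::gpu::DotSparsityRewriter().Run(module.get()).value();
  std::cout << (changed ? module->ToString() : "no change") << "\n";
  return 0;
}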
480551a7-65d2-466d-950f-2315d2cafa93
cpp
tensorflow/tensorflow
cudnn_pad_for_convolutions
third_party/xla/xla/service/gpu/transforms/cudnn_pad_for_convolutions.cc
third_party/xla/xla/service/gpu/transforms/cudnn_pad_for_convolutions_test.cc
#include "xla/service/gpu/transforms/cudnn_pad_for_convolutions.h" #include <cstdint> #include <functional> #include <memory> #include <optional> #include <tuple> #include <utility> #include <vector> #include "absl/container/flat_hash_set.h" #include "absl/functional/bind_front.h" #include "absl/status/status.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/literal_util.h" #include "xla/primitive_util.h" #include "xla/service/gpu/cublas_cudnn.h" #include "xla/service/gpu/cudnn_support_utils.h" #include "xla/service/gpu/stream_executor_util.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/stream_executor/device_description.h" #include "xla/util.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { static HloInstruction* PadInstruction(HloInstruction* instr, const Shape& new_shape) { HloComputation* comp = instr->parent(); const Shape& shape = instr->shape(); PaddingConfig pad_config = MakeNoPaddingConfig(shape.rank()); bool added_padding = false; for (int64_t dim = 0; dim < shape.rank(); ++dim) { if (shape.dimensions(dim) == new_shape.dimensions(dim)) { continue; } CHECK_GT(new_shape.dimensions(dim), shape.dimensions(dim)); pad_config.mutable_dimensions(dim)->set_edge_padding_high( new_shape.dimensions(dim) - shape.dimensions(dim)); added_padding = true; } if (!added_padding) { return instr; } auto* zero = comp->AddInstruction( HloInstruction::CreateConstant(LiteralUtil::Zero(shape.element_type()))); return comp->AddInstruction( HloInstruction::CreatePad(new_shape, instr, zero, pad_config), &instr->metadata()); } static absl::Status PadConv(HloCustomCallInstruction* conv, absl::Span<const Shape> new_input_shapes, const Shape& new_result_shape) { CHECK_EQ(0, conv->shape().tuple_shapes(1).dimensions(0)) << "conv must use 0 scratch bytes, i.e. 
this pass must be run " "before CudnnConvAlgorithmPicker."; std::vector<HloInstruction*> new_operands; new_operands.reserve(conv->operand_count()); for (int i = 0; i < conv->operand_count(); ++i) { new_operands.push_back( PadInstruction(conv->mutable_operand(i), new_input_shapes[i])); } const Shape& result_shape = conv->shape().tuple_shapes(0); bool changed = false; for (int i = 0; i < conv->operand_count(); ++i) { changed |= (new_operands[i] != conv->mutable_operand(i)); } CHECK(changed) << "We should have had to pad at least one input operand."; auto add = [&](std::unique_ptr<HloInstruction> new_instr) { return conv->parent()->AddInstruction(std::move(new_instr)); }; Shape new_conv_shape = ShapeUtil::MakeTupleShape( {new_result_shape, ShapeUtil::MakeShape(U8, {0})}); auto* new_conv = add(conv->CloneWithNewOperands(new_conv_shape, new_operands)); new_conv->SetAndSanitizeName(conv->name()); VLOG(2) << "Padded features of " << conv->ToString() << ", replaced with " << new_conv->ToString(); if (!ShapeUtil::Equal(result_shape, new_result_shape)) { std::vector<int64_t> start_indices(result_shape.dimensions_size(), 0); std::vector<int64_t> end_indices(result_shape.dimensions().begin(), result_shape.dimensions().end()); std::vector<int64_t> strides(result_shape.dimensions_size(), 1); auto* new_conv_result = add( HloInstruction::CreateGetTupleElement(new_result_shape, new_conv, 0)); auto* empty_temp_buffer = add(HloInstruction::CreateConstant(LiteralUtil::CreateR1<uint8_t>({}))); auto* sliced_result = add(HloInstruction::CreateSlice( result_shape, new_conv_result, start_indices, end_indices, strides)); new_conv = add(HloInstruction::CreateTuple({sliced_result, empty_temp_buffer})); } return conv->parent()->ReplaceInstruction(conv, new_conv); } static std::vector<HloCustomCallInstruction*> GetRelevantConvs( HloComputation* comp) { std::vector<HloCustomCallInstruction*> convs; for (HloInstruction* instr : comp->instructions()) { if (IsCustomCallToDnnConvolution(*instr)) { convs.push_back(Cast<HloCustomCallInstruction>(instr)); } } return convs; } static absl::StatusOr<bool> ResolveAndPad( HloCustomCallInstruction* conv, std::function<absl::StatusOr<bool>(HloCustomCallInstruction* conv, std::vector<Shape>* new_input_shapes, Shape* new_result_shape)> resolve_pad_shapes) { std::vector<Shape> new_input_shapes; Shape new_result_shape; TF_ASSIGN_OR_RETURN(bool result, resolve_pad_shapes(conv, &new_input_shapes, &new_result_shape)); if (result) { TF_RETURN_IF_ERROR(PadConv(conv, new_input_shapes, new_result_shape)); return true; } return false; } static absl::StatusOr<bool> TryResolvePaddedShapesForTensorCore( HloCustomCallInstruction* conv, std::vector<Shape>* new_input_shapes_ptr, Shape* new_result_shape_ptr) { TF_ASSIGN_OR_RETURN(auto kind, GetCudnnConvKind(conv)); const auto& dnums = conv->convolution_dimension_numbers(); auto* lhs = conv->mutable_operand(0); auto* rhs = conv->mutable_operand(1); const Shape& result_shape = conv->shape().tuple_shapes(0); if (result_shape.element_type() != PrimitiveType::F16) { return false; } if (conv->feature_group_count() > 1 || conv->batch_group_count() > 1) { VLOG(2) << "Do not pad grouped convolution."; return false; } if (kind == CudnnConvKind::kForwardActivation) { return false; } Shape new_lhs_shape = lhs->shape(); Shape new_rhs_shape = rhs->shape(); Shape& new_result_shape = *new_result_shape_ptr; new_result_shape = conv->shape().tuple_shapes(0); Shape* new_input_shape; Shape* new_filter_shape; Shape* new_output_shape; std::tie(new_input_shape, 
new_filter_shape, new_output_shape) = [&] { switch (kind) { case CudnnConvKind::kForward: case CudnnConvKind::kForwardActivation: case CudnnConvKind::kForwardGraph: return std::make_tuple(&new_lhs_shape, &new_rhs_shape, &new_result_shape); case CudnnConvKind::kBackwardInput: return std::make_tuple(&new_result_shape, &new_rhs_shape, &new_lhs_shape); case CudnnConvKind::kBackwardFilter: return std::make_tuple(&new_lhs_shape, &new_result_shape, &new_rhs_shape); } }(); auto input_features = new_input_shape->dimensions(dnums.input_feature_dimension()); auto output_features = new_output_shape->dimensions(dnums.output_feature_dimension()); if (input_features == 3 && (output_features == 32 || output_features == 64)) { new_input_shape->set_dimensions(dnums.input_feature_dimension(), 4); new_filter_shape->set_dimensions(dnums.kernel_input_feature_dimension(), 4); } else { auto pad_dim = [](Shape* s, int64_t dim) { s->set_dimensions(dim, RoundUpTo<int64_t>(s->dimensions(dim), 8)); }; pad_dim(new_input_shape, dnums.input_feature_dimension()); pad_dim(new_filter_shape, dnums.kernel_input_feature_dimension()); pad_dim(new_filter_shape, dnums.kernel_output_feature_dimension()); pad_dim(new_output_shape, dnums.output_feature_dimension()); static constexpr double kMaxBytesTouchedBound = 1.35; auto check_size_increase = [&](const Shape& old_shape, const Shape& new_shape) { int64_t old_bytes = ShapeUtil::ByteSizeOf(old_shape); int64_t new_bytes = ShapeUtil::ByteSizeOf(new_shape); if (new_bytes <= old_bytes * kMaxBytesTouchedBound) { return true; } VLOG(3) << "Not padding convolution; doing so would change input / result " "shape from " << ShapeUtil::HumanString(old_shape) << " to " << ShapeUtil::HumanString(new_shape) << ", a size increase of " << new_bytes / static_cast<double>(old_bytes) << "x > " << kMaxBytesTouchedBound << "x: " << conv->ToString(); return false; }; if (!check_size_increase(lhs->shape(), new_lhs_shape) || !check_size_increase(rhs->shape(), new_rhs_shape) || !check_size_increase(result_shape, new_result_shape)) { return false; } } if (ShapeUtil::Equal(lhs->shape(), new_lhs_shape) && ShapeUtil::Equal(rhs->shape(), new_rhs_shape)) { VLOG(3) << "No need to pad features of " << conv->ToString(); return false; } new_input_shapes_ptr->push_back(new_lhs_shape); new_input_shapes_ptr->push_back(new_rhs_shape); return true; } absl::StatusOr<bool> TryResolvePaddedShapesForIntegerConvolution( int pad_to, const se::CudaComputeCapability& compute_capability, HloCustomCallInstruction* conv, std::vector<Shape>* new_input_shapes_ptr, Shape* new_result_shape_ptr) { TF_ASSIGN_OR_RETURN(auto kind, GetCudnnConvKind(conv)); const Shape& input_shape = conv->operand(0)->shape(); const Shape& kernel_shape = conv->operand(1)->shape(); const Shape& result_shape = conv->shape().tuple_shapes(0); if (!primitive_util::IsIntegralType(input_shape.element_type())) { return false; } if (kind != CudnnConvKind::kForward && kind != CudnnConvKind::kForwardActivation) { return false; } const auto& dnums = conv->convolution_dimension_numbers(); std::vector<Shape>& new_input_shapes = *new_input_shapes_ptr; for (auto operand : conv->operands()) { new_input_shapes.push_back(operand->shape()); } Shape& new_result_shape = *new_result_shape_ptr; new_result_shape = conv->shape().tuple_shapes(0); std::optional<int64_t> input_vect_dim; std::optional<int64_t> kernel_vect_dim; std::optional<int64_t> result_vect_dim; std::tie(input_vect_dim, kernel_vect_dim, result_vect_dim) = FindVectorizedFeatureDims(dnums, input_shape, kernel_shape, 
result_shape); int64_t input_vect_size = input_vect_dim.has_value() ? input_shape.dimensions(*input_vect_dim) : 1; int64_t kernel_vect_size = kernel_vect_dim.has_value() ? kernel_shape.dimensions(*kernel_vect_dim) : 1; int64_t result_vect_size = result_vect_dim.has_value() ? result_shape.dimensions(*result_vect_dim) : 1; if (pad_to % input_vect_size != 0 || pad_to % kernel_vect_size != 0 || pad_to % result_vect_size != 0) { return false; } TF_ASSIGN_OR_RETURN(bool cudnn_supports, CudnnSupportsOptimizedIntegerConvolution( compute_capability, *conv, pad_to)); if (!cudnn_supports) { return false; } { auto pad_dim = [&](Shape* s, int64_t dim, int64_t cur_vect_size) { CHECK_EQ(pad_to % cur_vect_size, 0); s->set_dimensions( dim, RoundUpTo<int64_t>(s->dimensions(dim), pad_to / cur_vect_size)); }; switch (kind) { case CudnnConvKind::kForward: CHECK_EQ(new_input_shapes.size(), 2); pad_dim(new_input_shapes.data(), dnums.input_feature_dimension(), input_vect_size); pad_dim(&new_input_shapes[1], dnums.kernel_input_feature_dimension(), kernel_vect_size); pad_dim(&new_input_shapes[1], dnums.kernel_output_feature_dimension(), 1); pad_dim(&new_result_shape, dnums.output_feature_dimension(), result_vect_size); break; case CudnnConvKind::kForwardActivation: CHECK(new_input_shapes.size() == 3 || new_input_shapes.size() == 4); pad_dim(new_input_shapes.data(), dnums.input_feature_dimension(), input_vect_size); pad_dim(&new_input_shapes[1], dnums.kernel_input_feature_dimension(), kernel_vect_size); pad_dim(&new_input_shapes[1], dnums.kernel_output_feature_dimension(), 1); pad_dim(&new_input_shapes[2], 0, 1); if (new_input_shapes.size() == 4) { pad_dim(&new_input_shapes[3], dnums.output_feature_dimension(), result_vect_size); } pad_dim(&new_result_shape, dnums.output_feature_dimension(), result_vect_size); break; default: CHECK(false); } static constexpr double kMaxBytesTouchedBound = 2; auto check_size_increase = [&](const Shape& old_shape, const Shape& new_shape) { int64_t old_bytes = ShapeUtil::ByteSizeOf(old_shape); int64_t new_bytes = ShapeUtil::ByteSizeOf(new_shape); if (new_bytes < old_bytes * kMaxBytesTouchedBound) { return true; } VLOG(3) << "Not padding convolution; doing so would change input / result " "shape from " << ShapeUtil::HumanString(old_shape) << " to " << ShapeUtil::HumanString(new_shape) << ", a size increase of " << new_bytes / static_cast<double>(old_bytes) << "x >= " << kMaxBytesTouchedBound << "x: " << conv->ToString(); return false; }; if (!check_size_increase(conv->operand(0)->shape(), new_input_shapes[0]) || !check_size_increase(result_shape, new_result_shape)) { return false; } } bool changed = false; for (int64_t i = 0; i < conv->operand_count(); ++i) { changed |= !ShapeUtil::Equal(conv->operand(i)->shape(), new_input_shapes[i]); } if (!changed) { VLOG(3) << "No need to pad features of " << conv->ToString(); } return changed; } absl::StatusOr<bool> CudnnPadForConvolutions::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { bool changed = false; for (HloComputation* comp : module->MakeNonfusionComputations(execution_threads)) { for (HloCustomCallInstruction* conv : GetRelevantConvs(comp)) { bool local_changed = false; if (compute_capability_.IsAtLeast(7, 5)) { TF_ASSIGN_OR_RETURN( local_changed, ResolveAndPad(conv, absl::bind_front( TryResolvePaddedShapesForIntegerConvolution, 32, compute_capability_))); } if (!local_changed) { TF_ASSIGN_OR_RETURN( local_changed, ResolveAndPad(conv, absl::bind_front( 
TryResolvePaddedShapesForIntegerConvolution, 4, compute_capability_))); } changed |= local_changed; } if (compute_capability_.IsAtLeast(se::CudaComputeCapability::VOLTA)) { for (HloCustomCallInstruction* conv : GetRelevantConvs(comp)) { TF_ASSIGN_OR_RETURN( bool local_changed, ResolveAndPad(conv, TryResolvePaddedShapesForTensorCore)); changed |= local_changed; } } } return changed; } } }
#include "xla/service/gpu/transforms/cudnn_pad_for_convolutions.h" #include <gmock/gmock.h> #include <gtest/gtest.h> #include "xla/service/gpu/cublas_cudnn.h" #include "xla/service/hlo_parser.h" #include "xla/service/pattern_matcher.h" #include "xla/service/pattern_matcher_gmock.h" #include "xla/tests/hlo_test_base.h" namespace xla { namespace gpu { namespace { namespace m = xla::match; class CudnnPadForConvolutionsTest : public HloTestBase {}; TEST_F(CudnnPadForConvolutionsTest, DoNotPadF16ForwardConvWhenGrouped) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = f16[704,48,1,49]{3,2,1,0} parameter(0) filter = f16[44,768,1,50]{3,2,1,0} parameter(1) ROOT result = (f16[1,128,48,768]{3,2,1,0}, u8[0]{0}) custom-call(input, filter) , window={size=1x50 pad=0_0x64_64} , dim_labels=fb01_io01->01bf , feature_group_count=16 , custom_call_target="__cudnn$convForward" })") .value(); EXPECT_FALSE(CudnnPadForConvolutions({7, 5}).Run(module.get()).value()); } TEST_F(CudnnPadForConvolutionsTest, PadF16ForwardConvInputChannels) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = f16[10,20,30,41] parameter(0) filter = f16[2,2,41,40] parameter(1) ROOT result = (f16[10,20,30,40], u8[0]) custom-call(input, filter), window={size=2x2}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" })") .value(); EXPECT_TRUE(CudnnPadForConvolutions({7, 0}).Run(module.get()).value()); auto* root = module->entry_computation()->root_instruction(); SCOPED_TRACE(module->ToString()); EXPECT_THAT( root, GmockMatch(m::CustomCall( {kCudnnConvForwardCallTarget}, m::Pad(m::Parameter(0), m::Op()).WithShape(F16, {10, 20, 30, 48}), m::Pad(m::Parameter(1), m::Op()).WithShape(F16, {2, 2, 48, 40})))); } TEST_F(CudnnPadForConvolutionsTest, PadF16BackwardInputConvOutputChannels) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { output = f16[10,20,30,41] parameter(0) filter = f16[2,2,40,41] parameter(1) ROOT result = (f16[10,20,30,40], u8[0]) custom-call(output, filter), window={size=2x2}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convBackwardInput" })") .value(); EXPECT_TRUE(CudnnPadForConvolutions({7, 0}).Run(module.get()).value()); auto* root = module->entry_computation()->root_instruction(); EXPECT_THAT( root, GmockMatch(m::CustomCall( {kCudnnConvBackwardInputCallTarget}, m::Pad(m::Parameter(0), m::Op()).WithShape(F16, {10, 20, 30, 48}), m::Pad(m::Parameter(1), m::Op()).WithShape(F16, {2, 2, 40, 48})))); } TEST_F(CudnnPadForConvolutionsTest, PadF16ForwardConvOutputChannels) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = f16[10,20,30,40] parameter(0) filter = f16[2,2,40,41] parameter(1) ROOT result = (f16[10,20,30,41], u8[0]) custom-call(input, filter), window={size=2x2}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" })") .value(); EXPECT_TRUE(CudnnPadForConvolutions({7, 0}).Run(module.get()).value()); auto* root = module->entry_computation()->root_instruction(); EXPECT_THAT(root, GmockMatch(m::Tuple( m::Slice(m::GetTupleElement(m::CustomCall( {kCudnnConvForwardCallTarget}, m::Parameter(0), m::Pad(m::Parameter(1), m::Op())))), m::Op()))); } TEST_F(CudnnPadForConvolutionsTest, PadF16BackwardInputConvInputChannels) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { output = f16[10,20,30,40] parameter(0) filter = f16[2,2,41,40] parameter(1) result = 
(f16[10,20,30,41], u8[0]) custom-call(output, filter), window={size=2x2}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convBackwardInput" ROOT gte = f16[10,20,30,41] get-tuple-element(result), index=0 })") .value(); EXPECT_TRUE(CudnnPadForConvolutions({7, 0}).Run(module.get()).value()); auto* root = module->entry_computation()->root_instruction(); EXPECT_THAT(root, GmockMatch(m::GetTupleElement(m::Tuple( m::Slice(m::GetTupleElement(m::CustomCall( {kCudnnConvBackwardInputCallTarget}, m::Parameter(0), m::Pad(m::Parameter(1), m::Op())))), m::Op())))); } TEST_F(CudnnPadForConvolutionsTest, PadF16BackwardFilterConvInputChannels) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = f16[10,20,30,41] parameter(0) output = f16[10,20,30,40] parameter(1) result = (f16[2,2,41,40], u8[0]) custom-call(input, output), window={size=2x2}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convBackwardFilter" ROOT gte = f16[2,2,41,40] get-tuple-element(result), index=0 })") .value(); EXPECT_TRUE(CudnnPadForConvolutions({7, 0}).Run(module.get()).value()); auto* root = module->entry_computation()->root_instruction(); EXPECT_THAT(root, GmockMatch(m::GetTupleElement(m::Tuple( m::Slice(m::GetTupleElement(m::CustomCall( {kCudnnConvBackwardFilterCallTarget}, m::Pad(m::Parameter(0), m::Op()), m::Parameter(1)))), m::Op())))); } TEST_F(CudnnPadForConvolutionsTest, PadF16BackwardFilterConvOutputChannels) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = f16[10,20,30,40] parameter(0) output = f16[10,20,30,41] parameter(1) result = (f16[2,2,40,41], u8[0]) custom-call(input, output), window={size=2x2}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convBackwardFilter" ROOT gte = f16[2,2,40,41] get-tuple-element(result), index=0 })") .value(); EXPECT_TRUE(CudnnPadForConvolutions({7, 0}).Run(module.get()).value()); auto* root = module->entry_computation()->root_instruction(); EXPECT_THAT(root, GmockMatch(m::GetTupleElement(m::Tuple( m::Slice(m::GetTupleElement(m::CustomCall( {kCudnnConvBackwardFilterCallTarget}, m::Parameter(0), m::Pad(m::Parameter(1), m::Op())))), m::Op())))); } TEST_F(CudnnPadForConvolutionsTest, PadInputFeatures3To4) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = f16[10,20,30,3] parameter(0) filter = f16[2,2,3,32] parameter(1) ROOT result = (f16[10,20,30,32], u8[0]) custom-call(input, filter), window={size=2x2}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" })") .value(); EXPECT_TRUE(CudnnPadForConvolutions({7, 0}).Run(module.get()).value()); auto* root = module->entry_computation()->root_instruction(); SCOPED_TRACE(module->ToString()); EXPECT_THAT( root, GmockMatch(m::CustomCall( {kCudnnConvForwardCallTarget}, m::Pad(m::Parameter(0), m::Op()).WithShape(F16, {10, 20, 30, 4}), m::Pad(m::Parameter(1), m::Op()).WithShape(F16, {2, 2, 4, 32})))); } TEST_F(CudnnPadForConvolutionsTest, PadIntForwardConvInputChannels) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[10,20,30,41] parameter(0) filter = s8[2,2,41,40] parameter(1) ROOT result = (f32[10,20,30,40], u8[0]) custom-call(input, filter), window={size=2x2}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" })") .value(); EXPECT_TRUE(CudnnPadForConvolutions({7, 0}).Run(module.get()).value()); auto* root = module->entry_computation()->root_instruction(); 
SCOPED_TRACE(module->ToString()); EXPECT_THAT( root, GmockMatch(m::CustomCall( {kCudnnConvForwardCallTarget}, m::Pad(m::Parameter(0), m::Op()).WithShape(S8, {10, 20, 30, 44}), m::Pad(m::Parameter(1), m::Op()).WithShape(S8, {2, 2, 44, 40})))); } TEST_F(CudnnPadForConvolutionsTest, PadIntForwardConvOutputChannels) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[10,20,30,40] parameter(0) filter = s8[2,2,40,41] parameter(1) ROOT result = (f32[10,20,30,41], u8[0]) custom-call(input, filter), window={size=2x2}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" })") .value(); EXPECT_TRUE(CudnnPadForConvolutions({7, 0}).Run(module.get()).value()); auto* root = module->entry_computation()->root_instruction(); EXPECT_THAT(root, GmockMatch(m::Tuple( m::Slice(m::GetTupleElement(m::CustomCall( {kCudnnConvForwardCallTarget}, m::Parameter(0), m::Pad(m::Parameter(1), m::Op())))), m::Op()))); } TEST_F(CudnnPadForConvolutionsTest, PadInt8To32OnSm75) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[10,20,30,40] parameter(0) filter = s8[2,2,40,41] parameter(1) ROOT result = (s8[10,20,30,41], u8[0]) custom-call(input, filter), window={size=2x2}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" })") .value(); EXPECT_TRUE(CudnnPadForConvolutions({7, 5}).Run(module.get()).value()); auto* root = module->entry_computation()->root_instruction(); EXPECT_THAT( root, GmockMatch(m::Tuple( m::Slice(m::GetTupleElement(m::CustomCall( {kCudnnConvForwardCallTarget}, m::Pad(m::Parameter(0), m::Op()).WithShape(S8, {10, 20, 30, 64}), m::Pad(m::Parameter(1), m::Op()).WithShape(S8, {2, 2, 64, 64})))), m::Op()))); } TEST_F(CudnnPadForConvolutionsTest, NoPadInt8To32OnSm70) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[10,20,30,40] parameter(0) filter = s8[2,2,40,41] parameter(1) ROOT result = (s8[10,20,30,41], u8[0]) custom-call(input, filter), window={size=2x2}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" })") .value(); EXPECT_TRUE(CudnnPadForConvolutions({7, 0}).Run(module.get()).value()); auto* root = module->entry_computation()->root_instruction(); EXPECT_THAT( root, GmockMatch(m::Tuple( m::Slice(m::GetTupleElement(m::CustomCall( {kCudnnConvForwardCallTarget}, m::Parameter(0), m::Pad(m::Parameter(1), m::Op()).WithShape(S8, {2, 2, 40, 44})))), m::Op()))); } TEST_F(CudnnPadForConvolutionsTest, NoPadInt8To32FloatOutputSm75) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[10,20,30,38] parameter(0) filter = s8[2,2,38,41] parameter(1) ROOT result = (f32[10,20,30,41], u8[0]) custom-call(input, filter), window={size=2x2}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" })") .value(); EXPECT_TRUE(CudnnPadForConvolutions({7, 5}).Run(module.get()).value()); auto* root = module->entry_computation()->root_instruction(); EXPECT_THAT( root, GmockMatch(m::Tuple( m::Slice(m::GetTupleElement(m::CustomCall( {kCudnnConvForwardCallTarget}, m::Pad(m::Parameter(0), m::Op()).WithShape(S8, {10, 20, 30, 40}), m::Pad(m::Parameter(1), m::Op()).WithShape(S8, {2, 2, 40, 44})))), m::Op()))); } TEST_F(CudnnPadForConvolutionsTest, NoPadInt8UnsupportedFilterTypeOutputSm75) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[10,20,30,38] parameter(0) filter = f32[2,2,38,41] parameter(1) ROOT result = 
(s8[10,20,30,41], u8[0]) custom-call(input, filter), window={size=2x2}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" })") .value(); EXPECT_FALSE(CudnnPadForConvolutions({7, 5}).Run(module.get()).value()); } TEST_F(CudnnPadForConvolutionsTest, NoPadToInt8x32ExcessiveBlowup) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[128,4,48,48] parameter(0) filter = s8[64,4,3,3] parameter(1) ROOT result = (f32[128,64,48,48], u8[0]) custom-call(input, filter), window={size=3x3}, dim_labels=bf01_io01->bf01, custom_call_target="__cudnn$convForward" })") .value(); EXPECT_FALSE(CudnnPadForConvolutions({7, 5}).Run(module.get()).value()); } TEST_F(CudnnPadForConvolutionsTest, PadInt8x4To32) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[10,20,30,41,4] parameter(0) filter = s8[2,2,41,4,168] parameter(1) ROOT result = (s8[10,20,30,42,4], u8[0]) custom-call(input, filter), window={size=2x2}, dim_labels=b01f?_01i?o->b01f?, custom_call_target="__cudnn$convForward" })") .value(); EXPECT_TRUE(CudnnPadForConvolutions({7, 5}).Run(module.get()).value()); auto* root = module->entry_computation()->root_instruction(); EXPECT_THAT( root, GmockMatch(m::Tuple( m::Slice(m::GetTupleElement( m::CustomCall({kCudnnConvForwardCallTarget}, m::Pad(m::Parameter(0), m::Op()) .WithShape(S8, {10, 20, 30, 48, 4}), m::Pad(m::Parameter(1), m::Op()) .WithShape(S8, {2, 2, 48, 4, 192}))) .WithShape(S8, {10, 20, 30, 48, 4})), m::Op()))); } TEST_F(CudnnPadForConvolutionsTest, PadInt8x4To32BiasActivation) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[10,20,30,41,4] parameter(0) filter = s8[2,2,41,4,168] parameter(1) bias = f32[10] parameter(2) side_input = s8[10,20,30,42,4] parameter(3) ROOT result = (s8[10,20,30,42,4], u8[0]) custom-call(input, filter, bias, side_input), window={size=2x2}, dim_labels=b01f?_01i?o->b01f?, custom_call_target="__cudnn$convBiasActivationForward" })") .value(); EXPECT_TRUE(CudnnPadForConvolutions({7, 5}).Run(module.get()).value()); auto* root = module->entry_computation()->root_instruction(); EXPECT_THAT( root, GmockMatch(m::Tuple( m::Slice( m::GetTupleElement( m::CustomCall( {kCudnnConvBiasActivationForwardCallTarget}, m::Pad(m::Parameter(0), m::Op()) .WithShape(S8, {10, 20, 30, 48, 4}), m::Pad(m::Parameter(1), m::Op()) .WithShape(S8, {2, 2, 48, 4, 192}), m::Pad(m::Parameter(2), m::Op()).WithShape(F32, {32}), m::Pad(m::Parameter(3), m::Op()) .WithShape(S8, {10, 20, 30, 48, 4}))) .WithShape(S8, {10, 20, 30, 48, 4})), m::Op()))); } TEST_F(CudnnPadForConvolutionsTest, PadIntFusedForwardConvInputAndOutputChannels) { auto module = ParseAndReturnVerifiedModule(R"( HloModule Test ENTRY %Test (input: s8[1,3,3,2], filter: s8[3,3,2,5], side_input: s8[1,3,3,5], bias: s8[5]) -> f32[1,3,3,5] { %input = s8[1,3,3,3]{3,2,1,0} parameter(0) %filter = s8[3,3,2,5]{3,2,1,0} parameter(1) %bias = s8[5]{0} parameter(3) %convert = f32[5]{0} convert(s8[5]{0} %bias) %side_input = f32[1,3,3,5]{3,2,1,0} parameter(2) %custom-call.1 = (f32[1,3,3,5]{3,2,1,0}, u8[0]{0}) custom-call(s8[1,3,3,3]{3,2,1,0} %input, s8[3,3,2,5]{3,2,1,0} %filter, f32[5]{0} %convert, f32[1,3,3,5]{3,2,1,0} %side_input), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convBiasActivationForward", backend_config="{\"activationMode\":\"2\",\"convResultScale\":1,\"sideInputScale\":1}" ROOT %get-tuple-element.1 = f32[1,3,3,5]{3,2,1,0} 
get-tuple-element((f32[1,3,3,5]{3,2,1,0}, u8[0]{0}) %custom-call.1), index=0 })") .value(); EXPECT_TRUE(CudnnPadForConvolutions({7, 0}).Run(module.get()).value()); auto* root = module->entry_computation()->root_instruction(); EXPECT_THAT(root, GmockMatch(m::GetTupleElement(m::Tuple( m::Slice(m::GetTupleElement(m::CustomCall( {kCudnnConvBiasActivationForwardCallTarget}, m::Pad(m::Parameter(0), m::Op()), m::Pad(m::Parameter(1), m::Op()), m::Pad(m::Convert(m::Parameter(3)), m::Op()), m::Pad(m::Parameter(2), m::Op())))), m::Op())))); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/cudnn_pad_for_convolutions.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/cudnn_pad_for_convolutions_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
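As a usage note for the record above: a minimal sketch of invoking CudnnPadForConvolutions directly with a target compute capability, mirroring the PadIntForwardConvInputChannels test. The main() wrapper and the parser entry point are illustrative assumptions; the pass construction and Run call follow the tests in this record.

#include <iostream>
#include <memory>

#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/transforms/cudnn_pad_for_convolutions.h"
#include "xla/service/hlo_parser.h"

int main() {
  // s8 forward convolution whose feature counts (41 in, 40 out) are not
  // multiples of 4, copied from the PadIntForwardConvInputChannels test.
  constexpr char kHlo[] = R"(
HloModule TestModule

ENTRY TestComputation {
  input = s8[10,20,30,41] parameter(0)
  filter = s8[2,2,41,40] parameter(1)
  ROOT result = (f32[10,20,30,40], u8[0]) custom-call(input, filter),
    window={size=2x2}, dim_labels=b01f_01io->b01f,
    custom_call_target="__cudnn$convForward"
})";
  std::unique_ptr<xla::HloModule> module =
      xla::ParseAndReturnUnverifiedModule(kHlo).value();
  // With compute capability {7, 0} only the pad-to-multiple-of-4 path for
  // integer convolutions applies; {7, 5} would first try int8x32 padding.
  bool changed =
      xla::gpu::CudnnPadForConvolutions({7, 0}).Run(module.get()).value();
  std::cout << (changed ? module->ToString() : "no change") << "\n";
  return 0;
}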
2afc9150-0c31-4ec4-b238-3aee0943c1dd
cpp
tensorflow/tensorflow
cudnn_simplify_padding
third_party/xla/xla/service/gpu/transforms/cudnn_simplify_padding.cc
third_party/xla/xla/service/gpu/transforms/cudnn_simplify_padding_test.cc
#include "xla/service/gpu/transforms/cudnn_simplify_padding.h" #include <algorithm> #include <cstdint> #include <iterator> #include <optional> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_set.h" #include "absl/container/inlined_vector.h" #include "absl/strings/str_join.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/literal.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/cublas_cudnn.h" #include "xla/service/hlo_creation_utils.h" #include "xla/service/pattern_matcher.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" #include "tsl/platform/statusor.h" namespace xla::gpu { namespace { namespace m = ::xla::match; std::optional<int64_t> FindFalseIndex(absl::Span<const bool> vals) { std::optional<int64_t> missing_dim; for (int i = 0; i < vals.size(); i++) { if (vals[i]) { continue; } if (missing_dim.has_value()) { VLOG(2) << "Multiple dimensions are missing from conv dnums; can't " "determine which is vect_c dimension"; return std::nullopt; } missing_dim = i; } return missing_dim; } std::optional<int64_t> FindOutputVectCDim(HloInstruction* conv) { const ConvolutionDimensionNumbers& dnums = conv->convolution_dimension_numbers(); int64_t num_dims = conv->shape().tuple_shapes(0).dimensions_size(); absl::InlinedVector<bool, 5> seen_dims(num_dims); seen_dims[dnums.output_batch_dimension()] = true; seen_dims[dnums.output_feature_dimension()] = true; for (int64_t d : dnums.output_spatial_dimensions()) { seen_dims[d] = true; } return FindFalseIndex(seen_dims); } std::optional<int64_t> FindKernelVectCDim(HloInstruction* conv) { const ConvolutionDimensionNumbers& dnums = conv->convolution_dimension_numbers(); int64_t num_dims = conv->operand(1)->shape().dimensions_size(); absl::InlinedVector<bool, 5> seen_dims(num_dims); seen_dims[dnums.kernel_input_feature_dimension()] = true; seen_dims[dnums.kernel_output_feature_dimension()] = true; for (int64_t d : dnums.kernel_spatial_dimensions()) { seen_dims[d] = true; } return FindFalseIndex(seen_dims); } std::optional<int64_t> NumTrailingZeroOutputFeatures(HloInstruction* conv) { const ConvolutionDimensionNumbers& dnums = conv->convolution_dimension_numbers(); int64_t feature_dim = dnums.kernel_output_feature_dimension(); const HloInstruction* weights = conv->operand(1); auto backend_config = conv->backend_config<GpuBackendConfig>(); if (backend_config.ok() && backend_config->cudnn_conv_backend_config().reordered_int8_nchw_vect()) { VLOG(2) << "Matched int8x32 convolution with filter reordering"; const HloInstruction *reshape, *transpose; bool matched = Match(weights, m::Reshape(m::Transpose( &transpose, m::Reshape(&reshape, m::Op(&weights))))); if (!matched || feature_dim != 0 || transpose->shape().rank() != 8) { VLOG(2) << "The filter output feature dimension cannot be determined, as " "the reordering sequence is modified"; return std::nullopt; } const auto& transpose_dimensions = Cast<HloTransposeInstruction>(transpose)->dimensions(); int64_t preceding_size = 1; for (int64_t i = transpose_dimensions.at(3) - 1; i >= 0; --i) { preceding_size *= reshape->shape().dimensions(i); } int64_t accumulated_size = 1; for (int64_t size : weights->shape().dimensions()) { if (accumulated_size < preceding_size) { accumulated_size *= size; ++feature_dim; } else { break; } } if (accumulated_size != 
preceding_size) { VLOG(2) << "Something is really wrong here, I give up"; return std::nullopt; } VLOG(2) << "Computed output feature dimension: " << feature_dim; } VLOG(2) << "Computing NumTrailingZeroOutputFeatures of " << conv->ToString() << "\nwith weights " << weights->ToString(); if (Match(weights, m::Pad(m::Op(), m::ConstantEffectiveScalar(0)))) { const PaddingConfig::PaddingConfigDimension& padding_config = weights->padding_config().dimensions(feature_dim); VLOG(2) << "Success: Weights is a pad; padding on output feature dim is " << padding_config.edge_padding_high(); return padding_config.edge_padding_high(); } else if (const HloInstruction * pad; Match( weights, m::Reshape(m::Pad(&pad, m::Op(), m::ConstantEffectiveScalar(0))))) { std::optional<int64_t> vect_c_dim = FindKernelVectCDim(conv); if (!vect_c_dim.has_value()) { VLOG(2) << "fail: Can't find vect_c dimension in conv."; return std::nullopt; } if (*vect_c_dim != dnums.kernel_input_feature_dimension() + 1) { VLOG(2) << "fail: vect_c dim is in the wrong place; should be right " "after kernel input feature dims in conv."; return std::nullopt; } absl::InlinedVector<int64_t, 5> expected_pad_dim_sizes( weights->shape().dimensions().begin(), weights->shape().dimensions().end()); expected_pad_dim_sizes[dnums.kernel_input_feature_dimension()] *= weights->shape().dimensions(*vect_c_dim); expected_pad_dim_sizes.erase(expected_pad_dim_sizes.begin() + *vect_c_dim); if (pad->shape().dimensions() != expected_pad_dim_sizes) { VLOG(2) << "fail: Reshape doesn't simply merge vect_c dimension into " "input features dim " << weights->ToString() << " but expected dims " << absl::StrJoin(expected_pad_dim_sizes, ","); return std::nullopt; } int64_t feature_dim_before_reshape = feature_dim; if (dnums.kernel_output_feature_dimension() > dnums.kernel_input_feature_dimension()) { feature_dim_before_reshape--; } const PaddingConfig::PaddingConfigDimension& padding_config = pad->padding_config().dimensions(feature_dim_before_reshape); VLOG(2) << "Success: Weights is a reshape of a pad; padding on output " "feature dim is " << padding_config.edge_padding_high(); return padding_config.edge_padding_high(); } else if (Match(weights, m::Constant())) { const Literal& lit = weights->literal(); const auto& dims = weights->shape().dimensions(); absl::InlinedVector<int64_t, 5> multi_index; for (int64_t dim : dims) { multi_index.push_back(dim - 1); } auto decrement_multi_index = [&] { for (int i = 0; i < multi_index.size(); ++i) { if (i != feature_dim) { int64_t& idx = multi_index[i]; --idx; if (idx == -1) { idx = dims[i] - 1; } else { return true; } } } int64_t& idx = multi_index[feature_dim]; --idx; return idx != -1; }; do { if (!lit.IsZero(multi_index)) { break; } } while (decrement_multi_index()); int64_t first_trailing_zero_feature = multi_index[feature_dim] + 1; if (first_trailing_zero_feature == 0) { VLOG(2) << "Weights constant is entirely zero."; } else { VLOG(2) << "First nonzero index in weights constant is " << absl::StrJoin(multi_index, ","); } int64_t ret = std::max<int64_t>(0, weights->shape().dimensions(feature_dim) - first_trailing_zero_feature); VLOG(2) << "Success: weights is a constant; num zero trailing output " "features is " << ret; return ret; } return std::nullopt; } absl::StatusOr<bool> TrySimplifyPadding(HloInstruction* instr) { HloInstruction* conv; HloInstruction* transpose = nullptr; HloInstruction* reshape = nullptr; HloInstruction* slice; HloInstruction* pad; auto conv_matcher = m::GetTupleElement( 
m::CustomCall(&conv).WithPredicate([](const HloInstruction* instr) { return instr->custom_call_target() == kCudnnConvForwardCallTarget || instr->custom_call_target() == kCudnnConvBiasActivationForwardCallTarget; }), 0); auto pad_matcher = m::Pad(m::Op(), m::ConstantEffectiveScalar(0)); if (!MatchAndLogIfFailed(instr, "conv-slice-pad", m::Pad(&pad, m::Slice(&slice, conv_matcher), m::ConstantEffectiveScalar(0)), VLOG_IS_ON(3), pad_matcher) && !MatchAndLogIfFailed( instr, "conv-reshape-slice-pad", m::Pad(&pad, m::Slice(&slice, m::Reshape(&reshape, conv_matcher)), m::ConstantEffectiveScalar(0)), VLOG_IS_ON(3), pad_matcher) && !MatchAndLogIfFailed( instr, "conv-transpose-reshape-slice-pad", m::Pad(&pad, m::Slice(&slice, m::Reshape(&reshape, m::Transpose(&transpose, conv_matcher))), m::ConstantEffectiveScalar(0)), VLOG_IS_ON(3), pad_matcher)) { return false; } VLOG(2) << "Found pattern to attempt to simplify:\n" << "conv: " << conv->ToString() << "\ntranspose: " << (transpose != nullptr ? transpose->ToString() : "(null)") << "\nreshape: " << (reshape != nullptr ? reshape->ToString() : "(null)") << "\nslice: " << slice->ToString() << "\npad: " << pad->ToString(); std::optional<int64_t> num_known_zero_output_features = NumTrailingZeroOutputFeatures(conv); if (!num_known_zero_output_features.has_value() || *num_known_zero_output_features == 0) { VLOG(2) << "fail: Didn't find any known-zero output features"; return false; } const auto& dnums = conv->convolution_dimension_numbers(); int64_t output_feature_dim; if (reshape == nullptr) { CHECK_EQ(transpose, nullptr); output_feature_dim = dnums.output_feature_dimension(); } else { std::optional<int64_t> vect_c_dim_before_transpose = FindOutputVectCDim(conv); if (!vect_c_dim_before_transpose.has_value()) { VLOG(2) << "Couldn't find vect_c output dim in conv."; return false; } int64_t feature_dim_after_transpose; int64_t vect_c_dim_after_transpose; if (transpose == nullptr) { feature_dim_after_transpose = dnums.output_feature_dimension(); vect_c_dim_after_transpose = *vect_c_dim_before_transpose; } else { const auto& transpose_dims = transpose->dimensions(); feature_dim_after_transpose = std::distance( transpose->dimensions().begin(), absl::c_find(transpose_dims, dnums.output_feature_dimension())); vect_c_dim_after_transpose = std::distance( transpose->dimensions().begin(), absl::c_find(transpose_dims, *vect_c_dim_before_transpose)); } if (vect_c_dim_after_transpose != feature_dim_after_transpose + 1) { VLOG(2) << "fail: after transpose (if present), vect_c dim must appear " "immediately after output feature dim: Computed " "vect_d_dim_after_transpose to be " << vect_c_dim_after_transpose; return false; } absl::InlinedVector<int64_t, 5> expected_reshape_dim_sizes( reshape->operand(0)->shape().dimensions().begin(), reshape->operand(0)->shape().dimensions().end()); expected_reshape_dim_sizes[feature_dim_after_transpose] *= expected_reshape_dim_sizes[vect_c_dim_after_transpose]; expected_reshape_dim_sizes.erase(expected_reshape_dim_sizes.begin() + vect_c_dim_after_transpose); if (reshape->shape().dimensions() != expected_reshape_dim_sizes) { VLOG(2) << "fail: Reshape doesn't merge vect_c with feature dimension."; return false; } output_feature_dim = feature_dim_after_transpose; } if (!absl::c_all_of(slice->slice_starts(), [](auto v) { return v == 0; }) || !absl::c_all_of(slice->slice_strides(), [](auto v) { return v == 1; })) { VLOG(2) << "fail: Slice doesn't start at the front or has stride != 1."; return false; } for (int64_t dim = 0; dim < 
slice->slice_limits().size(); dim++) { if (slice->slice_starts(dim) != 0 || slice->slice_strides(dim) != 1 || (dim != output_feature_dim && slice->slice_limits(dim) != slice->operand(0)->shape().dimensions(dim))) { VLOG(2) << "fail: Slice removes something other than the features dim."; return false; } } int64_t num_sliced_from_feature_dim = slice->operand(0)->shape().dimensions(output_feature_dim) - slice->slice_limits(output_feature_dim); if (num_sliced_from_feature_dim > *num_known_zero_output_features) { VLOG(2) << "fail: Slice removes " << num_sliced_from_feature_dim << " features from the conv, but only " << *num_known_zero_output_features << " features in the conv are known to be zero."; return false; } if (pad->padding_config().dimensions(output_feature_dim).interior_padding() != 0) { VLOG(2) << "fail: Can't merge slice into pad because pad adds interior padding " "in feature dimension."; return false; } VLOG(1) << "Eliminating " << num_sliced_from_feature_dim << " elements of padding from conv " << conv->name(); PaddingConfig new_padding_config = pad->padding_config(); PaddingConfig::PaddingConfigDimension* new_pad_feature_dim = new_padding_config.mutable_dimensions(output_feature_dim); new_pad_feature_dim->set_edge_padding_high( new_pad_feature_dim->edge_padding_high() - num_sliced_from_feature_dim); TF_ASSIGN_OR_RETURN(HloInstruction * new_pad, MakePadHlo(slice->mutable_operand(0), pad->mutable_operand(1), new_padding_config)); TF_RETURN_IF_ERROR(pad->parent()->ReplaceInstruction(pad, new_pad)); return true; } } absl::StatusOr<bool> CudnnSimplifyPadding::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { bool changed = false; for (HloComputation* comp : module->MakeNonfusionComputations(execution_threads)) { for (HloInstruction* instr : comp->MakeInstructionPostOrder()) { TF_ASSIGN_OR_RETURN(bool c, TrySimplifyPadding(instr)); changed |= c; } } return changed; } }
#include "xla/service/gpu/transforms/cudnn_simplify_padding.h" #include <cstdint> #include <memory> #include <utility> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/functional/function_ref.h" #include "absl/strings/str_cat.h" #include "absl/types/span.h" #include "xla/hlo/pass/hlo_pass_fix.h" #include "xla/hlo/pass/hlo_pass_pipeline.h" #include "xla/literal.h" #include "xla/service/algebraic_simplifier.h" #include "xla/service/call_inliner.h" #include "xla/service/gpu/transforms/cudnn_pad_for_convolutions.h" #include "xla/service/gpu/transforms/cudnn_vectorize_convolutions.h" #include "xla/service/pattern_matcher.h" #include "xla/service/pattern_matcher_gmock.h" #include "xla/service/reshape_mover.h" #include "xla/service/tuple_simplifier.h" #include "xla/stream_executor/device_description.h" #include "xla/stream_executor/dnn.h" #include "xla/tests/hlo_test_base.h" #include "xla/tsl/lib/core/status_test_util.h" #include "xla/util.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" #include "tsl/platform/statusor.h" namespace xla::gpu { namespace { namespace m = ::xla::match; class CudnnSimplifyPaddingTest : public HloTestBase { protected: absl::StatusOr<bool> RunEndToEnd(std::pair<int, int> compute_capability, HloModule* module) { se::CudaComputeCapability cc{compute_capability.first, compute_capability.second}; TF_RETURN_IF_ERROR( RunHloPass(CudnnPadForConvolutions(cc), module).status()); TF_RETURN_IF_ERROR( RunHloPass(CudnnVectorizeConvolutions( cc, se::dnn::VersionInfo{8, 3, 0}), module) .status()); VLOG(1) << "after vectorizing convs:\n" << module->ToString(); TF_RETURN_IF_ERROR(RunHloPass(CallInliner(), module).status()); VLOG(1) << "after inliner:\n" << module->ToString(); TF_RETURN_IF_ERROR(RunHloPass(TupleSimplifier(), module).status()); VLOG(1) << "after tuple simplifier:\n" << module->ToString(); TF_ASSIGN_OR_RETURN(bool changed, RunHloPass(CudnnSimplifyPadding(), module)); VLOG(1) << "after simplify_padding:\n" << module->ToString(); { HloPassFix<HloPassPipeline> pipeline("reshape-mover and algsimp"); pipeline.AddPass<ReshapeMover>(); pipeline.AddPass<AlgebraicSimplifier>(AlgebraicSimplifierOptions()); TF_RETURN_IF_ERROR(RunHloPass(pipeline, module).status()); } VLOG(1) << "after reshape mover + algsimp:\n" << module->ToString(); return changed; } absl::StatusOr<bool> RunJustThisPass(HloModule* module) { TF_ASSIGN_OR_RETURN(bool changed, RunHloPass(CudnnSimplifyPadding(), module)); VLOG(1) << "after simplify_padding:\n" << module->ToString(); TF_RETURN_IF_ERROR(RunHloPass(HloPassFix<AlgebraicSimplifier>( AlgebraicSimplifierOptions()), module) .status()); return changed; } }; void ExpectOnlyPadsOneDim(int64_t dim, int64_t padding_high, const PaddingConfig& p) { SCOPED_TRACE(p.DebugString()); for (int i = 0; i < p.dimensions_size(); ++i) { SCOPED_TRACE(absl::StrCat("dimension ", i)); EXPECT_EQ(p.dimensions(i).edge_padding_low(), 0); if (i == dim) { EXPECT_EQ(p.dimensions(i).edge_padding_high(), padding_high); } else { EXPECT_EQ(p.dimensions(i).edge_padding_high(), 0); } } } template <typename NativeT> void SetConstantValue( HloInstruction* instr, absl::FunctionRef<NativeT(absl::Span<const int64_t>, NativeT)> value_fn) { Literal new_literal = instr->literal().Clone(); new_literal.MutableEachCell<int8_t>(value_fn); TF_EXPECT_OK(instr->parent()->ReplaceWithNewInstruction( instr, HloInstruction::CreateConstant(std::move(new_literal)))); } TEST_F(CudnnSimplifyPaddingTest, EndToEnd) { auto module = ParseAndReturnVerifiedModule(R"( HloModule 
TestModule ENTRY TestComputation { conv1 = (s8[10,20,30,190], u8[0]) custom-call( s8[10,20,30,63] parameter(0), s8[3,5,63,190] parameter(1), f32[10] parameter(2), s8[10,20,30,190] parameter(3)), window={size=3x5}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convBiasActivationForward" conv1_result = get-tuple-element(conv1), index=0 ROOT conv2 = (s8[10,20,30,29], u8[0]) custom-call( conv1_result, s8[3,5,190,29] parameter(4), f32[10] parameter(5), s8[10,20,30,29] parameter(6)), window={size=3x5}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convBiasActivationForward" })") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunEndToEnd({7, 5}, module.get())); EXPECT_TRUE(changed); SCOPED_TRACE(module->ToString()); auto* root = module->entry_computation()->root_instruction(); EXPECT_THAT( root, GmockMatch(m::Tuple( m::Slice(m::Reshape(m::GetTupleElement(m::CustomCall( {"__cudnn$convBiasActivationForward"}, m::GetTupleElement( m::CustomCall({"__cudnn$convBiasActivationForward"}), 0), m::Op(), m::Op(), m::Op())))), m::Op()))); } TEST_F(CudnnSimplifyPaddingTest, EndToEndNCHW) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { conv1 = (s8[1,64,480,400], u8[0]) custom-call( s8[1,112,480,400] parameter(0), s8[3,3,112,64] parameter(1), f32[64] parameter(2)), window={size=3x3}, dim_labels=bf01_01io->bf01, custom_call_target="__cudnn$convBiasActivationForward" conv1_result = get-tuple-element(conv1), index=0 convert = f32[1,64,480,400] convert(conv1_result) constant = f32[] constant(0.349002093) broadcast = f32[1,64,480,400] broadcast(constant) ROOT multiply = f32[1,64,480,400] multiply(convert, broadcast) })") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunEndToEnd({7, 5}, module.get())); EXPECT_FALSE(changed); SCOPED_TRACE(module->ToString()); auto* root = module->entry_computation()->root_instruction(); EXPECT_THAT(root, GmockMatch(m::Reshape(m::Multiply()))); } TEST_F(CudnnSimplifyPaddingTest, PaddedWeights) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { weights = pad(s8[3,3,10,10] parameter(0), s8[] constant(0)), padding=0_0x0_0x0_0x0_4 conv = (s8[10,10,10,10], u8[0]) custom-call( s8[10,10,10,10] parameter(1), weights ), window={size=3x3}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" conv_result = get-tuple-element(conv), index=0 slice = s8[10,10,10,6] slice(conv_result), slice={[0:10], [0:10], [0:10], [0:6]} ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_0x0_0x0_5 } )") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get())); EXPECT_TRUE(changed); SCOPED_TRACE(module->ToString()); auto* root = module->entry_computation()->root_instruction(); const HloInstruction* pad = nullptr; ASSERT_THAT(root, GmockMatch(m::Pad(&pad, m::GetTupleElement(m::CustomCall(), 0), m::ConstantScalar(0)))); ExpectOnlyPadsOneDim(3, 1, pad->padding_config()); } TEST_F(CudnnSimplifyPaddingTest, PaddedWeightsNotPaddedEnough) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { weights = pad(s8[3,3,10,10] parameter(0), s8[] constant(0)), padding=0_0x0_0x0_0x0_3 conv = (s8[10,10,10,10], u8[0]) custom-call( s8[10,10,10,10] parameter(1), weights ), window={size=3x3}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" conv_result = get-tuple-element(conv), index=0 slice = s8[10,10,10,6] slice(conv_result), slice={[0:10], [0:10], [0:10], [0:6]} ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_0x0_0x0_5 
} )") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get())); EXPECT_FALSE(changed); } TEST_F(CudnnSimplifyPaddingTest, PaddedAndReshapedWeightsNCHW) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { weights_p = pad(s8[64,60,3,3] parameter(0), s8[] constant(0)), padding=0_0x0_4x0_0x0_0 weights = s8[2,32,64,3,3] reshape(weights_p) conv = (s8[10,2,32,10,10], u8[0]) custom-call( s8[10,2,32,10,10] parameter(1), weights ), window={size=3x3}, dim_labels=bf?01_i?o01->bf?01, custom_call_target="__cudnn$convForward" conv_result = get-tuple-element(conv), index=0 slice = s8[10,60,10,10] slice(s8[10,64,10,10] reshape(conv_result)), slice={[0:10], [0:60], [0:10], [0:10]} ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_5x0_0x0_0 } )") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get())); EXPECT_TRUE(changed); SCOPED_TRACE(module->ToString()); auto* root = module->entry_computation()->root_instruction(); const HloInstruction* pad = nullptr; ASSERT_THAT( root, GmockMatch( m::Pad(&pad, m::Reshape(m::GetTupleElement(m::CustomCall(), 0)), m::ConstantScalar(0)))); ExpectOnlyPadsOneDim(1, 1, pad->padding_config()); } TEST_F(CudnnSimplifyPaddingTest, PaddedAndReshapedWeightsNHWC) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { weights_p = pad(s8[3,3,64,60] parameter(0), s8[] constant(0)), padding=0_0x0_0x0_0x0_4 weights = s8[3,3,2,32,64] reshape(weights_p) conv = (s8[10,10,10,2,32], u8[0]) custom-call( s8[10,10,10,2,32] parameter(1), weights ), window={size=3x3}, dim_labels=b01f?_01i?o->b01f?, custom_call_target="__cudnn$convForward" conv_result = get-tuple-element(conv), index=0 slice = s8[10,10,10,60] slice(s8[10,10,10,64] reshape(conv_result)), slice={[0:10], [0:10], [0:10], [0:60]} ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_0x0_0x0_5 } )") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get())); EXPECT_TRUE(changed); SCOPED_TRACE(module->ToString()); auto* root = module->entry_computation()->root_instruction(); const HloInstruction* pad = nullptr; ASSERT_THAT( root, GmockMatch( m::Pad(&pad, m::Reshape(m::GetTupleElement(m::CustomCall(), 0)), m::ConstantScalar(0)))); ExpectOnlyPadsOneDim(3, 1, pad->padding_config()); } TEST_F(CudnnSimplifyPaddingTest, PaddedTransposedAndReshapedOutput) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { weights_p = pad(s8[64,60,3,3] parameter(0), s8[] constant(0)), padding=0_0x0_4x0_0x0_0 weights = s8[2,32,64,3,3] reshape(weights_p) conv = (s8[10,2,10,10,32], u8[0]) custom-call( s8[10,2,10,10,32] parameter(1), weights ), window={size=3x3}, dim_labels=bf01?_i?o01->bf01?, custom_call_target="__cudnn$convForward" conv_result = get-tuple-element(conv), index=0 conv_transposed = s8[10,2,32,10,10] transpose(conv_result), dimensions={0,1,4,2,3} slice = s8[10,60,10,10] slice(s8[10,64,10,10] reshape(conv_transposed)), slice={[0:10], [0:60], [0:10], [0:10]} ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_6x0_0x0_0 } )") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get())); EXPECT_TRUE(changed); SCOPED_TRACE(module->ToString()); auto* root = module->entry_computation()->root_instruction(); const HloInstruction* pad = nullptr; ASSERT_THAT( root, GmockMatch(m::Pad( &pad, m::Reshape(m::Transpose(m::GetTupleElement(m::CustomCall(), 0))), m::ConstantScalar(0)))); ExpectOnlyPadsOneDim(1, 2, pad->padding_config()); } 
TEST_F(CudnnSimplifyPaddingTest, PaddedConstantWeight) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { conv = (s8[10,10,10,10], u8[0]) custom-call( s8[10,10,10,10] parameter(0), s8[3,3,10,10] constant({...}) ), window={size=3x3}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" conv_result = get-tuple-element(conv), index=0 slice = s8[10,10,10,6] slice(conv_result), slice={[0:10], [0:10], [0:10], [0:6]} ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_0x0_0x0_5 } )") .value(); { HloInstruction* weights = nullptr; ASSERT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Pad(m::Slice(m::GetTupleElement(m::CustomCall( m::Op(), m::Constant(&weights)))), m::Op()))); SetConstantValue<int8_t>( weights, [](absl::Span<const int64_t> dims, int8_t old_val) -> int8_t { if (dims[3] < 6) return 1; return 0; }); } TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get())); EXPECT_TRUE(changed); SCOPED_TRACE(module->ToString()); auto* root = module->entry_computation()->root_instruction(); const HloInstruction* pad = nullptr; ASSERT_THAT(root, GmockMatch(m::Pad(&pad, m::GetTupleElement(m::CustomCall(), 0), m::ConstantScalar(0)))); ExpectOnlyPadsOneDim(3, 1, pad->padding_config()); } TEST_F(CudnnSimplifyPaddingTest, PaddedConstantWeightIsNotLargeEnough) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { conv = (s8[10,10,10,10], u8[0]) custom-call( s8[10,10,10,10] parameter(0), s8[3,3,10,10] constant({...}) ), window={size=3x3}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" conv_result = get-tuple-element(conv), index=0 slice = s8[10,10,10,6] slice(conv_result), slice={[0:10], [0:10], [0:10], [0:6]} ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_0x0_0x0_5 } )") .value(); { HloInstruction* weights = nullptr; ASSERT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Pad(m::Slice(m::GetTupleElement(m::CustomCall( m::Op(), m::Constant(&weights)))), m::Op()))); SetConstantValue<int8_t>( weights, [](absl::Span<const int64_t> dims, int8_t old_val) -> int8_t { if (dims[3] < 5 ) return 0; return 1; }); } TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get())); EXPECT_FALSE(changed); } TEST_F(CudnnSimplifyPaddingTest, ReshapeDoesntMergeVectCDim) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { weights_p = pad(s8[64,60,3,3] parameter(0), s8[] constant(0)), padding=0_0x0_4x0_0x0_0 weights = s8[2,64,3,3,32] reshape(weights_p) conv = (s8[10,2,10,10,32], u8[0]) custom-call( s8[10,2,10,10,32] parameter(1), weights_p ), window={size=3x3}, dim_labels=bf01?_io01?->bf01?, custom_call_target="__cudnn$convForward" conv_result = get-tuple-element(conv), index=0 slice = s8[10,60,10,10] slice(s8[10,64,10,10] reshape(conv_result)), slice={[0:10], [0:60], [0:10], [0:10]} ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_6x0_0x0_0 } )") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get())); EXPECT_FALSE(changed); } TEST_F(CudnnSimplifyPaddingTest, TwoVectCDimsInOutput) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { weights_p = pad(s8[64,60,3,3] parameter(0), s8[] constant(0)), padding=0_0x0_4x0_0x0_0 weights = s8[2,64,3,3,32] reshape(weights_p) conv = (s8[10,2,10,10,4,8], u8[0]) custom-call( s8[10,2,10,10,32] parameter(1), weights ), window={size=3x3}, dim_labels=bf01?_io01?->bf01??, 
custom_call_target="__cudnn$convForward" conv_result = get-tuple-element(conv), index=0 conv_transposed = s8[10,2,4,8,10,10] transpose(conv_result), dimensions={0,1,4,5,2,3} slice = s8[10,60,10,10] slice(s8[10,64,10,10] reshape(conv_transposed)), slice={[0:10], [0:60], [0:10], [0:10]} ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_6x0_0x0_0 } )") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get())); EXPECT_FALSE(changed); } TEST_F(CudnnSimplifyPaddingTest, TwoVectCDimsInKernel) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { weights_p = pad(s8[64,60,3,3] parameter(0), s8[] constant(0)), padding=0_0x0_4x0_0x0_0 weights = s8[2,64,3,3,4,8] reshape(weights_p) conv = (s8[10,2,10,10,32], u8[0]) custom-call( s8[10,2,10,10,32] parameter(1), weights ), window={size=3x3}, dim_labels=bf01?_io01??->bf01?, custom_call_target="__cudnn$convForward" conv_result = get-tuple-element(conv), index=0 conv_transposed = s8[10,2,32,10,10] transpose(conv_result), dimensions={0,1,4,2,3} slice = s8[10,60,10,10] slice(s8[10,64,10,10] reshape(conv_transposed)), slice={[0:10], [0:60], [0:10], [0:10]} ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_6x0_0x0_0 } )") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get())); EXPECT_FALSE(changed); } TEST_F(CudnnSimplifyPaddingTest, SliceDoesntStartAtBeginning) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { weights = pad(s8[3,3,10,10] parameter(0), s8[] constant(0)), padding=0_0x0_0x0_0x0_4 conv = (s8[10,10,10,10], u8[0]) custom-call( s8[10,10,10,10] parameter(1), weights ), window={size=3x3}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" conv_result = get-tuple-element(conv), index=0 slice = s8[10,9,10,6] slice(conv_result), slice={[0:10], [1:10], [0:10], [0:6]} ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_0x0_0x0_5 } )") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get())); EXPECT_FALSE(changed); } TEST_F(CudnnSimplifyPaddingTest, SliceDoesntStartAtBeginningOfFeatureDim) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { weights = pad(s8[3,3,10,10] parameter(0), s8[] constant(0)), padding=0_0x0_0x0_0x0_4 conv = (s8[10,10,10,10], u8[0]) custom-call( s8[10,10,10,10] parameter(1), weights ), window={size=3x3}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" conv_result = get-tuple-element(conv), index=0 slice = s8[10,10,10,5] slice(conv_result), slice={[0:10], [0:10], [0:10], [1:6]} ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_0x0_0x0_5 } )") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get())); EXPECT_FALSE(changed); } TEST_F(CudnnSimplifyPaddingTest, SliceHasStride) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { weights = pad(s8[3,3,10,10] parameter(0), s8[] constant(0)), padding=0_0x0_0x0_0x0_4 conv = (s8[10,10,10,10], u8[0]) custom-call( s8[10,10,10,10] parameter(1), weights ), window={size=3x3}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" conv_result = get-tuple-element(conv), index=0 slice = s8[10,10,10,3] slice(conv_result), slice={[0:10], [0:10], [0:10], [0:6:2]} ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_0x0_0x0_5 } )") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get())); EXPECT_FALSE(changed); } TEST_F(CudnnSimplifyPaddingTest, 
PadAddsInteriorPadding) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { weights = pad(s8[3,3,10,10] parameter(0), s8[] constant(0)), padding=0_0x0_0x0_0x0_4 conv = (s8[10,10,10,10], u8[0]) custom-call( s8[10,10,10,10] parameter(1), weights ), window={size=3x3}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" conv_result = get-tuple-element(conv), index=0 slice = s8[10,10,10,6] slice(conv_result), slice={[0:10], [0:10], [0:10], [0:6]} ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_0x0_0x0_5_1 } )") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get())); EXPECT_FALSE(changed); } TEST_F(CudnnSimplifyPaddingTest, SliceMoreElementsThanPad) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { weights = pad(s8[3,3,10,10] parameter(0), s8[] constant(0)), padding=0_0x0_0x0_0x0_4 conv = (s8[10,10,10,10], u8[0]) custom-call( s8[10,10,10,10] parameter(1), weights ), window={size=3x3}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" conv_result = get-tuple-element(conv), index=0 slice = s8[10,10,10,6] slice(conv_result), slice={[0:10], [0:10], [0:10], [0:6]} ROOT pad = pad(slice, s8[] constant(0)), padding=0_0x0_0x0_0x0_2 } )") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get())); EXPECT_TRUE(changed); SCOPED_TRACE(module->ToString()); auto* root = module->entry_computation()->root_instruction(); const HloInstruction* slice = nullptr; ASSERT_THAT(root, GmockMatch(m::Slice( &slice, m::GetTupleElement(m::CustomCall(), 0)))); for (int64_t i = 0; i < slice->shape().dimensions_size(); ++i) { SCOPED_TRACE(i); EXPECT_EQ(slice->slice_starts(i), 0); EXPECT_EQ(slice->slice_strides(i), 1); if (i != 3) { EXPECT_EQ(slice->slice_limits(i), 10); } else { EXPECT_EQ(slice->slice_limits(i), 8); } } } TEST_F(CudnnSimplifyPaddingTest, NoChangeOnNonTrivialConstants) { auto module = ParseAndReturnVerifiedModule(R"( HloModule jit_outer ENTRY main.26 { reshape.2 = f32[1,3,3,12]{3,2,1,0} parameter(0) constant.1 = f32[3,3,1,12]{3,2,1,0} constant({ { { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } } }, { { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } } { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } } }, { { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } } } }) cudnn-conv = (f32[1,5,5,12]{3,2,1,0}, u8[0]{0}) custom-call(reshape.2, constant.1), window={size=3x3 pad=2_2x2_2}, dim_labels=b01f_01io->b01f, feature_group_count=12, custom_call_target="__cudnn$convForward" get-tuple-element = f32[1,5,5,12]{3,2,1,0} get-tuple-element(cudnn-conv), index=0 slice.2 = f32[1,5,1,12]{3,2,1,0} slice(get-tuple-element), slice={[0:1], [0:5], [0:1], [0:12]} constant.0 = f32[] constant(0) ROOT pad.1 = f32[1,5,3,12]{3,2,1,0} pad(slice.2, constant.0), padding=0_0x0_0x2_0x0_0 } )") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get())); EXPECT_FALSE(changed); } TEST_F(CudnnSimplifyPaddingTest, NoChangeOnComplexSlices) { auto module = ParseAndReturnVerifiedModule(R"( HloModule jit_outer ENTRY main.26 { reshape.2 = f32[1,3,3,12]{3,2,1,0} parameter(0) constant.1 = f32[3,3,1,12]{3,2,1,0} constant({ { { { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } } }, { { { 1, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0 } }, { { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } } { { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } } }, { { { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } } } }) cudnn-conv = (f32[1,5,5,12]{3,2,1,0}, u8[0]{0}) custom-call(reshape.2, constant.1), window={size=3x3 pad=2_2x2_2}, dim_labels=b01f_01io->b01f, feature_group_count=12, custom_call_target="__cudnn$convForward" get-tuple-element = f32[1,5,5,12]{3,2,1,0} get-tuple-element(cudnn-conv), index=0 slice.2 = f32[1,5,5,4]{3,2,1,0} slice(get-tuple-element), slice={[0:1], [0:5], [0:5], [2:6]} constant.0 = f32[] constant(0) ROOT pad.1 = f32[1,5,5,12]{3,2,1,0} pad(slice.2, constant.0), padding=0_0x0_0x0_0x0_8 } )") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get())); EXPECT_FALSE(changed); } TEST_F(CudnnSimplifyPaddingTest, ScanOrderFeatureDimLast) { auto module = ParseAndReturnVerifiedModule(R"( HloModule jit_outer ENTRY main.26 { reshape.2 = f32[1,3,3,12]{3,2,1,0} parameter(0) constant.1 = f32[3,3,1,12]{3,2,1,0} constant({ { { { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } } }, { { { 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0 } }, { { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } } { { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } } }, { { { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } } } }) cudnn-conv = (f32[1,5,5,12]{3,2,1,0}, u8[0]{0}) custom-call(reshape.2, constant.1), window={size=3x3 pad=2_2x2_2}, dim_labels=b01f_01io->b01f, feature_group_count=12, custom_call_target="__cudnn$convForward" get-tuple-element = f32[1,5,5,12]{3,2,1,0} get-tuple-element(cudnn-conv), index=0 slice.2 = f32[1,5,5,6]{3,2,1,0} slice(get-tuple-element), slice={[0:1], [0:5], [0:5], [0:6]} constant.0 = f32[] constant(0) ROOT pad.1 = f32[1,5,5,12]{3,2,1,0} pad(slice.2, constant.0), padding=0_0x0_0x0_0x0_6 } )") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunJustThisPass(module.get())); EXPECT_FALSE(changed); } TEST_F(CudnnSimplifyPaddingTest, Int8FilterReorderedOutputFirst) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { conv.1 = (s8[1,63,80,80], u8[0]) custom-call( s8[1,112,80,80] parameter(0), s8[63,112,3,3] parameter(1)), window={size=3x3}, dim_labels=bf01_oi01->bf01, custom_call_target="__cudnn$convForward" gte.1 = s8[1,63,80,80] get-tuple-element(conv.1), index=0 const.0 = s8[] constant(0) ROOT pad.1 = s8[1,64,80,80] pad(gte.1, const.0), padding=0_0x0_1x0_0x0_0 })") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunEndToEnd({7, 5}, module.get())); EXPECT_TRUE(changed); } TEST_F(CudnnSimplifyPaddingTest, Int8FilterReorderedOutputLast) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { conv.1 = (s8[1,63,80,80], u8[0]) custom-call( s8[1,112,80,80] parameter(0), s8[3,3,112,63] parameter(1)), window={size=3x3}, dim_labels=bf01_01io->bf01, custom_call_target="__cudnn$convForward" gte.1 = s8[1,63,80,80] get-tuple-element(conv.1), index=0 const.0 = s8[] constant(0) ROOT pad.1 = s8[1,64,80,80] pad(gte.1, const.0), padding=0_0x0_1x0_0x0_0 })") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, RunEndToEnd({7, 5}, module.get())); EXPECT_TRUE(changed); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/cudnn_simplify_padding.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/cudnn_simplify_padding_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
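Illustrative aside (not part of the dataset record above): the slice/pad folding that the cudnn_simplify_padding tests exercise reduces to a small piece of arithmetic. The sketch below is a hypothetical standalone helper, not the pass's actual code; it captures when a slice keeping slice_limit output channels followed by a pad re-adding pad_high trailing zero channels can be folded into one wider slice, assuming the convolution is known to produce zeros from channel first_zero_channel onward because its filter was zero-padded.

#include <cstdint>
#include <optional>

// Hypothetical helper, not the pass's implementation: decide whether
// slice(conv)[0:slice_limit] followed by a zero-pad of pad_high channels on
// the feature dimension can be replaced by a single slice of
// slice_limit + pad_high channels of the conv output.
std::optional<int64_t> MergedSliceLimit(int64_t conv_channels,
                                        int64_t slice_limit, int64_t pad_high,
                                        int64_t first_zero_channel) {
  int64_t merged = slice_limit + pad_high;
  if (merged > conv_channels) {
    return std::nullopt;  // not enough conv output channels to slice from
  }
  if (slice_limit < first_zero_channel) {
    return std::nullopt;  // the re-added channels are not all known zeros
  }
  return merged;  // the pad can be dropped in favor of this slice limit
}

For the SliceMoreElementsThanPad test above (10 conv channels, slice limit 6, pad 2, zeros from channel 6) this gives 8, the slice limit the test asserts on dimension 3. The actual pass additionally has to establish the zero guarantee by walking the weight constant, reshapes, and transposes, which is what the other tests in that file probe.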
6e2f5336-6a76-45d8-b557-76aa0818c7ef
cpp
tensorflow/tensorflow
fusion_block_level_rewriter
third_party/xla/xla/service/gpu/transforms/fusion_block_level_rewriter.cc
third_party/xla/xla/service/gpu/transforms/fusion_block_level_rewriter_test.cc
#include "xla/service/gpu/transforms/fusion_block_level_rewriter.h" #include <string> #include <utility> #include <variant> #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_join.h" #include "absl/strings/string_view.h" #include "mlir/IR/MLIRContext.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/fusions/triton/triton_support.h" #include "xla/service/gpu/hlo_traversal.h" #include "xla/service/gpu/ir_emission_utils.h" #include "xla/service/gpu/model/fusion_analysis_cache.h" #include "xla/service/gpu/model/gpu_indexing_performance_model.h" #include "xla/service/hlo_cost_analysis.h" #include "xla/service/instruction_fusion.h" #include "xla/stream_executor/device_description.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { using ::mlir::MLIRContext; absl::StatusOr<bool> ProcessFusionInstruction( HloFusionInstruction* fusion_instruction, const se::DeviceDescription& device_info, HloCostAnalysis::ShapeSizeFunction shape_size, MLIRContext* ctx) { const HloComputation* fusion_computation = fusion_instruction->fused_instructions_computation(); if (CodegenDecision can_codegen = IsTritonSupportedComputation( *fusion_computation, device_info.gpu_compute_capability()); !can_codegen) { VLOG(2) << "Can't rewrite fusion " << fusion_instruction->ToString() << " because one or more instructions is not supported by Triton: " << can_codegen.Explain(); return false; } TF_ASSIGN_OR_RETURN(auto backend_config, fusion_instruction->backend_config<GpuBackendConfig>()); if (backend_config.has_fusion_backend_config() && backend_config.fusion_backend_config().has_block_level_fusion_config()) { return false; } HloFusionAnalysisCache fusion_analysis_cache(device_info); GpuPerformanceModelWithIndexingAnalysis indexing_performance_model( &device_info, &fusion_analysis_cache, shape_size, ctx); auto fusion_adaptor = HloFusionAdaptor::ForInstruction( Cast<HloFusionInstruction>(fusion_instruction)); TF_ASSIGN_OR_RETURN( TiledRunTimeDataOrError tiled_runtime_data_or_error, indexing_performance_model.TryFindBestTilingForFusion(*fusion_adaptor)); if (const auto* fusion_decision = std::get_if<FusionDecision>(&tiled_runtime_data_or_error)) { VLOG(2) << "Can't rewrite fusion " << fusion_instruction->ToString() << " because tiling search failed. 
(The most likely cause for " << "is that SymbolicTileAnalysis failed.)"; return false; } TiledRunTimeData tiled_runtime_data = std::get<TiledRunTimeData>(std::move(tiled_runtime_data_or_error)); VLOG(1) << "Found parameters " << absl::StrCat( "sizes=[", absl::StrJoin( tiled_runtime_data.block_level_parameters.output_tile_sizes, ", "), "], num_warps=", tiled_runtime_data.block_level_parameters.num_warps) << " for fusion computation " << fusion_computation->ToString(); *backend_config.mutable_fusion_backend_config() ->mutable_block_level_fusion_config() = tiled_runtime_data.block_level_parameters.ToBlockLevelFusionConfig(); backend_config.mutable_fusion_backend_config()->set_kind( std::string(kTritonFusionKind)); TF_RETURN_IF_ERROR(fusion_instruction->set_backend_config(backend_config)); return true; } } absl::StatusOr<bool> FusionBlockLevelRewriter::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { TF_RETURN_IF_ERROR(EnsureTritonSupportsComputeCapability( device_info_.gpu_compute_capability())); MLIRContext ctx; bool has_changed = false; for (HloComputation* computation : module->MakeComputationSorted(execution_threads)) { if (!computation->IsFusionComputation()) { continue; } TF_ASSIGN_OR_RETURN( bool changed, ProcessFusionInstruction( ::xla::Cast<HloFusionInstruction>(computation->FusionInstruction()), device_info_, shape_size_, &ctx)); has_changed |= changed; } return has_changed; } } }
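A quick illustration of the control flow in ProcessFusionInstruction above: TryFindBestTilingForFusion returns a std::variant holding either tiling data or a FusionDecision explaining why tiling failed, and the caller probes the error alternative with std::get_if before extracting the data with std::get. The sketch below is a self-contained stand-in using plain C++ types; the struct names are placeholders, not the XLA definitions.

#include <iostream>
#include <string>
#include <utility>
#include <variant>

// Placeholder stand-ins for the XLA types; only the control flow is the point.
struct FusionDecisionStub { std::string explanation; };
struct TiledRunTimeDataStub { int num_warps; };
using TiledOrError = std::variant<TiledRunTimeDataStub, FusionDecisionStub>;

bool ProcessResult(TiledOrError result) {
  // Probe the "decision" (error) alternative first, as the pass does.
  if (const auto* decision = std::get_if<FusionDecisionStub>(&result)) {
    std::cout << "not rewriting: " << decision->explanation << "\n";
    return false;  // leave the fusion untouched instead of reporting an error
  }
  TiledRunTimeDataStub data = std::get<TiledRunTimeDataStub>(std::move(result));
  std::cout << "rewriting with num_warps=" << data.num_warps << "\n";
  return true;
}

int main() {
  ProcessResult(TiledRunTimeDataStub{4});
  ProcessResult(FusionDecisionStub{"SymbolicTileAnalysis failed"});
  return 0;
}

Returning false on the decision branch mirrors the pass declining to rewrite a fusion it cannot tile, rather than failing the whole compilation.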
#include "xla/service/gpu/transforms/fusion_block_level_rewriter.h" #include <cstdint> #include <memory> #include <variant> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/log/check.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "mlir/IR/MLIRContext.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/fusions/triton/triton_support.h" #include "xla/service/gpu/gpu_device_info_for_tests.h" #include "xla/service/gpu/ir_emission_utils.h" #include "xla/service/gpu/model/gpu_hlo_cost_analysis.h" #include "xla/service/gpu/model/symbolic_tile_analysis.h" #include "xla/service/pattern_matcher.h" #include "xla/service/pattern_matcher_gmock.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/stream_executor/device_description.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/status_matchers.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { namespace m = ::xla::match; using ::tsl::testing::IsOkAndHolds; GpuHloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() { return [&](const Shape& shape) { constexpr int64_t kPointerSize = 8; return ShapeUtil::ByteSizeOf(shape, kPointerSize); }; } bool HasTritonBlockLevelFusionConfig(const HloInstruction* fusion) { return fusion->opcode() == HloOpcode::kFusion && fusion->has_backend_config() && fusion->backend_config<GpuBackendConfig>().ok() && fusion->backend_config<GpuBackendConfig>() ->fusion_backend_config() .has_block_level_fusion_config() && fusion->backend_config<GpuBackendConfig>() ->fusion_backend_config() .kind() == kTritonFusionKind; } class FusionBlockLevelRewriterTest : public HloTestBase { protected: se::DeviceDescription device_info_{TestGpuDeviceInfo::RTXA6000DeviceInfo( se::CudaComputeCapability::Ampere())}; }; TEST_F(FusionBlockLevelRewriterTest, DoesNotRewriteFusionThatIsAlreadyBlockLevel) { const absl::string_view hlo_text = R"( fusion_computation { ROOT param_0 = f32[10,10] parameter(0) } ENTRY entry { param_0 = f32[10,10] parameter(0) ROOT fusion = f32[10,10] fusion(param_0), kind=kCustom, calls=fusion_computation, backend_config={"fusion_backend_config": {"kind":"__triton", "block_level_fusion_config":{}}} })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text)); EXPECT_THAT(FusionBlockLevelRewriter(device_info_, ShapeSizeBytesFunction()) .Run(module.get()), IsOkAndHolds(false)); } TEST_F(FusionBlockLevelRewriterTest, RewritesFusionThatIsNotBlockLevelAndCanBeTiledAndCodegenedCorrectly) { const absl::string_view hlo_text = R"( fusion_computation { ROOT param_0 = f32[10,10] parameter(0) } ENTRY entry { param_0 = f32[10,10] parameter(0) ROOT fusion = f32[10,10] fusion(param_0), kind=kLoop, calls=fusion_computation })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text)); EXPECT_THAT(FusionBlockLevelRewriter(device_info_, ShapeSizeBytesFunction()) .Run(module.get()), IsOkAndHolds(true)); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Fusion(m::Parameter()) .WithPredicate(HasTritonBlockLevelFusionConfig))); } TEST_F(FusionBlockLevelRewriterTest, DoesNotRewriteFusionThatIsNotBlockLevelAndCannotBeTiledCorrectly) { const absl::string_view hlo_text = R"( fusion_computation { param_0 = f32[10,10] parameter(0) ROOT bitcast = f32[25,4] bitcast(param_0) } ENTRY entry { param_0 = 
f32[10,10] parameter(0) ROOT fusion = f32[25,4] fusion(param_0), kind=kLoop, calls=fusion_computation })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text)); mlir::MLIRContext ctx; ASSERT_FALSE(std::holds_alternative<SymbolicTileAnalysis>( SymbolicTileAnalysis::AnalyzeComputation( *module->GetComputationWithName("fusion_computation"), &ctx))); EXPECT_THAT(FusionBlockLevelRewriter(device_info_, ShapeSizeBytesFunction()) .Run(module.get()), IsOkAndHolds(false)); } TEST_F(FusionBlockLevelRewriterTest, DoesNotRewriteFusionThatIsNotBlockLevelAndCannotBeCodegenedCorrectly) { const absl::string_view hlo_text = R"( fusion_computation { param_0 = f8e4m3fn[10,10] parameter(0) ROOT add = f8e4m3fn[10,10] add(param_0, param_0) } ENTRY entry { param_0 = f8e4m3fn[10,10] parameter(0) ROOT fusion = f8e4m3fn[10,10] fusion(param_0), kind=kLoop, calls=fusion_computation })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text)); ASSERT_FALSE(IsTritonSupportedComputation( *module->GetComputationWithName("fusion_computation"), device_info_.gpu_compute_capability())); EXPECT_THAT(FusionBlockLevelRewriter(device_info_, ShapeSizeBytesFunction()) .Run(module.get()), IsOkAndHolds(false)); } } } }
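A small aside on the test file above: ShapeSizeBytesFunction wraps a byte-size callback around ShapeUtil::ByteSizeOf with an assumed 8-byte pointer size. The stub below shows the same idea for dense arrays only (element count times element width); it is an illustration under that simplifying assumption, not the XLA helper.

#include <cstdint>
#include <functional>
#include <numeric>
#include <vector>

// Dense-array-only stand-in for the byte-size callback: element count times
// element width. The real ShapeUtil::ByteSizeOf also handles tuple shapes,
// which is why the test supplies a pointer size of 8.
using ShapeSizeFn =
    std::function<int64_t(const std::vector<int64_t>&, int64_t)>;

ShapeSizeFn MakeShapeSizeBytesStub() {
  return [](const std::vector<int64_t>& dims, int64_t element_bytes) {
    int64_t elements = std::accumulate(dims.begin(), dims.end(), int64_t{1},
                                       std::multiplies<int64_t>());
    return elements * element_bytes;  // f32[10,10] -> 100 * 4 = 400 bytes
  };
}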
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/fusion_block_level_rewriter.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/fusion_block_level_rewriter_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
fad4df4d-79a0-4439-a363-0b60791c4666
cpp
tensorflow/tensorflow
conv_rewriter
third_party/xla/xla/service/gpu/transforms/conv_rewriter.cc
third_party/xla/xla/service/gpu/transforms/conv_rewriter_test.cc
#include "xla/service/gpu/transforms/conv_rewriter.h" #include <cstdint> #include <cstdlib> #include <memory> #include <numeric> #include <optional> #include <string> #include <string_view> #include <tuple> #include <utility> #include <variant> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_set.h" #include "absl/status/status.h" #include "absl/strings/str_replace.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/permutation_util.h" #include "xla/primitive_util.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/cublas_cudnn.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/stream_executor/device_description.h" #include "xla/util.h" #include "xla/window_util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" #include "tsl/platform/status.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { absl::Status CheckTypes(HloInstruction* conv, const se::GpuComputeCapability cc) { auto valid_shape = [conv, &cc](const Shape& shape) -> absl::Status { PrimitiveType type = shape.element_type(); if (!primitive_util::IsFloatingPointType(type) && !primitive_util::IsIntegralType(type)) { return Unimplemented( "Convolutions must have floating-point or integral operands/outputs, " "but got convolution with type %s: %s", primitive_util::LowercasePrimitiveTypeName(type), conv->ToString()); } if (primitive_util::IsF8Type(type)) { if (type != F8E4M3FN && type != F8E5M2) { return Unimplemented( "The only FP8 types supported in convolutions are f8e5m2 and " "f8e4m3, " "but got convolution with FP8 type %s: %s", primitive_util::LowercasePrimitiveTypeName(type), conv->ToString()); } if (!std::holds_alternative<se::CudaComputeCapability>(cc)) { return Unimplemented( "FP8 convolutions are only supported on CUDA GPUs, but got " "FP8 convolution on ROCm GPU: %s", conv->ToString()); } else if (!std::get<se::CudaComputeCapability>(cc).IsAtLeastHopper()) { return Unimplemented( "FP8 convolutions are only supported on CUDA GPUs with compute " "capability at least 9.0, but got " "FP8 convolution on GPU with compute capability %s: %s", std::get<se::CudaComputeCapability>(cc).ToString(), conv->ToString()); } } return absl::OkStatus(); }; TF_RETURN_IF_ERROR(valid_shape(conv->shape())); TF_RETURN_IF_ERROR(valid_shape(conv->operand(0)->shape())); TF_RETURN_IF_ERROR(valid_shape(conv->operand(1)->shape())); return absl::OkStatus(); } using ConvolutionMatch = std::optional< std::tuple<Window, ConvolutionDimensionNumbers, HloInstruction*>>; bool MaybeConv1dToConv2d(HloInstruction* conv) { if (conv->window().dimensions().size() != 2) { return false; } if (conv->operand(1)->opcode() != HloOpcode::kReshape) { return false; } auto filter = conv->operand(1); std::optional<ShapeUtil::ShapeEqualityDescriptor> reshape_degenerate = filter->ReshapeMerelyInsertsOrDeletes1SizedDimensions(); if (reshape_degenerate.has_value() && reshape_degenerate->deleted_dimensions.empty() && reshape_degenerate->inserted_dimensions.size() == 1) { const auto& dnums = conv->convolution_dimension_numbers(); for (auto dim : dnums.kernel_spatial_dimensions()) { if (dim == reshape_degenerate->inserted_dimensions[0]) { return true; } } } return false; } bool CanImplementAsGpuForwardConv(HloInstruction* conv) { const ConvolutionDimensionNumbers& dnums = conv->convolution_dimension_numbers(); 
if (dnums.input_spatial_dimensions_size() > 3) { return false; } if (ShapeUtil::IsZeroElementArray(conv->operand(0)->shape()) || ShapeUtil::IsZeroElementArray(conv->operand(1)->shape())) { return false; } if (dnums.input_spatial_dimensions_size() == 2 ? !window_util::AllOrNoneReversed(conv->window()) : window_util::HasWindowReversal(conv->window())) { return false; } return true; } ConvolutionMatch MatchBackwardFilter(HloInstruction* conv) { VLOG(2) << "Trying to match convolution backward filter."; if (conv->feature_group_count() > 1) { VLOG(1) << conv->ToString() << " is a forward convolution. All grouped backward filters are " "mapped to batch grouped convolutions in tf2xla bridge. Hence " "backward filter " "convolutions cannot have feature groups greater than 1 at this " "point. No need to fold to backward filter."; return std::nullopt; } CHECK_EQ(HloOpcode::kConvolution, conv->opcode()); const ConvolutionDimensionNumbers& conv_dnums = conv->convolution_dimension_numbers(); auto input_batch_dim = conv_dnums.input_batch_dimension(); auto input_feature_dim = conv_dnums.input_feature_dimension(); auto input_spatial_dims = conv_dnums.input_spatial_dimensions(); auto kernel_input_feature_dim = conv_dnums.kernel_input_feature_dimension(); auto kernel_output_feature_dim = conv_dnums.kernel_output_feature_dimension(); auto kernel_spatial_dims = conv_dnums.kernel_spatial_dimensions(); auto output_batch_dim = conv_dnums.output_batch_dimension(); auto output_feature_dim = conv_dnums.output_feature_dimension(); auto output_spatial_dims = conv_dnums.output_spatial_dimensions(); for (const WindowDimension& window_dim : conv->window().dimensions()) { if (window_dim.stride() != 1) { VLOG(1) << "Forward convolution's window " << conv->window().ShortDebugString() << " should have stride of 1."; return std::nullopt; } if (window_dim.base_dilation() != 1) { VLOG(1) << "Forward convolution's window " << conv->window().ShortDebugString() << " should have no base (LHS) dilation."; return std::nullopt; } if (window_dim.padding_low() < 0) { VLOG(1) << "Padding low should be non-negative."; return std::nullopt; } if (window_dim.window_reversal()) { VLOG(1) << "Window reversal field not supported"; return std::nullopt; } } int small_kernel_dimension_num = 0; for (int i = 0; i < kernel_spatial_dims.size(); ++i) { if (conv->operand(1)->shape().dimensions(kernel_spatial_dims[i]) <= conv->shape().dimensions(output_spatial_dims[i])) { small_kernel_dimension_num += 1; } } if ((kernel_spatial_dims.empty() || small_kernel_dimension_num > 1 || (!MaybeConv1dToConv2d(conv) && small_kernel_dimension_num == 1)) && !window_util::HasWindowDilation(conv->window())) { VLOG(1) << conv->ToString() << " is a regular forward convolution. 
No need " "to fold it to a backward filter convolution...."; return std::nullopt; } Window backward_conv_window; for (int i = 0; i < input_spatial_dims.size(); ++i) { WindowDimension* dim = backward_conv_window.add_dimensions(); int64_t filter_size = conv->shape().dimensions(output_spatial_dims[i]); dim->set_size(filter_size); dim->set_stride(conv->window().dimensions(i).window_dilation()); dim->set_padding_low(conv->window().dimensions(i).padding_low()); dim->set_base_dilation(1); dim->set_window_dilation(1); int64_t input_size = conv->operand(0)->shape().dimensions(input_spatial_dims[i]); int64_t output_size = conv->window().dimensions(i).size(); int64_t padded_input_size = filter_size + (output_size - 1) * dim->stride(); int64_t min_padding_high = padded_input_size - input_size - dim->padding_low(); int64_t max_padding_high = min_padding_high + dim->stride() - 1; CHECK_GE(dim->padding_low(), 0); if (dim->padding_low() >= min_padding_high && dim->padding_low() <= max_padding_high) { dim->set_padding_high(dim->padding_low()); } else { if (dim->padding_low() < min_padding_high) { dim->set_padding_high(min_padding_high); } else { dim->set_padding_high(max_padding_high); } } if (dim->padding_high() < 0) { LOG(WARNING) << "Fusing this pattern to backward filter convolution would cause " "negative padding (" << dim->padding_high() << ") on right/bottom of the weight gradients, which is not " "supported by GpuConvPaddingLegalization (b/32744257). " "Falling back to " "unfused convolution for instruction: " << conv->ToString(); return std::nullopt; } } ConvolutionDimensionNumbers backward_conv_dnums; backward_conv_dnums.set_input_batch_dimension(input_feature_dim); backward_conv_dnums.set_input_feature_dimension(input_batch_dim); for (int i = 0; i < input_spatial_dims.size(); ++i) { backward_conv_dnums.add_input_spatial_dimensions(input_spatial_dims[i]); } backward_conv_dnums.set_output_batch_dimension(kernel_input_feature_dim); backward_conv_dnums.set_output_feature_dimension(kernel_output_feature_dim); for (int i = 0; i < kernel_spatial_dims.size(); ++i) { backward_conv_dnums.add_output_spatial_dimensions(kernel_spatial_dims[i]); } backward_conv_dnums.set_kernel_input_feature_dimension(output_batch_dim); backward_conv_dnums.set_kernel_output_feature_dimension(output_feature_dim); for (int i = 0; i < output_spatial_dims.size(); ++i) { backward_conv_dnums.add_kernel_spatial_dimensions(output_spatial_dims[i]); } HloInstruction* lhs = conv->mutable_operand(0); return std::make_tuple(backward_conv_window, backward_conv_dnums, lhs); } ConvolutionMatch MatchBackwardInput(HloInstruction* conv) { VLOG(2) << "Trying to match convolution backward input."; if (conv->feature_group_count() > 1) { return std::nullopt; } CHECK_EQ(HloOpcode::kConvolution, conv->opcode()); HloInstruction* reverse_filter = conv->mutable_operand(1); ConvolutionDimensionNumbers dnums = conv->convolution_dimension_numbers(); auto kernel_out_feature_dim = dnums.kernel_output_feature_dimension(); auto kernel_out_features = reverse_filter->shape().dimensions(kernel_out_feature_dim); if (conv->feature_group_count() > 1 && kernel_out_features == conv->feature_group_count()) { return std::nullopt; } bool is_reversed_filter = reverse_filter->opcode() == HloOpcode::kReverse && absl::c_is_permutation(dnums.kernel_spatial_dimensions(), reverse_filter->dimensions()); bool is_reversed_conv1d_filter = MaybeConv1dToConv2d(conv) && reverse_filter->operand(0)->opcode() == HloOpcode::kReverse; bool is_1x1_filter = 
absl::c_all_of(conv->window().dimensions(), [](const WindowDimension& d) { return d.size() == 1; }); if (!is_reversed_filter && !is_reversed_conv1d_filter && !(window_util::HasBaseDilation(conv->window()) && (reverse_filter->IsConstant() || is_1x1_filter))) { VLOG(1) << "Can't match to backwards convolution. Either filter is not " "kReverse, or it's not a base-dilated conv with a 1x1 or " "constant filter."; return std::nullopt; } for (const WindowDimension& window_dim : conv->window().dimensions()) { if (window_dim.stride() != 1) { VLOG(1) << "Forward convolution's window " << conv->window().ShortDebugString() << " should have stride of 1."; return std::nullopt; } if (window_dim.window_dilation() != 1) { VLOG(1) << "Forward convolution's window " << conv->window().ShortDebugString() << " should have no window dilation."; return std::nullopt; } if (window_dim.window_reversal()) { VLOG(1) << "Window reversal field not supported"; return std::nullopt; } } const auto& input_spatial_dims = dnums.input_spatial_dimensions(); const auto& output_spatial_dims = dnums.output_spatial_dimensions(); CHECK_EQ(conv->window().dimensions().size(), input_spatial_dims.size()); CHECK_EQ(output_spatial_dims.size(), input_spatial_dims.size()); const Window& old_window = conv->window(); Window new_window = old_window; for (size_t i = 0; i < input_spatial_dims.size(); ++i) { auto dim = new_window.mutable_dimensions(i); dim->set_stride(old_window.dimensions(i).base_dilation()); dim->set_base_dilation(1); auto kernel_size = old_window.dimensions(i).size(); auto backward_padding_low = kernel_size - 1 - old_window.dimensions(i).padding_low(); if (backward_padding_low < 0) { LOG(WARNING) << "The low padding of the backward convolution would be negative (" << backward_padding_low << "), which isn't supported by GpuConvPaddingLegalization " "for now (b/32744257)."; return std::nullopt; } dim->set_padding_low(backward_padding_low); auto unpadded_input_size = conv->shape().dimensions(output_spatial_dims[i]); auto output_size = conv->operand(0)->shape().dimensions(input_spatial_dims[i]); auto padded_input_size = kernel_size + dim->stride() * (output_size - 1); auto total_pad_size = padded_input_size - unpadded_input_size; auto min_padding_high = total_pad_size - backward_padding_low; auto max_padding_high = min_padding_high + dim->stride() - 1; if (backward_padding_low >= min_padding_high && backward_padding_low <= max_padding_high) { dim->set_padding_high(backward_padding_low); } else { if (backward_padding_low < min_padding_high) { dim->set_padding_high(min_padding_high); } else { dim->set_padding_high(max_padding_high); } } if (dim->padding_high() < 0) { LOG(WARNING) << "Fusing this pattern to backward convolution would cause " "negative padding (" << dim->padding_high() << ") on right/bottom of the activations, which is not " "supported by GpuConvPaddingLegalization (b/32744257). 
" "Falling back to unfused convolution for instruction: " << conv->ToString(); return std::nullopt; } } auto conv_dnums = conv->convolution_dimension_numbers(); dnums.set_kernel_input_feature_dimension( conv_dnums.kernel_output_feature_dimension()); dnums.set_kernel_output_feature_dimension( conv_dnums.kernel_input_feature_dimension()); for (int i = 0; i < input_spatial_dims.size(); ++i) { dnums.set_input_spatial_dimensions(i, conv_dnums.output_spatial_dimensions(i)); dnums.set_output_spatial_dimensions(i, conv_dnums.input_spatial_dimensions(i)); } dnums.set_input_feature_dimension(conv_dnums.output_feature_dimension()); dnums.set_input_batch_dimension(conv_dnums.output_batch_dimension()); dnums.set_output_feature_dimension(conv_dnums.input_feature_dimension()); dnums.set_output_batch_dimension(conv_dnums.input_batch_dimension()); if (reverse_filter->opcode() != HloOpcode::kReverse && reverse_filter->IsConstant()) { HloComputation* c = conv->parent(); reverse_filter = c->AddInstruction( HloInstruction::CreateReverse(reverse_filter->shape(), reverse_filter, dnums.kernel_spatial_dimensions())); reverse_filter = c->AddInstruction( HloInstruction::CreateReverse(reverse_filter->shape(), reverse_filter, dnums.kernel_spatial_dimensions())); TF_CHECK_OK(conv->ReplaceOperandWith(1, reverse_filter)); } HloInstruction* rhs = reverse_filter; if (rhs->opcode() == HloOpcode::kReverse) { rhs = rhs->mutable_operand(0); } else if (is_reversed_conv1d_filter) { auto src = rhs->mutable_operand(0)->mutable_operand(0); rhs = conv->parent()->AddInstruction( HloInstruction::CreateReshape(rhs->shape(), src)); } if (conv->feature_group_count() == 1) { return std::make_tuple(new_window, dnums, rhs); } int64_t input_feature_dimension = dnums.kernel_input_feature_dimension(); int64_t output_feature_dimension = dnums.kernel_output_feature_dimension(); if (std::abs(input_feature_dimension - output_feature_dimension) != 1) { return std::nullopt; } int64_t input_features = rhs->shape().dimensions(input_feature_dimension); int64_t output_features = rhs->shape().dimensions(output_feature_dimension); std::vector<int64_t> reshape_dims = SpanToVector(rhs->shape().dimensions()); auto num_groups = conv->feature_group_count(); CHECK_EQ(input_features % num_groups, 0) << "Input feature count should be an exact multiple of feature group " "count"; reshape_dims[input_feature_dimension] = reshape_dims[input_feature_dimension] / num_groups; reshape_dims.insert(reshape_dims.begin() + input_feature_dimension, num_groups); HloComputation* c = conv->parent(); rhs = c->AddInstruction(HloInstruction::CreateReshape( ShapeUtil::MakeShape(rhs->shape().element_type(), reshape_dims), rhs)); std::vector<int64_t> transpose_dims(rhs->shape().dimensions_size()); std::iota(transpose_dims.begin(), transpose_dims.end(), 0); transpose_dims.erase(transpose_dims.begin() + input_feature_dimension); transpose_dims.insert(transpose_dims.begin() + output_feature_dimension, input_feature_dimension); std::vector<int64_t> transpose_reshape_dims = SpanToVector(rhs->shape().dimensions()); transpose_reshape_dims.erase(transpose_reshape_dims.begin() + input_feature_dimension); transpose_reshape_dims.insert( transpose_reshape_dims.begin() + output_feature_dimension, num_groups); rhs = c->AddInstruction(HloInstruction::CreateTranspose( ShapeUtil::MakeShape(rhs->shape().element_type(), transpose_reshape_dims), rhs, transpose_dims)); Shape new_shape = rhs->shape(); new_shape.DeleteDimension(output_feature_dimension); new_shape.set_dimensions(output_feature_dimension, 
output_features * num_groups); rhs = c->AddInstruction(HloInstruction::CreateReshape(new_shape, rhs)); return std::make_tuple(new_window, dnums, rhs); } HloInstruction* CreateGpuConv(absl::string_view call_target, const Shape& shape, HloInstruction* lhs, HloInstruction* rhs, const Window& window, const ConvolutionDimensionNumbers& dnums, int64_t feature_group_count, const PrecisionConfig& precision_config, const OpMetadata& metadata) { HloComputation* computation = lhs->parent(); Shape call_shape = ShapeUtil::MakeTupleShape({shape, ShapeUtil::MakeShape(U8, {0})}); HloInstruction* custom_call = computation->AddInstruction( HloInstruction::CreateCustomCall(call_shape, {lhs, rhs}, call_target)); custom_call->set_window(window); custom_call->set_convolution_dimension_numbers(dnums); custom_call->set_feature_group_count(feature_group_count); *custom_call->mutable_precision_config() = precision_config; custom_call->set_metadata(metadata); std::optional<std::string> name; if (call_target == kCudnnConvForwardCallTarget) { name = "cudnn-conv"; } else if (call_target == kCudnnConvBackwardInputCallTarget) { name = "cudnn-conv-bw-input"; } else if (call_target == kCudnnConvBackwardFilterCallTarget) { name = "cudnn-conv-bw-filter"; } else if (call_target == kCudnnConvBiasActivationForwardCallTarget) { name = "cudnn-conv-bias-activation"; } if (name.has_value()) { computation->parent()->SetAndUniquifyInstrName(custom_call, *name); } return custom_call; } HloInstruction* ConvertBatchGroupedToFeatureGroupedConvolution( HloInstruction* conv) { CHECK_EQ(conv->feature_group_count(), 1); int64_t num_groups = conv->batch_group_count(); auto dim_numbers = conv->convolution_dimension_numbers(); auto lhs = conv->mutable_operand(0); auto rhs = conv->mutable_operand(1); int64_t input_batch_dimension = dim_numbers.input_batch_dimension(); Shape output_shape = conv->shape(); int64_t input_feature_dimension = dim_numbers.input_feature_dimension(); int64_t input_feature = lhs->shape().dimensions(input_feature_dimension); HloComputation* computation = lhs->parent(); auto add = [&](std::unique_ptr<HloInstruction> inst) { return computation->AddInstruction(std::move(inst)); }; std::vector<int64_t> reshape_dims = SpanToVector(lhs->shape().dimensions()); reshape_dims[input_batch_dimension] = reshape_dims[input_batch_dimension] / num_groups; reshape_dims.insert(reshape_dims.begin() + input_batch_dimension, num_groups); lhs = add(HloInstruction::CreateReshape( ShapeUtil::MakeShape(lhs->shape().element_type(), reshape_dims), lhs)); std::vector<int64_t> transpose_dims(lhs->shape().dimensions_size()); std::iota(transpose_dims.begin(), transpose_dims.end(), 0); transpose_dims.erase(transpose_dims.begin() + input_batch_dimension); transpose_dims.insert(transpose_dims.begin() + input_feature_dimension, input_batch_dimension); std::vector<int64_t> transpose_reshape_dims = ComposePermutations(lhs->shape().dimensions(), transpose_dims); lhs = add(HloInstruction::CreateTranspose( ShapeUtil::MakeShape(lhs->shape().element_type(), transpose_reshape_dims), lhs, transpose_dims)); Shape new_shape = lhs->shape(); new_shape.DeleteDimension(input_feature_dimension); new_shape.set_dimensions(input_feature_dimension, input_feature * num_groups); lhs = add(HloInstruction::CreateReshape(new_shape, lhs)); std::vector<HloInstruction*> new_operands = {lhs, rhs}; auto new_conv = conv->CloneWithNewOperands(output_shape, new_operands); new_conv->set_feature_group_count(num_groups); new_conv->set_batch_group_count(1); 
new_conv->set_convolution_dimension_numbers(dim_numbers); return computation->AddInstruction(std::move(new_conv)); } CudnnConvBackendConfig GetDefaultBackendConfig() { CudnnConvBackendConfig config; config.set_conv_result_scale(1); return config; } static absl::StatusOr<HloInstruction*> CreateCustomCallHelper( HloInstruction* conv, const se::GpuComputeCapability& cc) { TF_RETURN_IF_ERROR(CheckTypes(conv, cc)); if (ConvolutionMatch m = MatchBackwardInput(conv)) { auto& [window, dnums, rhs] = *m; return CreateGpuConv(kCudnnConvBackwardInputCallTarget, conv->shape(), conv->mutable_operand(0), rhs, window, dnums, conv->feature_group_count(), conv->precision_config(), conv->metadata()); } if (ConvolutionMatch m = MatchBackwardFilter(conv)) { auto& [window, dnums, lhs] = *m; return CreateGpuConv(kCudnnConvBackwardFilterCallTarget, conv->shape(), lhs, conv->mutable_operand(1), window, dnums, conv->batch_group_count(), conv->precision_config(), conv->metadata()); } if (CanImplementAsGpuForwardConv(conv)) { if (conv->batch_group_count() > 1) { conv = ConvertBatchGroupedToFeatureGroupedConvolution(conv); } return CreateGpuConv(kCudnnConvForwardCallTarget, conv->shape(), conv->mutable_operand(0), conv->mutable_operand(1), conv->window(), conv->convolution_dimension_numbers(), conv->feature_group_count(), conv->precision_config(), conv->metadata()); } return nullptr; } absl::StatusOr<bool> RunOnInstruction(HloInstruction* conv, const se::GpuComputeCapability& cc) { CHECK_EQ(conv->opcode(), HloOpcode::kConvolution); TF_ASSIGN_OR_RETURN(HloInstruction * custom_call, CreateCustomCallHelper(conv, cc)); if (custom_call == nullptr) { return false; } GpuBackendConfig gpu_backend_config; *gpu_backend_config.mutable_cudnn_conv_backend_config() = GetDefaultBackendConfig(); TF_RETURN_IF_ERROR(custom_call->set_backend_config(gpu_backend_config)); VLOG(1) << "Replacing convolution " << conv->ToString() << " with " << custom_call->ToString(); TF_RETURN_IF_ERROR(conv->parent()->ReplaceWithNewInstruction( conv, HloInstruction::CreateGetTupleElement(conv->shape(), custom_call, 0))); return true; } absl::StatusOr<bool> RunOnComputation(HloComputation* computation, const se::GpuComputeCapability& cc) { std::vector<HloInstruction*> convs; for (auto* hlo : computation->instructions()) { if (hlo->opcode() == HloOpcode::kConvolution) { convs.push_back(hlo); } } bool changed = false; for (HloInstruction* conv : convs) { TF_ASSIGN_OR_RETURN(bool result, RunOnInstruction(conv, cc)); changed |= result; } return changed; } } absl::StatusOr<bool> ConvRewriter::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { XLA_VLOG_LINES(2, "ConvRewriter::Run(), before:\n" + module->ToString()); bool changed = false; for (HloComputation* computation : module->MakeNonfusionComputations(execution_threads)) { TF_ASSIGN_OR_RETURN(bool result, RunOnComputation(computation, compute_capability_)); changed |= result; } XLA_VLOG_LINES(2, "ConvRewriter::Run(), after:\n" + module->ToString()); return changed; } bool ConvRewriter::ConvIsLowerable(HloInstruction* conv) { return CanImplementAsGpuForwardConv(conv) || MatchBackwardFilter(conv) || MatchBackwardInput(conv); } } }
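As a reading aid for MatchBackwardInput in the conv_rewriter code above: for each spatial dimension, the forward window's base dilation becomes the backward convolution's stride and the backward low padding is kernel_size - 1 - forward_padding_low, with a negative value causing the match to be rejected (the b/32744257 fallback). The sketch below is a hypothetical standalone function mirroring just that arithmetic, not the pass itself.

#include <cstdint>
#include <optional>

// Hypothetical mirror of one step in MatchBackwardInput: derive a
// backward-input window dimension from the forward convolution's window.
struct BackwardDim {
  int64_t stride;
  int64_t padding_low;
};

std::optional<BackwardDim> BackwardInputDim(int64_t kernel_size,
                                            int64_t forward_padding_low,
                                            int64_t forward_base_dilation) {
  // Forward base (LHS) dilation turns into the backward stride.
  int64_t stride = forward_base_dilation;
  int64_t padding_low = kernel_size - 1 - forward_padding_low;
  if (padding_low < 0) {
    // Negative padding is not supported by GpuConvPaddingLegalization, so the
    // pass gives up on the match (see the LOG(WARNING) in the code above).
    return std::nullopt;
  }
  return BackwardDim{stride, padding_low};
}

For the BackwardInputConvolveEvenPadding test further down (kernel size 7, forward padding_low 3, base dilation 1) this yields stride 1 and padding_low 3, matching the window the test checks on the rewritten custom call.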
#include "xla/service/gpu/transforms/conv_rewriter.h" #include <optional> #include <string> #include "absl/log/check.h" #include "absl/strings/str_format.h" #include "xla/array4d.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/literal_util.h" #include "xla/protobuf_util.h" #include "xla/service/gpu/cublas_cudnn.h" #include "xla/service/pattern_matcher.h" #include "xla/service/pattern_matcher_gmock.h" #include "xla/service/shape_inference.h" #include "xla/shape_util.h" #include "xla/stream_executor/device_description.h" #include "xla/test.h" #include "xla/test_helpers.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/status_matchers.h" #include "tsl/platform/statusor.h" #include "tsl/platform/test.h" namespace xla { namespace gpu { namespace { namespace m = ::xla::match; class ConvRewriterTest : public HloTestBase { public: ConvRewriterTest() : HloTestBase(true, false) { for (int i = 0; i < 2; ++i) { WindowDimension* window_dim = default_conv_window_.add_dimensions(); window_dim->set_size(1); window_dim->set_stride(1); window_dim->set_padding_low(0); window_dim->set_padding_high(0); window_dim->set_window_dilation(1); window_dim->set_base_dilation(1); } tf_default_dnums_for_backward_filter_.set_input_batch_dimension(3); tf_default_dnums_for_backward_filter_.set_input_feature_dimension(0); tf_default_dnums_for_backward_filter_.add_input_spatial_dimensions(1); tf_default_dnums_for_backward_filter_.add_input_spatial_dimensions(2); tf_default_dnums_for_backward_filter_.set_kernel_input_feature_dimension(0); tf_default_dnums_for_backward_filter_.set_kernel_output_feature_dimension( 3); tf_default_dnums_for_backward_filter_.add_kernel_spatial_dimensions(1); tf_default_dnums_for_backward_filter_.add_kernel_spatial_dimensions(2); tf_default_dnums_for_backward_filter_.add_output_spatial_dimensions(0); tf_default_dnums_for_backward_filter_.add_output_spatial_dimensions(1); tf_default_dnums_for_backward_filter_.set_output_batch_dimension(2); tf_default_dnums_for_backward_filter_.set_output_feature_dimension(3); tf_default_dnums_for_backward_input_.set_input_batch_dimension(0); tf_default_dnums_for_backward_input_.set_output_batch_dimension(0); tf_default_dnums_for_backward_input_.set_input_feature_dimension(3); tf_default_dnums_for_backward_input_.set_output_feature_dimension(3); tf_default_dnums_for_backward_input_.add_input_spatial_dimensions(1); tf_default_dnums_for_backward_input_.add_output_spatial_dimensions(1); tf_default_dnums_for_backward_input_.add_input_spatial_dimensions(2); tf_default_dnums_for_backward_input_.add_output_spatial_dimensions(2); tf_default_dnums_for_backward_input_.set_kernel_input_feature_dimension(3); tf_default_dnums_for_backward_input_.set_kernel_output_feature_dimension(2); tf_default_dnums_for_backward_input_.add_kernel_spatial_dimensions(0); tf_default_dnums_for_backward_input_.add_kernel_spatial_dimensions(1); } protected: const se::GpuComputeCapability& GetComputeCapability() { return backend() .default_stream_executor() ->GetDeviceDescription() .gpu_compute_capability(); } bool RunPass(HloModule* module) { return ConvRewriter(GetComputeCapability()).Run(module).value(); } Window default_conv_window_; ConvolutionDimensionNumbers tf_default_dnums_for_backward_filter_; ConvolutionDimensionNumbers tf_default_dnums_for_backward_input_; }; TEST_F(ConvRewriterTest, BackwardFilterConvolve) { HloComputation::Builder builder(TestName()); HloInstruction* activations = 
builder.AddInstruction(HloInstruction::CreateParameter( 0, ShapeUtil::MakeShape(F32, {1, 1, 3, 1}), "activations")); HloInstruction* gradients = builder.AddInstruction(HloInstruction::CreateParameter( 1, ShapeUtil::MakeShape(F32, {1, 1, 2, 1}), "gradients")); Window conv_window = default_conv_window_; conv_window.mutable_dimensions(1)->set_size(2); conv_window.mutable_dimensions(1)->set_window_dilation(2); auto* conv = builder.AddInstruction(HloInstruction::CreateConvolve( ShapeInference::InferConvolveShape( activations->shape(), gradients->shape(), 1, 1, conv_window, tf_default_dnums_for_backward_filter_, std::nullopt) .value(), activations, gradients, 1, 1, conv_window, tf_default_dnums_for_backward_filter_, DefaultPrecisionConfig(2))); OpMetadata metadata; metadata.set_op_name("foo"); conv->set_metadata(metadata); auto module = CreateNewVerifiedModule(); HloComputation* entry_computation = module->AddEntryComputation(builder.Build()); EXPECT_TRUE(RunPass(module.get())); ASSERT_THAT(entry_computation->root_instruction(), GmockMatch(m::GetTupleElement( m::CustomCall({kCudnnConvBackwardFilterCallTarget}), 0))); const auto& md_after_opt = entry_computation->root_instruction()->operand(0)->metadata(); EXPECT_TRUE(protobuf_util::ProtobufEquals(md_after_opt, metadata)) << md_after_opt.DebugString() << " vs " << metadata.DebugString(); } TEST_F(ConvRewriterTest, BackwardFilterConvolveEquivalentToForwardConvolution) { HloComputation::Builder builder(TestName()); HloInstruction* activations = builder.AddInstruction(HloInstruction::CreateParameter( 0, ShapeUtil::MakeShape(F32, {1, 1, 3, 1}), "activations")); HloInstruction* gradients = builder.AddInstruction(HloInstruction::CreateParameter( 1, ShapeUtil::MakeShape(F32, {1, 1, 3, 1}), "gradients")); Window conv_window = default_conv_window_; conv_window.mutable_dimensions(1)->set_size(3); builder.AddInstruction(HloInstruction::CreateConvolve( ShapeInference::InferConvolveShape( activations->shape(), gradients->shape(), 1, 1, conv_window, tf_default_dnums_for_backward_filter_, std::nullopt) .value(), activations, gradients, 1, 1, conv_window, tf_default_dnums_for_backward_filter_, DefaultPrecisionConfig(2))); auto module = CreateNewVerifiedModule(); HloComputation* entry_computation = module->AddEntryComputation(builder.Build()); EXPECT_TRUE(RunPass(module.get())); EXPECT_THAT(entry_computation->root_instruction(), GmockMatch(m::GetTupleElement( m::CustomCall({kCudnnConvForwardCallTarget}), 0))); } TEST_F(ConvRewriterTest, BackwardFilterConvolveWithPaddedActivations) { auto builder = HloComputation::Builder(TestName()); HloInstruction* activations = builder.AddInstruction(HloInstruction::CreateParameter( 0, ShapeUtil::MakeShape(F32, {20, 35, 35, 32}), "activations")); HloInstruction* gradients = builder.AddInstruction(HloInstruction::CreateParameter( 1, ShapeUtil::MakeShape(F32, {20, 35, 35, 32}), "gradients")); Window conv_window = default_conv_window_; for (int i = 0; i < 2; ++i) { conv_window.mutable_dimensions(i)->set_size(35); conv_window.mutable_dimensions(i)->set_padding_low(1); conv_window.mutable_dimensions(i)->set_padding_high(1); } builder.AddInstruction(HloInstruction::CreateConvolve( ShapeUtil::MakeShape(F32, {32, 3, 3, 32}), activations, gradients, 1, 1, conv_window, tf_default_dnums_for_backward_filter_, DefaultPrecisionConfig(2))); auto module = CreateNewVerifiedModule(); HloComputation* entry_computation = module->AddEntryComputation(builder.Build()); EXPECT_TRUE(RunPass(module.get())); 
EXPECT_THAT(entry_computation->root_instruction(), GmockMatch(m::GetTupleElement( m::CustomCall({kCudnnConvBackwardFilterCallTarget}), 0))); } TEST_F(ConvRewriterTest, BackwardFilterConvolveWithPaddedGradients) { auto builder = HloComputation::Builder(TestName()); HloInstruction* activations = builder.AddInstruction(HloInstruction::CreateParameter( 0, ShapeUtil::MakeShape(F32, {20, 10, 10, 192}), "activations")); HloInstruction* gradients = builder.AddInstruction(HloInstruction::CreateParameter( 1, ShapeUtil::MakeShape(F32, {20, 4, 4, 320}), "gradients")); Window conv_window = default_conv_window_; for (int i = 0; i < 2; ++i) { conv_window.mutable_dimensions(i)->set_size(4); conv_window.mutable_dimensions(i)->set_padding_high(-1); conv_window.mutable_dimensions(i)->set_window_dilation(2); } builder.AddInstruction(HloInstruction::CreateConvolve( ShapeUtil::MakeShape(F32, {320, 3, 3, 192}), activations, gradients, 1, 1, conv_window, tf_default_dnums_for_backward_filter_, DefaultPrecisionConfig(2))); auto module = CreateNewVerifiedModule(); HloComputation* entry_computation = module->AddEntryComputation(builder.Build()); EXPECT_TRUE(RunPass(module.get())); EXPECT_THAT(entry_computation->root_instruction(), GmockMatch(m::GetTupleElement( m::CustomCall({kCudnnConvBackwardFilterCallTarget}), 0))); } TEST_F(ConvRewriterTest, BackwardFilterConvolveWithUnevenPadding) { auto builder = HloComputation::Builder(TestName()); HloInstruction* activations = builder.AddInstruction(HloInstruction::CreateParameter( 0, ShapeUtil::MakeShape(F32, {20, 35, 35, 32}), "activations")); HloInstruction* gradients = builder.AddInstruction(HloInstruction::CreateParameter( 1, ShapeUtil::MakeShape(F32, {20, 35, 35, 32}), "gradients")); Window conv_window = default_conv_window_; for (int i = 0; i < 2; ++i) { conv_window.mutable_dimensions(i)->set_size(35); conv_window.mutable_dimensions(i)->set_padding_high(1); } builder.AddInstruction(HloInstruction::CreateConvolve( ShapeUtil::MakeShape(F32, {32, 2, 2, 32}), activations, gradients, 1, 1, conv_window, tf_default_dnums_for_backward_filter_, DefaultPrecisionConfig(2))); auto module = CreateNewVerifiedModule(); HloComputation* entry_computation = module->AddEntryComputation(builder.Build()); EXPECT_TRUE(RunPass(module.get())); EXPECT_THAT(entry_computation->root_instruction(), GmockMatch(m::GetTupleElement( m::CustomCall({kCudnnConvBackwardFilterCallTarget}), 0))); } TEST_F(ConvRewriterTest, BackwardInputConvolveEvenPadding) { auto builder = HloComputation::Builder(TestName()); HloInstruction* output = builder.AddInstruction(HloInstruction::CreateParameter( 0, ShapeUtil::MakeShape(F32, {4, 5, 16, 16}), "output")); HloInstruction* kernel = builder.AddInstruction(HloInstruction::CreateParameter( 1, ShapeUtil::MakeShape(F32, {5, 3, 7, 7}), "kernel")); HloInstruction* reverse_kernel = builder.AddInstruction( HloInstruction::CreateReverse(kernel->shape(), kernel, {2, 3})); Window conv_window = default_conv_window_; for (int i = 0; i < 2; ++i) { conv_window.mutable_dimensions(i)->set_size(7); conv_window.mutable_dimensions(i)->set_padding_low(3); conv_window.mutable_dimensions(i)->set_padding_high(3); } ConvolutionDimensionNumbers conv_dnums; conv_dnums.set_input_batch_dimension(0); conv_dnums.set_output_batch_dimension(0); conv_dnums.set_input_feature_dimension(1); conv_dnums.set_output_feature_dimension(1); conv_dnums.add_input_spatial_dimensions(2); conv_dnums.add_output_spatial_dimensions(2); conv_dnums.add_input_spatial_dimensions(3); 
conv_dnums.add_output_spatial_dimensions(3); conv_dnums.set_kernel_input_feature_dimension(0); conv_dnums.set_kernel_output_feature_dimension(1); conv_dnums.add_kernel_spatial_dimensions(2); conv_dnums.add_kernel_spatial_dimensions(3); HloInstruction* conv = builder.AddInstruction(HloInstruction::CreateConvolve( ShapeUtil::MakeShape(F32, {4, 3, 16, 16}), output, reverse_kernel, 1, 1, conv_window, conv_dnums, DefaultPrecisionConfig(2))); CHECK(ShapeUtil::Compatible( conv->shape(), ShapeInference::InferConvolveShape( output->shape(), reverse_kernel->shape(), 1, 1, conv_window, conv_dnums, std::nullopt) .value())); auto module = CreateNewVerifiedModule(); HloComputation* entry_computation = module->AddEntryComputation(builder.Build()); EXPECT_TRUE(RunPass(module.get())); ASSERT_THAT(entry_computation->root_instruction(), GmockMatch(m::GetTupleElement( m::CustomCall({kCudnnConvBackwardInputCallTarget}), 0))); const HloInstruction* custom_call = entry_computation->root_instruction()->operand(0); for (int i = 0; i < 2; ++i) { const WindowDimension& window_dim = custom_call->window().dimensions(i); EXPECT_EQ(3, window_dim.padding_low()); EXPECT_EQ(3, window_dim.padding_high()); EXPECT_EQ(1, window_dim.stride()); EXPECT_EQ(1, window_dim.base_dilation()); } } TEST_F(ConvRewriterTest, BackwardInputConvolve1x1Filter) { auto builder = HloComputation::Builder(TestName()); HloInstruction* output = builder.AddInstruction(HloInstruction::CreateParameter( 0, ShapeUtil::MakeShape(F32, {1, 1, 3, 1}), "output")); HloInstruction* kernel = builder.AddInstruction(HloInstruction::CreateParameter( 1, ShapeUtil::MakeShape(F32, {1, 1, 1, 1}), "kernel")); Window conv_window = default_conv_window_; conv_window.mutable_dimensions(1)->set_base_dilation(2); builder.AddInstruction(HloInstruction::CreateConvolve( ShapeInference::InferConvolveShape( output->shape(), kernel->shape(), 1, 1, conv_window, tf_default_dnums_for_backward_input_, std::nullopt) .value(), output, kernel, 1, 1, conv_window, tf_default_dnums_for_backward_input_, DefaultPrecisionConfig(2))); auto module = CreateNewVerifiedModule(); HloComputation* entry_computation = module->AddEntryComputation(builder.Build()); EXPECT_TRUE(RunPass(module.get())); EXPECT_THAT(entry_computation->root_instruction(), GmockMatch(m::GetTupleElement( m::CustomCall({kCudnnConvBackwardInputCallTarget}), 0))); } TEST_F(ConvRewriterTest, BackwardInputConvolve1x1FilterEquivalentToForwardConvolve) { auto builder = HloComputation::Builder(TestName()); HloInstruction* output = builder.AddInstruction(HloInstruction::CreateParameter( 0, ShapeUtil::MakeShape(F32, {1, 1, 3, 1}), "output")); HloInstruction* kernel = builder.AddInstruction(HloInstruction::CreateParameter( 1, ShapeUtil::MakeShape(F32, {1, 1, 1, 1}), "kernel")); builder.AddInstruction(HloInstruction::CreateConvolve( ShapeInference::InferConvolveShape( output->shape(), kernel->shape(), 1, 1, default_conv_window_, tf_default_dnums_for_backward_input_, std::nullopt) .value(), output, kernel, 1, 1, default_conv_window_, tf_default_dnums_for_backward_input_, DefaultPrecisionConfig(2))); auto module = CreateNewVerifiedModule(); HloComputation* entry_computation = module->AddEntryComputation(builder.Build()); EXPECT_TRUE(RunPass(module.get())); EXPECT_THAT(entry_computation->root_instruction(), GmockMatch(m::GetTupleElement( m::CustomCall({kCudnnConvForwardCallTarget}), 0))); } TEST_F(ConvRewriterTest, BackwardInputConvolveUnevenPaddingOnGradients) { auto builder = HloComputation::Builder(TestName()); HloInstruction* output = 
builder.AddInstruction(HloInstruction::CreateParameter( 0, ShapeUtil::MakeShape(F32, {20, 4, 4, 320}), "output")); HloInstruction* kernel = builder.AddInstruction(HloInstruction::CreateParameter( 1, ShapeUtil::MakeShape(F32, {3, 3, 192, 320}), "kernel")); HloInstruction* reverse_kernel = builder.AddInstruction( HloInstruction::CreateReverse(kernel->shape(), kernel, {0, 1})); Window conv_window = default_conv_window_; for (int i = 0; i < 2; ++i) { conv_window.mutable_dimensions(i)->set_size(3); conv_window.mutable_dimensions(i)->set_padding_low(2); conv_window.mutable_dimensions(i)->set_padding_high(3); conv_window.mutable_dimensions(i)->set_base_dilation(2); } HloInstruction* conv = builder.AddInstruction(HloInstruction::CreateConvolve( ShapeUtil::MakeShape(F32, {20, 10, 10, 192}), output, reverse_kernel, 1, 1, conv_window, tf_default_dnums_for_backward_input_, DefaultPrecisionConfig(2))); CHECK(ShapeUtil::Compatible( conv->shape(), ShapeInference::InferConvolveShape( output->shape(), reverse_kernel->shape(), 1, 1, conv_window, tf_default_dnums_for_backward_input_, std::nullopt) .value())); auto module = CreateNewVerifiedModule(); HloComputation* entry_computation = module->AddEntryComputation(builder.Build()); EXPECT_TRUE(RunPass(module.get())); ASSERT_THAT(entry_computation->root_instruction(), GmockMatch(m::GetTupleElement( m::CustomCall({kCudnnConvBackwardInputCallTarget}), 0))); const HloInstruction* custom_call = entry_computation->root_instruction()->operand(0); for (int i = 0; i < 2; ++i) { const WindowDimension& window_dim = custom_call->window().dimensions(i); EXPECT_EQ(0, window_dim.padding_low()); EXPECT_EQ(0, window_dim.padding_high()); EXPECT_EQ(2, window_dim.stride()); EXPECT_EQ(1, window_dim.base_dilation()); } } TEST_F(ConvRewriterTest, BackwardInputConvolveLowPaddingTooLarge) { auto builder = HloComputation::Builder(TestName()); HloInstruction* output = builder.AddInstruction(HloInstruction::CreateParameter( 0, ShapeUtil::MakeShape(F32, {20, 4, 4, 320}), "output")); HloInstruction* kernel = builder.AddInstruction(HloInstruction::CreateParameter( 1, ShapeUtil::MakeShape(F32, {3, 3, 192, 320}), "kernel")); HloInstruction* reverse_kernel = builder.AddInstruction( HloInstruction::CreateReverse(kernel->shape(), kernel, {0, 1})); Window conv_window = default_conv_window_; for (int i = 0; i < 2; ++i) { conv_window.mutable_dimensions(i)->set_size(3); conv_window.mutable_dimensions(i)->set_padding_low(3); conv_window.mutable_dimensions(i)->set_padding_high(2); conv_window.mutable_dimensions(i)->set_base_dilation(2); } HloInstruction* conv = builder.AddInstruction(HloInstruction::CreateConvolve( ShapeUtil::MakeShape(F32, {20, 10, 10, 192}), output, reverse_kernel, 1, 1, conv_window, tf_default_dnums_for_backward_input_, DefaultPrecisionConfig(2))); CHECK(ShapeUtil::Compatible( conv->shape(), ShapeInference::InferConvolveShape( output->shape(), reverse_kernel->shape(), 1, 1, conv_window, tf_default_dnums_for_backward_input_, std::nullopt) .value())); auto module = CreateNewVerifiedModule(); HloComputation* entry_computation = module->AddEntryComputation(builder.Build()); EXPECT_TRUE(RunPass(module.get())); EXPECT_THAT(entry_computation->root_instruction(), GmockMatch(m::GetTupleElement( m::CustomCall({kCudnnConvForwardCallTarget}), 0))); } TEST_F(ConvRewriterTest, BackwardInputConvolveUnevenPaddingOnActivations) { auto builder = HloComputation::Builder(TestName()); HloInstruction* output = builder.AddInstruction(HloInstruction::CreateParameter( 0, ShapeUtil::MakeShape(F32, {1, 1, 7, 
1}), "output")); HloInstruction* kernel = builder.AddInstruction(HloInstruction::CreateParameter( 1, ShapeUtil::MakeShape(F32, {1, 3, 1, 1}), "kernel")); HloInstruction* reverse_kernel = builder.AddInstruction( HloInstruction::CreateReverse(kernel->shape(), kernel, {0, 1})); Window conv_window = default_conv_window_; WindowDimension* forward_conv_col_dim = conv_window.mutable_dimensions(1); forward_conv_col_dim->set_size(3); forward_conv_col_dim->set_padding_low(2); forward_conv_col_dim->set_padding_high(1); forward_conv_col_dim->set_base_dilation(2); HloInstruction* conv = builder.AddInstruction(HloInstruction::CreateConvolve( ShapeUtil::MakeShape(F32, {1, 1, 14, 1}), output, reverse_kernel, 1, 1, conv_window, tf_default_dnums_for_backward_input_, DefaultPrecisionConfig(2))); CHECK(ShapeUtil::Compatible( conv->shape(), ShapeInference::InferConvolveShape( output->shape(), reverse_kernel->shape(), 1, 1, conv_window, tf_default_dnums_for_backward_input_, std::nullopt) .value())); auto module = CreateNewVerifiedModule(); const HloComputation* entry_computation = module->AddEntryComputation(builder.Build()); EXPECT_TRUE(RunPass(module.get())); ASSERT_THAT(entry_computation->root_instruction(), GmockMatch(m::GetTupleElement( m::CustomCall({kCudnnConvBackwardInputCallTarget}), 0))); const WindowDimension& backward_conv_col_dim = entry_computation->root_instruction()->operand(0)->window().dimensions(1); EXPECT_EQ(0, backward_conv_col_dim.padding_low()); EXPECT_EQ(1, backward_conv_col_dim.padding_high()); } TEST_F(ConvRewriterTest, BackwardInputConvolveNegativePaddingHighOnActivations) { auto builder = HloComputation::Builder(TestName()); HloInstruction* output = builder.AddInstruction(HloInstruction::CreateParameter( 0, ShapeUtil::MakeShape(F32, {1, 1, 3, 1}), "output")); HloInstruction* kernel = builder.AddInstruction(HloInstruction::CreateParameter( 1, ShapeUtil::MakeShape(F32, {1, 2, 1, 1}), "kernel")); HloInstruction* reverse_kernel = builder.AddInstruction( HloInstruction::CreateReverse(kernel->shape(), kernel, {0, 1})); Window conv_window = default_conv_window_; WindowDimension* forward_conv_col_dim = conv_window.mutable_dimensions(1); forward_conv_col_dim->set_size(2); forward_conv_col_dim->set_padding_high(2); HloInstruction* conv = builder.AddInstruction(HloInstruction::CreateConvolve( ShapeUtil::MakeShape(F32, {1, 1, 4, 1}), output, reverse_kernel, 1, 1, conv_window, tf_default_dnums_for_backward_input_, DefaultPrecisionConfig(2))); CHECK(ShapeUtil::Compatible( conv->shape(), ShapeInference::InferConvolveShape( output->shape(), reverse_kernel->shape(), 1, 1, conv_window, tf_default_dnums_for_backward_input_, std::nullopt) .value())); auto module = CreateNewVerifiedModule(); HloComputation* entry_computation = module->AddEntryComputation(builder.Build()); EXPECT_TRUE(RunPass(module.get())); EXPECT_THAT(entry_computation->root_instruction(), GmockMatch(m::GetTupleElement( m::CustomCall({kCudnnConvForwardCallTarget}), 0))); } TEST_F(ConvRewriterTest, BackwardInputConvolveConstantFilter) { Array4D<float> constant_arr(4, 4, 2, 2); constant_arr.FillIota(0); std::string constant_str = LiteralUtil::CreateR4FromArray4D(constant_arr).ToStringWithoutShape(); const std::string module_str = absl::StrFormat(R"( HloModule test ENTRY entry_computation { param0 = f32[128,2,16,16]{3,2,1,0} parameter(0) constant = f32[4,4,2,2]{3,2,1,0} constant(%s) ROOT convolution = f32[128,2,32,32]{3,2,1,0} convolution(param0, constant), window={size=4x4 pad=2_2x2_2 lhs_dilate=2x2}, dim_labels=bf01_01oi->bf01, 
feature_group_count=1 })", constant_str); TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); EXPECT_TRUE(RunPass(m.get())); EXPECT_THAT(m->entry_computation()->root_instruction(), GmockMatch(m::GetTupleElement( m::CustomCall({kCudnnConvBackwardInputCallTarget}, m::Parameter(), m::Reverse(m::Constant())), 0))); } TEST_F(ConvRewriterTest, TestBackwardFilterPatternMatch) { const std::string module_str = absl::StrFormat(R"( HloModule Test ENTRY Test { input = f32[8,120,256,256] parameter(0) filter = f32[8,120,256,256] parameter(1) ROOT conv = f32[120,120,3,3] convolution(input, filter), window={size=256x256 pad=1_1x1_1}, dim_labels=fb01_io01->fb01 })"); TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); EXPECT_TRUE(RunPass(m.get())); EXPECT_THAT(m->entry_computation()->root_instruction(), GmockMatch(m::GetTupleElement( m::CustomCall({kCudnnConvBackwardFilterCallTarget}, m::Parameter(0), m::Parameter(1)), 0))); } TEST_F(ConvRewriterTest, TestBackwardFilterPatternNoMatch) { const std::string module_str = absl::StrFormat(R"( HloModule Test ENTRY Test { input = f32[8,128,2,32] parameter(0) filter = f32[3,3,128,128] parameter(1) ROOT conv = f32[8,128,2,32] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01 })"); TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); EXPECT_TRUE(RunPass(m.get())); EXPECT_THAT(m->entry_computation()->root_instruction(), GmockMatch(m::GetTupleElement( m::CustomCall({kCudnnConvForwardCallTarget}, m::Parameter(0), m::Parameter(1)), 0))); } TEST_F(ConvRewriterTest, TestConv1dBackwardFilterPatternMatch) { const std::string module_str = absl::StrFormat(R"( HloModule Test ENTRY Test { input = f32[8,256,128] parameter(0) filter = f32[8,254,128] parameter(1) reshape.1 = f32[8,1,256,128] reshape(input) reshape.2 = f32[8,1,254,128] reshape(filter) ROOT conv = f32[1,3,128,128] convolution(reshape.1, reshape.2), window={size=1x254}, dim_labels=f01b_i01o->01bf })"); TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); EXPECT_TRUE(RunPass(m.get())); EXPECT_THAT(m->entry_computation()->root_instruction(), GmockMatch(m::GetTupleElement( m::CustomCall({kCudnnConvBackwardFilterCallTarget}, m::Reshape(), m::Reshape()), 0))); } TEST_F(ConvRewriterTest, TestConv1dBackwardInputPatternMatch) { const std::string module_str = absl::StrFormat(R"( HloModule Test ENTRY Test { input = f32[8,254,128] parameter(0) filter = f32[3,128,128] parameter(1) reverse = f32[3,128,128] reverse(filter), dimensions={0} reshape.1 = f32[8,1,254,128] reshape(input) reshape.2 = f32[1,3,128,128] reshape(reverse) ROOT conv = f32[8,1,256,128] convolution(reshape.1, reshape.2), window={size=1x3 pad=0_0x2_2}, dim_labels=b01f_01oi->b01f })"); TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); EXPECT_TRUE(RunPass(m.get())); EXPECT_THAT(m->entry_computation()->root_instruction(), GmockMatch(m::GetTupleElement( m::CustomCall({kCudnnConvBackwardInputCallTarget}, m::Reshape(), m::Reshape()), 0))); } TEST_F(ConvRewriterTest, TestInvalidTypes) { const std::string module_str = absl::StrFormat(R"( HloModule Test ENTRY Test { input = TYPE[1,17,9,9] parameter(0) filter = TYPE[3,3,17,32] parameter(1) ROOT conv = TYPE[1,32,9,9] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1 })"); for (std::string_view type : {"c64", "c128"}) { const std::string module_with_type = absl::StrReplaceAll(module_str, {{"TYPE", type}}); 
TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_with_type)); absl::Status s = ConvRewriter(GetComputeCapability()).Run(m.get()).status(); EXPECT_THAT( s, tsl::testing::StatusIs( absl::StatusCode::kUnimplemented, ::testing::HasSubstr("Convolutions must have floating-point or " "integral operands/outputs"))); } std::string module_with_type = absl::StrReplaceAll(module_str, {{"TYPE", "f8e4m3fn"}}); TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_with_type)); absl::Status s = ConvRewriter(se::CudaComputeCapability::Ampere()).Run(m.get()).status(); EXPECT_THAT(s, tsl::testing::StatusIs( absl::StatusCode::kUnimplemented, ::testing::HasSubstr( "FP8 convolutions are only supported on CUDA " "GPUs with compute capability at least 9.0"))); s = ConvRewriter(se::RocmComputeCapability{"gfx942"}).Run(m.get()).status(); EXPECT_THAT(s, tsl::testing::StatusIs( absl::StatusCode::kUnimplemented, ::testing::HasSubstr( "FP8 convolutions are only supported on CUDA GPUs"))); module_with_type = absl::StrReplaceAll(module_str, {{"TYPE", "f8e4m3fnuz"}}); TF_ASSERT_OK_AND_ASSIGN(m, ParseAndReturnVerifiedModule(module_with_type)); s = ConvRewriter(GetComputeCapability()).Run(m.get()).status(); EXPECT_THAT(s, tsl::testing::StatusIs( absl::StatusCode::kUnimplemented, ::testing::HasSubstr("The only FP8 types supported in " "convolutions are f8e5m2 and f8e4m3"))); } } } }
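For orientation, the cases above all follow one pattern: build or parse an HLO convolution, run ConvRewriter for a target compute capability, and match the new root against one of the cuDNN custom-call targets (the rewritten convolution returns a (result, scratch) tuple, hence the get-tuple-element matcher). The sketch below is a hypothetical extra test in the same style; it reuses the ConvRewriterTest fixture helpers (RunPass, GetComputeCapability) relied on by the existing cases and is not part of the original file.

// Hypothetical sketch only: assumes the ConvRewriterTest fixture above, where
// RunPass() wraps ConvRewriter(GetComputeCapability()).Run(module).
TEST_F(ConvRewriterTest, SketchForwardConvolutionLowersToCudnnCustomCall) {
  const std::string module_str = R"(
    HloModule Test
    ENTRY Test {
      input = f32[1,17,9,9] parameter(0)
      filter = f32[3,3,17,32] parameter(1)
      ROOT conv = f32[1,32,9,9] convolution(input, filter),
          window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01
    })";
  TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
  // A plain forward convolution is rewritten to the forward cuDNN call target,
  // exposed as element 0 of the (result, scratch) tuple.
  EXPECT_TRUE(RunPass(m.get()));
  EXPECT_THAT(m->entry_computation()->root_instruction(),
              GmockMatch(m::GetTupleElement(
                  m::CustomCall({kCudnnConvForwardCallTarget}), 0)));
}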
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/conv_rewriter.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/conv_rewriter_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
8d583ed5-842e-4e47-aa20-b89afa63b4b2
cpp
tensorflow/tensorflow
softmax_rewriter_triton
third_party/xla/xla/service/gpu/transforms/softmax_rewriter_triton.cc
third_party/xla/xla/service/gpu/transforms/softmax_rewriter_triton_test.cc
#include "xla/service/gpu/transforms/softmax_rewriter_triton.h" #include <cstdint> #include <functional> #include <string> #include <utility> #include <variant> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "absl/time/time.h" #include "mlir/IR/MLIRContext.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/pass/hlo_pass_fix.h" #include "xla/hlo/pass/hlo_pass_pipeline.h" #include "xla/hlo/utils/hlo_query.h" #include "xla/layout_util.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/fusion_pipeline.h" #include "xla/service/gpu/fusions/triton/triton_support.h" #include "xla/service/gpu/hlo_traversal.h" #include "xla/service/gpu/ir_emission_utils.h" #include "xla/service/gpu/model/fusion_analysis_cache.h" #include "xla/service/gpu/model/gpu_hlo_cost_analysis.h" #include "xla/service/gpu/model/gpu_indexing_performance_model.h" #include "xla/service/gpu/model/gpu_performance_model.h" #include "xla/service/gpu/model/gpu_performance_model_base.h" #include "xla/service/gpu/model/symbolic_tile_analysis.h" #include "xla/service/gpu/model/tiled_hlo_computation.h" #include "xla/service/gpu/model/triton_emitter_constraints.h" #include "xla/service/gpu/transforms/reduction_dimension_grouper.h" #include "xla/service/gpu/transforms/reduction_splitter.h" #include "xla/service/gpu/transforms/tree_reduction_rewriter.h" #include "xla/service/hlo_cost_analysis.h" #include "xla/service/instruction_fusion.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/stream_executor/device_description.h" #include "xla/tools/hlo_decomposer.h" #include "xla/util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { using hlo_query::IsBroadcastOfParameter; using hlo_query::IsBroadcastOfScalarConstant; bool HasDefaultLayout(const Shape& shape) { return shape.has_layout() && LayoutUtil::IsMonotonicWithDim0Major(shape.layout()); } bool TrivialEdge(HloInstruction** producer, HloInstruction* consumer, HloOpcode opcode, const se::GpuComputeCapability& gpu_version); bool BitcastIsTilingNoop(HloInstruction* bitcast, const se::GpuComputeCapability& gpu_version) { CHECK_EQ(bitcast->opcode(), HloOpcode::kBitcast); if (ShapeUtil::IsEffectiveScalar(bitcast->shape())) { return true; } auto last_dimension = [](const HloInstruction* instr) { return instr->shape().dimensions().back(); }; HloInstruction* reduce = nullptr; TrivialEdge(&reduce, bitcast->mutable_operand(0), HloOpcode::kReduce, gpu_version); return (HasDefaultLayout(bitcast->shape()) && HasDefaultLayout(bitcast->operand(0)->shape()) && (reduce != nullptr || last_dimension(bitcast->operand(0)) == last_dimension(bitcast))); } inline bool HasOneUse(const HloInstruction* instr) { return instr->user_count() == 1; } bool IsBatchOrReductionDimBroadcast(const HloInstruction& hlo) { CHECK_EQ(hlo.opcode(), HloOpcode::kBroadcast) << "Expected broadcast " << hlo.ToShortString(); CHECK_EQ(hlo.operand(0)->opcode(), HloOpcode::kParameter) << "Expected parameter " << hlo.operand(0)->ToShortString(); const 
HloBroadcastInstruction* broadcast = Cast<HloBroadcastInstruction>(&hlo); const HloParameterInstruction* parameter = Cast<HloParameterInstruction>(hlo.operand(0)); if (broadcast->dimensions().empty() || parameter->shape().dimensions_size() + 1 != broadcast->shape().dimensions_size()) { return false; } bool preserve_first_dim = broadcast->dimensions().front() == 0; bool preserve_last_dim = broadcast->dimensions().back() == broadcast->shape().dimensions_size() - 1; return !(preserve_first_dim && preserve_last_dim); } bool IsBroadcastOfAScalar(const HloInstruction& hlo) { CHECK_EQ(hlo.opcode(), HloOpcode::kBroadcast) << "Expected broadcast " << hlo.ToShortString(); return ShapeUtil::IsScalar(hlo.operand(0)->shape()); } bool IsSingleRowParameterBroadcast(const HloInstruction& hlo) { CHECK_EQ(hlo.opcode(), HloOpcode::kBroadcast) << "Expected broadcast " << hlo.ToShortString(); CHECK_EQ(hlo.operand(0)->opcode(), HloOpcode::kParameter) << "Expected parameter " << hlo.operand(0)->ToShortString(); const HloBroadcastInstruction* broadcast = Cast<HloBroadcastInstruction>(&hlo); const HloParameterInstruction* parameter = Cast<HloParameterInstruction>(hlo.operand(0)); if (parameter->shape().dimensions_size() != 1) { return false; } return broadcast->dimensions()[0] == broadcast->shape().dimensions_size() - 1; } bool IsSupportedBroadcastOfParameter(const HloInstruction& hlo) { return IsBroadcastOfParameter(hlo) && (IsBatchOrReductionDimBroadcast(hlo) || IsBroadcastOfAScalar(hlo) || IsSingleRowParameterBroadcast(hlo)); } HloInstruction* ChooseOperandForFusionProcessing(HloInstruction* instr) { CHECK_GT(instr->operand_count(), 0); CHECK_LE(instr->operand_count(), 2); if (instr->operand_count() > 1 && (IsBroadcastOfScalarConstant(*instr->operand(0)) || IsSupportedBroadcastOfParameter(*instr->operand(0)))) { return instr->mutable_operand(1); } return instr->mutable_operand(0); } bool IsTriviallyFusible(HloInstruction* instr, const se::GpuComputeCapability& gpu_version, int num_allowed_users = 1) { if (instr->user_count() > num_allowed_users || !HasDefaultLayout(instr->shape())) { return false; } if (instr->opcode() == HloOpcode::kBitcast && BitcastIsTilingNoop(instr, gpu_version)) { return true; } if (instr->IsElementwise() && instr->operand_count() == 1) { return static_cast<bool>(IsTritonSupportedInstruction(*instr, gpu_version)); } if (instr->IsElementwiseBinary()) { const HloInstruction* operand_0 = instr->operand(0); const HloInstruction* operand_1 = instr->operand(1); if (operand_0 == operand_1) { return static_cast<bool>( IsTritonSupportedInstruction(*instr, gpu_version)); } if ((IsBroadcastOfScalarConstant(*operand_0) || IsSupportedBroadcastOfParameter(*operand_0)) ^ (IsBroadcastOfScalarConstant(*operand_1) || IsSupportedBroadcastOfParameter(*operand_1))) { return static_cast<bool>( IsTritonSupportedInstruction(*instr, gpu_version)); } } return false; } bool TrivialEdge(HloInstruction** producer, HloInstruction* consumer, HloOpcode opcode, const se::GpuComputeCapability& gpu_version) { while (consumer->opcode() != opcode) { if (IsTriviallyFusible(consumer, gpu_version)) { consumer = ChooseOperandForFusionProcessing(consumer); } else { return false; } } *producer = consumer; return true; } bool IsTriviallyConnectedProducerOf( HloInstruction* producer, HloInstruction* consumer, const se::GpuComputeCapability& gpu_version) { if (producer == consumer) { return true; } HloInstruction* found_producer = consumer; while ( TrivialEdge(&found_producer, consumer, producer->opcode(), gpu_version)) { if 
(found_producer == producer) { return true; } if (!IsTriviallyFusible(found_producer, gpu_version)) { return false; } consumer = found_producer->mutable_operand(0); } return false; } HloInstruction* FindFirstNonFusibleDiamondProducer( HloInstruction* diamond_producer, const se::GpuComputeCapability& gpu_version) { if (IsTriviallyFusible(diamond_producer, gpu_version, 2)) { diamond_producer = ChooseOperandForFusionProcessing(diamond_producer); while (IsTriviallyFusible(diamond_producer, gpu_version)) { diamond_producer = ChooseOperandForFusionProcessing(diamond_producer); } } return diamond_producer; } absl::StatusOr<HloFusionInstruction*> MakeFusionForDiamondChain( const DiamondChainDescriptor& diamond_chain) { auto [root, producer] = diamond_chain; std::string suggested_name = "triton_softmax"; HloComputation::Builder builder(absl::StrCat(suggested_name, "_computation")); absl::flat_hash_map<const HloInstruction*, HloInstruction*> old_to_new_mapping; int param = 0; old_to_new_mapping[producer] = builder.AddInstruction(HloInstruction::CreateParameter( param, producer->shape(), absl::StrCat("parameter_", param))); param++; std::vector<HloInstruction*> parameters = {producer}; std::function<void(HloInstruction*)> create_computation = [&](HloInstruction* instr) -> void { if (old_to_new_mapping.contains(instr)) { return; } std::vector<HloInstruction*> new_operands; for (HloInstruction* operand : instr->mutable_operands()) { create_computation(operand); new_operands.push_back(old_to_new_mapping[operand]); } if (instr->opcode() == HloOpcode::kParameter) { old_to_new_mapping[instr] = builder.AddInstruction(HloInstruction::CreateParameter( param, instr->shape(), absl::StrCat("parameter_", param))); parameters.push_back(instr); param++; } else { old_to_new_mapping[instr] = builder.AddInstruction( instr->CloneWithNewOperands(instr->shape(), new_operands)); } }; create_computation(root); HloComputation* computation = root->GetModule()->AddComputationAndUnifyNamesAndIds(builder.Build(), false); HloInstruction* softmax_fusion = root->parent()->AddInstruction(HloInstruction::CreateFusion( root->shape(), HloInstruction::FusionKind::kCustom, parameters, computation)); softmax_fusion->GetModule()->SetAndUniquifyInstrName(softmax_fusion, "triton_softmax"); TF_ASSIGN_OR_RETURN(auto gpu_config, softmax_fusion->backend_config<GpuBackendConfig>()); FusionBackendConfig& backend_config = *gpu_config.mutable_fusion_backend_config(); backend_config.set_kind(std::string(kTritonFusionKind)); TF_RETURN_IF_ERROR(softmax_fusion->set_backend_config(gpu_config)); return xla::Cast<HloFusionInstruction>(softmax_fusion); } absl::Status RunFusionPipeline( HloModule* module, const se::DeviceDescription& device_info, const HloCostAnalysis::ShapeSizeFunction& shape_size) { HloPassPipeline reduction_pipeline("reduction_pipeline"); reduction_pipeline.AddPass<ReductionDimensionGrouper>(); reduction_pipeline.AddPass<HloPassFix<ReductionSplitter>>( false); reduction_pipeline.AddPass<HloPassFix<TreeReductionRewriter>>( device_info.gpu_compute_capability()); TF_RETURN_IF_ERROR(reduction_pipeline.Run(module).status()); return FusionPipeline(module->config().debug_options(), shape_size, nullptr, device_info) .Run(module) .status(); } absl::StatusOr<absl::Duration> EstimateOptimizedHloRunTimeWithoutSoftMaxRewriterTriton( const HloFusionInstruction* fusion, const se::DeviceDescription& device_info, const HloCostAnalysis::ShapeSizeFunction& shape_size) { auto new_module = ExtractComputationIntoNewModule( 
*fusion->fused_instructions_computation()); TF_RETURN_IF_ERROR( RunFusionPipeline(new_module.get(), device_info, shape_size)); VLOG(10) << "priority fusion module: " << new_module->ToString(); HloComputation* entry_computation = new_module->entry_computation(); GpuHloCostAnalysis::Options cost_analysis_options{ shape_size, {}, {}, true}; GpuHloCostAnalysis cost_analysis(cost_analysis_options, device_info); TF_RETURN_IF_ERROR(entry_computation->Accept(&cost_analysis)); absl::Duration total_run_time = absl::ZeroDuration(); for (const HloInstruction* instr : entry_computation->instructions()) { total_run_time += GpuPerformanceModel::EstimateRunTimeForInstruction( instr, device_info, &cost_analysis, GpuPerformanceModelOptions::PriorityFusion()) .exec_time; } return total_run_time; } absl::StatusOr<FusionDecision> DecideIfShouldFuseAndMaybeSetBlockLevelParameters( HloFusionInstruction* softmax_fusion, GpuPerformanceModelWithIndexingAnalysis& indexing_performance_model, const se::DeviceDescription& device_info, const HloCostAnalysis::ShapeSizeFunction& shape_size, bool use_cost_model_to_evaluate_fusions) { auto fusion_adaptor = HloFusionAdaptor::ForInstruction(softmax_fusion); TF_ASSIGN_OR_RETURN( TiledRunTimeDataOrError tiled_runtime_data_or, indexing_performance_model.TryFindBestTilingForFusion(*fusion_adaptor)); if (const auto* fusion_decision = std::get_if<FusionDecision>(&tiled_runtime_data_or)) { return FusionDecision::Forbid(absl::StrCat("SymbolicTileAnalysis failed: ", fusion_decision->Explain())); } TiledRunTimeData tiled_runtime_data = std::get<TiledRunTimeData>(std::move(tiled_runtime_data_or)); if (use_cost_model_to_evaluate_fusions) { TF_ASSIGN_OR_RETURN(absl::Duration run_time_without_softmax_rewriter, EstimateOptimizedHloRunTimeWithoutSoftMaxRewriterTriton( softmax_fusion, device_info, shape_size)); VLOG(5) << "run time estimate if normalization diamond fused together: " << tiled_runtime_data.runtime_data.exec_time; VLOG(5) << "run time estimate if normalization diamond is not fused together: " << run_time_without_softmax_rewriter; if (run_time_without_softmax_rewriter < tiled_runtime_data.runtime_data.exec_time) { return FusionDecision::Forbid( "Run time estimate for without applying the custom normalization " "rewrite is faster."); } } TF_ASSIGN_OR_RETURN(auto backend_config, softmax_fusion->backend_config<GpuBackendConfig>()); *backend_config.mutable_fusion_backend_config() ->mutable_block_level_fusion_config() = tiled_runtime_data.block_level_parameters.ToBlockLevelFusionConfig(); TF_RETURN_IF_ERROR(softmax_fusion->set_backend_config(backend_config)); VLOG(5) << "Fusing with backend config: " << backend_config.DebugString(); return FusionDecision::Allow(); } absl::StatusOr<bool> MaybeFuseDiamondChainImpl( const DiamondChainDescriptor& diamond_chain, GpuPerformanceModelWithIndexingAnalysis& indexing_performance_model, const se::DeviceDescription& device_info, const HloCostAnalysis::ShapeSizeFunction& shape_size, bool use_cost_model_to_evaluate_fusions) { TF_ASSIGN_OR_RETURN(HloFusionInstruction * softmax_fusion, MakeFusionForDiamondChain(diamond_chain)); HloInstruction* root = diamond_chain.root; VLOG(5) << "MaybeFuseDiamondChainImpl: " << softmax_fusion->ToString(); TF_ASSIGN_OR_RETURN( FusionDecision fusion_decision, DecideIfShouldFuseAndMaybeSetBlockLevelParameters( softmax_fusion, indexing_performance_model, device_info, shape_size, use_cost_model_to_evaluate_fusions)); if (!fusion_decision.CanFuse()) { VLOG(5) << "Not fusing: " << fusion_decision.Explain(); 
softmax_fusion->DetachFromOperandsAndUsers(); TF_RETURN_IF_ERROR( softmax_fusion->parent()->RemoveInstruction(softmax_fusion)); return false; } if (root->IsRoot()) { root->parent()->set_root_instruction(softmax_fusion); TF_RETURN_IF_ERROR( root->parent()->RemoveInstructionAndUnusedOperands(root)); } else { TF_RETURN_IF_ERROR( root->parent()->ReplaceInstruction(root, softmax_fusion)); } return true; } absl::StatusOr<bool> CanSymbolicTileAnalysisTileDiamondChain( const DiamondChainDescriptor& diamond_chain, const se::DeviceDescription& device_info) { TF_ASSIGN_OR_RETURN(HloFusionInstruction * softmax_fusion, MakeFusionForDiamondChain(diamond_chain)); mlir::MLIRContext context; SymbolicTileAnalysisOrError symbolic_tile_analysis_or_error = SymbolicTileAnalysis::AnalyzeComputation( *softmax_fusion->called_computation(), &context, TritonEmitterConstraints::GetBuilder(device_info)); bool can_tile = std::holds_alternative<SymbolicTileAnalysis>( symbolic_tile_analysis_or_error); TF_RETURN_IF_ERROR(diamond_chain.root->GetModule()->RemoveEmbeddedComputation( softmax_fusion->called_computation())); TF_RETURN_IF_ERROR( diamond_chain.root->parent()->RemoveInstruction(softmax_fusion)); return can_tile; } FusionDecision ShouldFuseReduction(const HloInstruction& reduce, const se::GpuComputeCapability& cc) { if (CodegenDecision is_supported = IsTritonSupportedInstruction(reduce, cc); !is_supported) { return FusionDecision::Forbid(is_supported.Explain()); } if (reduce.dimensions().size() != 1 || reduce.dimensions(0) != reduce.operand(0)->shape().rank() - 1) { return FusionDecision::Forbid( "The reductions in the diamond must reduce 1 dimension and that " "dimension must be the last dimension of the operand."); } const HloInstruction* identity = reduce.operand(1); bool should_fuse_identity = identity->opcode() == HloOpcode::kConstant || (identity->opcode() == HloOpcode::kConvert && identity->operand(0)->opcode() == HloOpcode::kConstant && IsTritonSupportedInstruction(*identity, cc)); if (!should_fuse_identity) { return FusionDecision::Forbid( "Reduction identity is not a constant or a supported convert of a " "constant."); } return FusionDecision::Allow(); } DiamondMatchingDecision MatchesTritonCompatibleClosedReductionDiamondImpl( HloInstruction* instr, const se::GpuComputeCapability& cc) { if (!instr->IsElementwiseBinary()) { return FusionDecision::Forbid("Root is not elementwise binary."); } if (!IsTritonSupportedInstruction(*instr, cc)) { return FusionDecision::Forbid( "Root is not supported for Triton instruction."); } HloInstruction* producer; HloInstruction* broadcast; HloInstruction* reduce; if (!TrivialEdge(&broadcast, instr->mutable_operand(1), HloOpcode::kBroadcast, cc)) { return FusionDecision::Forbid( "Could not find a trivial connection from root to a broadcast."); } if (!TrivialEdge(&reduce, broadcast->mutable_operand(0), HloOpcode::kReduce, cc)) { return FusionDecision::Forbid( "Could not find a trivial connection from matched broadcast to a " "reduction."); } if (!(HasDefaultLayout(broadcast->shape()) && HasDefaultLayout(reduce->shape()))) { return FusionDecision::Forbid( "Broadcast or reduce have non-default layouts."); } if (FusionDecision should_fuse_reduction = ShouldFuseReduction(*reduce, cc); !should_fuse_reduction) { VLOG(2) << should_fuse_reduction.Explain(); return should_fuse_reduction; } const HloInstruction* identity = reduce->operand(1); bool should_fuse_identity = identity->opcode() == HloOpcode::kConstant || (identity->opcode() == HloOpcode::kConvert && 
identity->operand(0)->opcode() == HloOpcode::kConstant && IsTritonSupportedInstruction(*identity, cc)); if (!should_fuse_identity) { return FusionDecision::Forbid( "Reduction identity is not a constant or a supported convert of a " "constant."); } if (!HasOneUse(broadcast) || !HasOneUse(reduce)) { return FusionDecision::Forbid("More than one use of broadcast or reduce."); } producer = reduce->mutable_operand(0); if (absl::c_linear_search(broadcast->dimensions(), broadcast->shape().rank() - 1)) { return FusionDecision::Forbid( "Broadcast is not along the reduction dimension."); } while (IsTriviallyFusible(producer, cc)) { producer = ChooseOperandForFusionProcessing(producer); } if (!HasDefaultLayout(producer->shape())) { return FusionDecision::Forbid("Producer has non-default layout."); } if (!IsTriviallyConnectedProducerOf(producer, instr->mutable_operand(0), cc)) { return FusionDecision::Forbid("Producer is not trivially connected."); } if (producer != instr->operand(0) && instr->operand(0)->user_count() != 1) { return FusionDecision::Forbid("Unsupported root-producer connection."); } VLOG(5) << "Matched Softmax diamond with: "; VLOG(5) << "root: " << instr->ToString(); VLOG(5) << "producer: " << producer->ToString(); VLOG(5) << "broadcast: " << broadcast->ToString(); VLOG(5) << "reduce: " << reduce->ToString(); return producer; } absl::StatusOr<std::vector<DiamondChainDescriptor>> FindAllFusibleDiamonds( HloModule& module, const absl::flat_hash_set<absl::string_view>& execution_threads, const se::DeviceDescription& device_info) { const se::GpuComputeCapability& cc = device_info.gpu_compute_capability(); std::vector<DiamondChainDescriptor> matched_diamonds; for (HloComputation* comp : module.MakeNonfusionComputations(execution_threads)) { if (comp->IsCustomCallComputation()) { continue; } for (HloInstruction* instr : comp->MakeInstructionPostOrder()) { auto producer = MatchesTritonCompatibleClosedReductionDiamondImpl(instr, cc); if (std::holds_alternative<HloInstruction*>(producer)) { DiamondChainDescriptor diamond_chain{ instr, std::get<HloInstruction*>(producer)}; TF_ASSIGN_OR_RETURN(bool can_tile_diamond_chain, CanSymbolicTileAnalysisTileDiamondChain( diamond_chain, device_info)); if (can_tile_diamond_chain) { matched_diamonds.push_back(diamond_chain); } else { VLOG(5) << "Cannot tile the diamond pattern described by " << "instructions " << instr->ToString() << " and " << std::get<HloInstruction*>(producer)->ToString() << "."; continue; } } else { VLOG(5) << "Cannot match the diamond pattern for instruction " << instr->ToString() << ". 
Reason: " << std::get<FusionDecision>(producer).Explain(); } } } return std::move(matched_diamonds); } int64_t GetReductionDimensionSizeForDiamond( const DiamondChainDescriptor& diamond_chain) { HloInstruction* diamond_root = diamond_chain.root; HloInstruction* instr = diamond_root->mutable_operand(1); while (instr->opcode() != HloOpcode::kReduce) { instr = ChooseOperandForFusionProcessing(instr); } int operand_rank = instr->operand(0)->shape().rank(); CHECK_EQ(instr->dimensions().size(), 1); CHECK_EQ(instr->dimensions(0), operand_rank - 1); return instr->operand(0)->shape().dimensions(operand_rank - 1); } HloInstruction* GetLastTriviallyFusibleUser( HloInstruction* instr, const se::GpuComputeCapability& cc) { while (HasOneUse(instr) && !instr->IsRoot() && IsTriviallyFusible(instr->users().front(), cc)) { instr = instr->users().front(); } if (HasOneUse(instr) && !instr->IsRoot() && IsTriviallyFusible( instr->users().front(), cc, instr->users().front()->user_count())) { instr = instr->users().front(); } return instr; } } DiamondMatchingDecision SoftmaxRewriterTriton::MatchesTritonCompatibleClosedReductionDiamond( HloInstruction* instr) const { return MatchesTritonCompatibleClosedReductionDiamondImpl( instr, device_info_.gpu_compute_capability()); } absl::StatusOr<std::vector<DiamondChainDescriptor>> SoftmaxRewriterTriton::FindAllFusibleDiamondChains( HloModule& module, const absl::flat_hash_set<absl::string_view>& execution_threads) const { TF_ASSIGN_OR_RETURN( std::vector<DiamondChainDescriptor> matched_diamonds, FindAllFusibleDiamonds(module, execution_threads, device_info_)); if (matched_diamonds.empty()) { return std::vector<DiamondChainDescriptor>(); } std::vector<DiamondChainDescriptor> diamond_chains; diamond_chains.reserve(matched_diamonds.size()); const se::GpuComputeCapability& cc = device_info_.gpu_compute_capability(); HloInstruction* current_fusion_producer = FindFirstNonFusibleDiamondProducer(matched_diamonds.front().producer, cc); int current_reduce_dimension_size = GetReductionDimensionSizeForDiamond(matched_diamonds.front()); for (int diamond_idx = 1; diamond_idx < matched_diamonds.size(); ++diamond_idx) { HloInstruction* diamond_producer = matched_diamonds[diamond_idx].producer; HloInstruction* previous_diamond_root = matched_diamonds[diamond_idx - 1].root; HloInstruction* first_non_fusible_diamond_producer = FindFirstNonFusibleDiamondProducer(diamond_producer, cc); int diamond_reduce_dimension_size = GetReductionDimensionSizeForDiamond(matched_diamonds[diamond_idx]); if (first_non_fusible_diamond_producer == previous_diamond_root && ((first_non_fusible_diamond_producer != diamond_producer && HasOneUse(first_non_fusible_diamond_producer)) || (first_non_fusible_diamond_producer == diamond_producer && first_non_fusible_diamond_producer->user_count() == 2)) && diamond_reduce_dimension_size == current_reduce_dimension_size) { continue; } diamond_chains.push_back(DiamondChainDescriptor{ GetLastTriviallyFusibleUser(previous_diamond_root, cc), current_fusion_producer, }); current_fusion_producer = first_non_fusible_diamond_producer; current_reduce_dimension_size = diamond_reduce_dimension_size; } diamond_chains.push_back(DiamondChainDescriptor{ GetLastTriviallyFusibleUser(matched_diamonds.back().root, cc), current_fusion_producer}); std::vector<DiamondChainDescriptor> filtered_diamond_chains; for (const DiamondChainDescriptor& diamond_chain : diamond_chains) { TF_ASSIGN_OR_RETURN( bool can_tile_diamond_chain, CanSymbolicTileAnalysisTileDiamondChain(diamond_chain, device_info_)); 
if (can_tile_diamond_chain) { filtered_diamond_chains.push_back(diamond_chain); } } return filtered_diamond_chains; } absl::StatusOr<bool> SoftmaxRewriterTriton::MaybeFuseDiamondChain( const DiamondChainDescriptor& diamond_chain) { HloFusionAnalysisCache fusion_analysis_cache(device_info_); GpuPerformanceModelWithIndexingAnalysis indexing_performance_model( &device_info_, &fusion_analysis_cache, shape_size_, &mlir_context_); return MaybeFuseDiamondChainImpl(diamond_chain, indexing_performance_model, device_info_, shape_size_, use_cost_model_to_evaluate_fusions_); } absl::StatusOr<bool> SoftmaxRewriterTriton::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { TF_RETURN_IF_ERROR(EnsureTritonSupportsComputeCapability( device_info_.gpu_compute_capability())); TF_ASSIGN_OR_RETURN(std::vector<DiamondChainDescriptor> diamond_chains, FindAllFusibleDiamondChains(*module, execution_threads)); bool changed = false; for (auto diamond_chain = diamond_chains.rbegin(); diamond_chain != diamond_chains.rend(); ++diamond_chain) { TF_ASSIGN_OR_RETURN(bool fused, MaybeFuseDiamondChain(*diamond_chain)); changed |= fused; } return changed; } } }
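Before the unit tests that follow, here is a minimal, hypothetical driver sketch showing how this pass is typically wired up, mirroring the fixtures in the test file below: construct SoftmaxRewriterTriton from a device description and a shape-size function, then run it on an HloModule. The TestGpuDeviceInfo helper and the 8-byte pointer size are assumptions borrowed from those tests, not part of the pass itself.

// Hypothetical sketch: assumes the XLA test helper TestGpuDeviceInfo used in
// the accompanying unit tests; it only illustrates how the pass is invoked.
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/transforms/softmax_rewriter_triton.h"
#include "xla/shape_util.h"

namespace xla {
namespace gpu {

absl::StatusOr<bool> RunSoftmaxRewriterForTest(HloModule* module) {
  const se::DeviceDescription device_info =
      TestGpuDeviceInfo::RTXA6000DeviceInfo();
  // Shape-size function in the style of the tests: byte size of a shape with
  // an assumed 8-byte pointer size.
  auto shape_size = [](const Shape& shape) {
    return ShapeUtil::ByteSizeOf(shape, /*pointer_size=*/8);
  };
  SoftmaxRewriterTriton rewriter(device_info, shape_size);
  // Returns true iff at least one normalization diamond chain was rewritten
  // into a Triton block-level fusion.
  return rewriter.Run(module);
}

}  // namespace gpu
}  // namespace xla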
#include "xla/service/gpu/transforms/softmax_rewriter_triton.h" #include <cstdint> #include <memory> #include <string> #include <variant> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/log/check.h" #include "absl/log/log.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_query.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/fusions/triton/triton_support.h" #include "xla/service/gpu/gpu_device_info_for_tests.h" #include "xla/service/gpu/model/gpu_hlo_cost_analysis.h" #include "xla/service/instruction_fusion.h" #include "xla/service/pattern_matcher.h" #include "xla/service/pattern_matcher_gmock.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/stream_executor/device_description.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/errors.h" #include "tsl/platform/status_matchers.h" namespace xla { namespace gpu { namespace { namespace m = ::xla::match; using ::testing::HasSubstr; GpuHloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() { return [&](const Shape& shape) { constexpr int64_t kPointerSize = 8; return ShapeUtil::ByteSizeOf(shape, kPointerSize); }; } bool HasBlockLevelFusionConfig(const HloInstruction* fusion) { return fusion->opcode() == HloOpcode::kFusion && fusion->has_backend_config() && fusion->backend_config<GpuBackendConfig>().ok() && fusion->backend_config<GpuBackendConfig>() ->fusion_backend_config() .has_block_level_fusion_config(); } class SoftmaxRewriterTritonTest : public HloTestBase, public ::testing::WithParamInterface<PrimitiveType> { protected: se::DeviceDescription device_info_{TestGpuDeviceInfo::RTXA6000DeviceInfo()}; SoftmaxRewriterTriton fusion_rewriter_{device_info_, ShapeSizeBytesFunction()}; }; TEST_F(SoftmaxRewriterTritonTest, CanFuseExactSoftmaxF32) { const std::string hlo_string = R"( HloModule softmax max_computation { arg_0 = f32[] parameter(0) arg_1 = f32[] parameter(1) ROOT maximum = f32[] maximum(arg_0, arg_1) } add_computation { arg_0.1 = f32[] parameter(0) arg_1.1 = f32[] parameter(1) ROOT add = f32[] add(arg_0.1, arg_1.1) } ENTRY main { param_0 = f32[127,125]{1,0} parameter(0) constant_neg_inf = f32[] constant(-inf) reduce = f32[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation broadcast = f32[127,125]{1,0} broadcast(reduce), dimensions={0} subtract = f32[127,125]{1,0} subtract(param_0, broadcast) exponential = f32[127,125]{1,0} exponential(subtract) constant_zero = f32[] constant(0) second_reduce = f32[127]{0} reduce(exponential, constant_zero), dimensions={1}, to_apply=add_computation second_broadcast = f32[127,125]{1,0} broadcast(second_reduce), dimensions={0} ROOT divide = f32[127,125]{1,0} divide(exponential, second_broadcast) } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_TRUE(fusion_rewriter_.Run(module.get()).value()); EXPECT_TRUE(verifier().Run(module.get()).status().ok()); VLOG(2) << module->ToString(); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch( m::Fusion(m::Parameter()).WithPredicate(HasBlockLevelFusionConfig))); } TEST_F(SoftmaxRewriterTritonTest, CanFuseSoftmaxLikeComputationWithNonF32DataType) { const std::string hlo_string = R"( HloModule softmax max_computation { arg_0 = f16[] parameter(0) arg_1 = f16[] parameter(1) ROOT maximum = f16[] maximum(arg_0, arg_1) } add_computation { arg_0.1 = f16[] parameter(0) arg_1.1 = f16[] parameter(1) ROOT add = f16[] add(arg_0.1, arg_1.1) } ENTRY main { param_0 = 
f16[127,125]{1,0} parameter(0) constant_neg_inf = f16[] constant(-inf) reduce = f16[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation broadcast = f16[127,125]{1,0} broadcast(reduce), dimensions={0} subtract = f16[127,125]{1,0} subtract(param_0, broadcast) abs = f16[127,125]{1,0} abs(subtract) constant_zero = f16[] constant(0) second_reduce = f16[127]{0} reduce(abs, constant_zero), dimensions={1}, to_apply=add_computation second_broadcast = f16[127,125]{1,0} broadcast(second_reduce), dimensions={0} ROOT multiply = f16[127,125]{1,0} multiply(abs, second_broadcast) } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_TRUE(fusion_rewriter_.Run(module.get()).value()); EXPECT_TRUE(verifier().Run(module.get()).status().ok()); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch( m::Fusion(m::Parameter()).WithPredicate(HasBlockLevelFusionConfig))); } TEST_F(SoftmaxRewriterTritonTest, CanFuseSingleNormalizationDiamond) { const std::string hlo_string = R"( HloModule softmax max_computation { arg_0 = f32[] parameter(0) arg_1 = f32[] parameter(1) ROOT maximum = f32[] maximum(arg_0, arg_1) } ENTRY main { param_0 = f32[127,125]{1,0} parameter(0) constant_neg_inf = f32[] constant(-inf) reduce = f32[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation broadcast = f32[127,125]{1,0} broadcast(reduce), dimensions={0} ROOT subtract = f32[127,125]{1,0} subtract(param_0, broadcast) } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_TRUE(fusion_rewriter_.Run(module.get()).value()); EXPECT_TRUE(verifier().Run(module.get()).status().ok()); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch( m::Fusion(m::Parameter()).WithPredicate(HasBlockLevelFusionConfig))); } TEST_F(SoftmaxRewriterTritonTest, DoesNotFuseDiamondInvolvingUnsupportedTritonInstruction) { const std::string hlo_string = R"( HloModule softmax add_computation { arg_0.1 = bf16[] parameter(0) arg_1.1 = bf16[] parameter(1) ROOT add = bf16[] add(arg_0.1, arg_1.1) } ENTRY main { param_0 = bf16[127,125]{1,0} parameter(0) constant_zero = bf16[] constant(0) reduce = bf16[127]{0} reduce(param_0, constant_zero), dimensions={1}, to_apply=add_computation broadcast = bf16[127,125]{1,0} broadcast(reduce), dimensions={0} ROOT divide = bf16[127,125]{1,0} divide(param_0, broadcast) })"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); const HloInstruction* bf16_divide = module->entry_computation()->root_instruction(); EXPECT_FALSE(IsTritonSupportedInstruction( *bf16_divide, device_info_.gpu_compute_capability())); EXPECT_FALSE(fusion_rewriter_.Run(module.get()).value()); } TEST_F(SoftmaxRewriterTritonTest, DoesNotFuseInstructionsUnsupportedByTritonIntoDiamonds) { const std::string hlo_string = R"( HloModule softmax max_computation { arg_0 = bf16[] parameter(0) arg_1 = bf16[] parameter(1) ROOT maximum = bf16[] maximum(arg_0, arg_1) } ENTRY main { param_0 = bf16[127,125]{1,0} parameter(0) constant_neg_inf = bf16[] constant(-inf) reduce = bf16[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation broadcast = bf16[127,125]{1,0} broadcast(reduce), dimensions={0} subtract = bf16[127,125]{1,0} subtract(param_0, broadcast) ROOT exponential = bf16[127,125]{1,0} exponential(subtract) })"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); const HloInstruction* bf16_exponential = hlo_query::GetFirstInstructionWithOpcode(*module->entry_computation(), HloOpcode::kExp); 
EXPECT_FALSE(IsTritonSupportedInstruction( *bf16_exponential, device_info_.gpu_compute_capability())); EXPECT_TRUE(fusion_rewriter_.Run(module.get()).value()); EXPECT_TRUE(verifier().Run(module.get()).status().ok()); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch(m::Exp( m::Fusion(m::Parameter()).WithPredicate(HasBlockLevelFusionConfig)))); } TEST_F(SoftmaxRewriterTritonTest, CanNotFuseSoftmaxDiamondWithWrongLayout) { const std::string hlo_string = R"( HloModule softmax max_computation { arg_0 = f32[] parameter(0) arg_1 = f32[] parameter(1) ROOT maximum = f32[] maximum(arg_0, arg_1) } ENTRY main { param_0 = f32[127,125]{0,1} parameter(0) constant_neg_inf = f32[] constant(-inf) reduce = f32[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation broadcast = f32[127,125]{1,0} broadcast(reduce), dimensions={0} ROOT subtract = f32[127,125]{1,0} subtract(param_0, broadcast) } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_FALSE(fusion_rewriter_.Run(module.get()).value()); } TEST_F(SoftmaxRewriterTritonTest, CanNotFuseSoftmaxDiamondWithWrongReduceDimension) { const std::string hlo_string = R"( HloModule softmax max_computation { arg_0 = f32[] parameter(0) arg_1 = f32[] parameter(1) ROOT maximum = f32[] maximum(arg_0, arg_1) } ENTRY main { param_0 = f32[127,125]{1,0} parameter(0) constant_neg_inf = f32[] constant(-inf) reduce = f32[125]{0} reduce(param_0, constant_neg_inf), dimensions={0}, to_apply=max_computation broadcast = f32[127,125]{1,0} broadcast(reduce), dimensions={1} ROOT subtract = f32[127,125]{1,0} subtract(param_0, broadcast) } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_FALSE(fusion_rewriter_.Run(module.get()).value()); } TEST_F(SoftmaxRewriterTritonTest, CanNotFuseSoftmaxDiamondWithWrongBroadcastDimension) { const std::string hlo_string = R"( HloModule softmax max_computation { arg_0 = f32[] parameter(0) arg_1 = f32[] parameter(1) ROOT maximum = f32[] maximum(arg_0, arg_1) } ENTRY main { param_0 = f32[125,125]{1,0} parameter(0) constant_neg_inf = f32[] constant(-inf) reduce = f32[125]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation broadcast = f32[125,125]{1,0} broadcast(reduce), dimensions={1} ROOT subtract = f32[125,125]{1,0} subtract(param_0, broadcast) } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_FALSE(fusion_rewriter_.Run(module.get()).value()); } TEST_F(SoftmaxRewriterTritonTest, CanNotFuseSoftmaxDiamondWithExtraBroadcastUsage) { const std::string hlo_string = R"( HloModule softmax max_computation { arg_0 = f32[] parameter(0) arg_1 = f32[] parameter(1) ROOT maximum = f32[] maximum(arg_0, arg_1) } ENTRY main { param_0 = f32[127,125]{1,0} parameter(0) constant_neg_inf = f32[] constant(-inf) reduce = f32[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation broadcast = f32[127,125]{1,0} broadcast(reduce), dimensions={0} subtract = f32[127,125]{1,0} subtract(param_0, broadcast) ROOT multiply = f32[127,125]{1,0} multiply(broadcast, subtract) } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_FALSE(fusion_rewriter_.Run(module.get()).value()); } TEST_F(SoftmaxRewriterTritonTest, DoesNotFuseReductionOnNonMinorAxis) { const std::string hlo_string = R"( max_computation { arg_0 = f32[] parameter(0) arg_1 = f32[] parameter(1) ROOT maximum = f32[] maximum(arg_0, arg_1) } ENTRY main { param_0 = f32[8,16,16]{2,1,0} parameter(0) constant_neg_inf = f32[] 
constant(-inf) reduce = f32[8,16]{1,0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation broadcast = f32[8,16,16]{2,1,0} broadcast(reduce), dimensions={0,1} ROOT subtract = f32[8,16,16]{2,1,0} subtract(param_0, broadcast) } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_FALSE(fusion_rewriter_.Run(module.get()).value()); } TEST_F(SoftmaxRewriterTritonTest, DoesNotFuseReductionOnMultipleReductionAxes) { const std::string hlo_string = R"( max_computation { arg_0 = f32[] parameter(0) arg_1 = f32[] parameter(1) ROOT maximum = f32[] maximum(arg_0, arg_1) } ENTRY main { param_0 = f32[8,16,16]{2,1,0} parameter(0) constant_neg_inf = f32[] constant(-inf) reduce = f32[8]{0} reduce(param_0, constant_neg_inf), dimensions={2,1}, to_apply=max_computation broadcast = f32[8,16,16]{2,1,0} broadcast(reduce), dimensions={0} ROOT subtract = f32[8,16,16]{2,1,0} subtract(param_0, broadcast) } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_FALSE(fusion_rewriter_.Run(module.get()).value()); } TEST_F(SoftmaxRewriterTritonTest, CanFuseSoftmaxWithIntermediateUnaryElementwise) { const std::string hlo_string = R"( HloModule softmax max_computation { arg_0 = f32[] parameter(0) arg_1 = f32[] parameter(1) ROOT maximum = f32[] maximum(arg_0, arg_1) } add_computation { arg_0.1 = f32[] parameter(0) arg_1.1 = f32[] parameter(1) ROOT add = f32[] add(arg_0.1, arg_1.1) } ENTRY main { param_0 = f32[127,125]{1,0} parameter(0) constant_neg_inf = f32[] constant(-inf) reduce = f32[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation broadcast = f32[127,125]{1,0} broadcast(reduce), dimensions={0} subtract = f32[127,125]{1,0} subtract(param_0, broadcast) abs = f32[127,125]{1,0} abs(subtract) exponential = f32[127,125]{1,0} exponential(abs) constant_zero = f32[] constant(0) second_reduce = f32[127]{0} reduce(exponential, constant_zero), dimensions={1}, to_apply=add_computation second_broadcast = f32[127,125]{1,0} broadcast(second_reduce), dimensions={0} ROOT divide = f32[127,125]{1,0} divide(exponential, second_broadcast) } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_TRUE(fusion_rewriter_.Run(module.get()).value()); EXPECT_TRUE(verifier().Run(module.get()).status().ok()); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch( m::Fusion(m::Parameter()).WithPredicate(HasBlockLevelFusionConfig))); } TEST_F(SoftmaxRewriterTritonTest, CanFuseTwoDiamondsWithSecondDiamondProducerEqualToFirstDiamondRoot) { const std::string hlo_string = R"( HloModule softmax max_computation { arg_0 = f32[] parameter(0) arg_1 = f32[] parameter(1) ROOT maximum = f32[] maximum(arg_0, arg_1) } add_computation { arg_0.1 = f32[] parameter(0) arg_1.1 = f32[] parameter(1) ROOT add = f32[] add(arg_0.1, arg_1.1) } ENTRY main { param_0 = f32[127,125]{1,0} parameter(0) constant_neg_inf = f32[] constant(-inf) reduce = f32[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation broadcast = f32[127,125]{1,0} broadcast(reduce), dimensions={0} subtract = f32[127,125]{1,0} subtract(param_0, broadcast) constant_zero = f32[] constant(0) second_reduce = f32[127]{0} reduce(subtract, constant_zero), dimensions={1}, to_apply=add_computation second_broadcast = f32[127,125]{1,0} broadcast(second_reduce), dimensions={0} ROOT divide = f32[127,125]{1,0} divide(subtract, second_broadcast) } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); 
EXPECT_TRUE(fusion_rewriter_.Run(module.get()).value()); EXPECT_TRUE(verifier().Run(module.get()).status().ok()); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch( m::Fusion(m::Parameter()).WithPredicate(HasBlockLevelFusionConfig))); } TEST_F(SoftmaxRewriterTritonTest, CanFuseDiamondWithTrailingUnaryElementwiseAtTheRoot) { const std::string hlo_string = R"( HloModule softmax max_computation { arg_0 = f32[] parameter(0) arg_1 = f32[] parameter(1) ROOT maximum = f32[] maximum(arg_0, arg_1) } ENTRY main { param_0 = f32[127,125]{1,0} parameter(0) constant_neg_inf = f32[] constant(-inf) reduce = f32[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation broadcast = f32[127,125]{1,0} broadcast(reduce), dimensions={0} subtract = f32[127,125]{1,0} subtract(param_0, broadcast) ROOT abs = f32[127,125]{1,0} abs(subtract) } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_TRUE(fusion_rewriter_.Run(module.get()).value()); EXPECT_TRUE(verifier().Run(module.get()).status().ok()); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch( m::Fusion(m::Parameter()).WithPredicate(HasBlockLevelFusionConfig))); } TEST_F(SoftmaxRewriterTritonTest, CanFuseDiamondWithUnaryElementwisePrefix) { const std::string hlo_string = R"( HloModule softmax max_computation { arg_0 = f32[] parameter(0) arg_1 = f32[] parameter(1) ROOT maximum = f32[] maximum(arg_0, arg_1) } ENTRY main { param_0 = f32[127,125]{1,0} parameter(0) abs = f32[127,125]{1,0} abs(param_0) constant_neg_inf = f32[] constant(-inf) reduce = f32[127]{0} reduce(abs, constant_neg_inf), dimensions={1}, to_apply=max_computation broadcast = f32[127,125]{1,0} broadcast(reduce), dimensions={0} ROOT subtract = f32[127,125]{1,0} subtract(param_0, broadcast) } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_TRUE(fusion_rewriter_.Run(module.get()).value()); EXPECT_TRUE(verifier().Run(module.get()).status().ok()); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch( m::Fusion(m::Parameter()).WithPredicate(HasBlockLevelFusionConfig))); } TEST_F(SoftmaxRewriterTritonTest, CanFuseDiamondWithMultipleBroadcastDimensions) { const std::string hlo_string = R"( HloModule softmax max_computation { arg_0 = f32[] parameter(0) arg_1 = f32[] parameter(1) ROOT maximum = f32[] maximum(arg_0, arg_1) } ENTRY main { param_0 = f32[1,3,125,125]{3,2,1,0} parameter(0) bitcast = f32[3,125,125]{2,1,0} bitcast(f32[1,3,125,125]{3,2,1,0} param_0) constant_neg_inf = f32[] constant(-inf) reduce = f32[3,125]{1,0} reduce(f32[3,125,125]{2,1,0} bitcast, f32[] constant_neg_inf), dimensions={2}, to_apply=max_computation broadcast = f32[1,3,125,125]{3,2,1,0} broadcast(f32[3,125]{1,0} reduce), dimensions={1,2} ROOT subtract = f32[1,3,125,125]{3,2,1,0} subtract(f32[1,3,125,125]{3,2,1,0} param_0, f32[1,3,125,125]{3,2,1,0} broadcast) })"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_TRUE(fusion_rewriter_.Run(module.get()).value()); EXPECT_TRUE(verifier().Run(module.get()).status().ok()); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch( m::Fusion(m::Parameter()).WithPredicate(HasBlockLevelFusionConfig))); } TEST_F(SoftmaxRewriterTritonTest, CanNotFuseSoftmaxDiamondWithParameterReducerIdentity) { const std::string hlo_string = R"( HloModule softmax max_computation { arg_0 = f32[] parameter(0) arg_1 = f32[] parameter(1) ROOT maximum = f32[] maximum(arg_0, arg_1) } ENTRY main { param_0 = f32[127,125]{1,0} parameter(0) 
identity = f32[] parameter(1) constant_neg_inf = f32[] constant(-inf) reduce = f32[127]{0} reduce(param_0, identity), dimensions={1}, to_apply=max_computation broadcast = f32[127,125]{1,0} broadcast(reduce), dimensions={0} ROOT subtract = f32[127,125]{1,0} subtract(param_0, broadcast) } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_FALSE(fusion_rewriter_.Run(module.get()).value()); } TEST_F(SoftmaxRewriterTritonTest, CanNotFuseSoftmaxDiamondWithTritonIncompatibleReducer) { const std::string hlo_string = R"( HloModule softmax max_computation { arg_0 = f32[] parameter(0) arg_1 = f32[] parameter(1) if_0 = pred[] is-finite(arg_0) c = f32[] convert(if_0) ROOT maximum = f32[] maximum(c, arg_1) } ENTRY main { param_0 = f32[127,125]{1,0} parameter(0) constant_neg_inf = f32[] constant(-inf) reduce = f32[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation broadcast = f32[127,125]{1,0} broadcast(reduce), dimensions={0} ROOT subtract = f32[127,125]{1,0} subtract(param_0, broadcast) } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_FALSE(fusion_rewriter_.Run(module.get()).value()); } TEST_F(SoftmaxRewriterTritonTest, CanFuseSoftmaxDiamondWithLastDimensionBitcastAfterReduce) { const std::string hlo_string = R"( HloModule softmax max_computation { arg_0 = f32[] parameter(0) arg_1 = f32[] parameter(1) ROOT maximum = f32[] maximum(arg_0, arg_1) } ENTRY main { param_0 = f32[3,127,125]{2,1,0} parameter(0) constant_neg_inf = f32[] constant(-inf) reduce = f32[3,127]{1,0} reduce(param_0, constant_neg_inf), dimensions={2}, to_apply=max_computation bitcasted_reduce = f32[381]{0} bitcast(reduce) broadcast = f32[381,125]{1,0} broadcast(bitcasted_reduce), dimensions={0} bitcasted_broadcast = f32[3,127,125]{2,1,0} bitcast(broadcast) ROOT subtract = f32[3,127,125]{2,1,0} subtract(param_0, bitcasted_broadcast) } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_TRUE(fusion_rewriter_.Run(module.get()).value()); EXPECT_TRUE(verifier().Run(module.get()).status().ok()); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch( m::Fusion(m::Parameter()).WithPredicate(HasBlockLevelFusionConfig))); } TEST_F(SoftmaxRewriterTritonTest, CanNotFuseSoftmaxDiamondWithTransposeBitcast) { const std::string hlo_string = R"( HloModule softmax max_computation { arg_0 = f32[] parameter(0) arg_1 = f32[] parameter(1) ROOT maximum = f32[] maximum(arg_0, arg_1) } ENTRY main { param_0 = f32[1,127,125]{2,1,0} parameter(0) constant_neg_inf = f32[] constant(-inf) bitcasted_param_0 = f32[127,1,125]{2,0,1} bitcast(param_0) reduce = f32[127,1]{0,1} reduce(bitcasted_param_0, constant_neg_inf), dimensions={2}, to_apply=max_computation broadcast = f32[127,1,125]{2,0,1} broadcast(reduce), dimensions={0,1} bitcasted_broadcast = f32[1,127,125]{2,1,0} bitcast(broadcast) ROOT subtract = f32[1,127,125]{2,1,0} subtract(param_0, bitcasted_broadcast) } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_FALSE(fusion_rewriter_.Run(module.get()).value()); } TEST_F(SoftmaxRewriterTritonTest, CanNotFuseTwoDiamondsWithDifferentReductionAxisSizeTogether) { const std::string hlo_string = R"( HloModule softmax max_computation { arg_0 = f32[] parameter(0) arg_1 = f32[] parameter(1) ROOT maximum = f32[] maximum(arg_0, arg_1) } add_computation { arg_0.1 = f32[] parameter(0) arg_1.1 = f32[] parameter(1) ROOT add = f32[] add(arg_0.1, arg_1.1) } ENTRY main { param_0 = f32[127,625]{1,0} parameter(0) constant_neg_inf = f32[] 
constant(-inf) reduce = f32[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation broadcast = f32[127,625]{1,0} broadcast(reduce), dimensions={0} subtract = f32[127,625]{1,0} subtract(param_0, broadcast) bitcasted_subtract = f32[127,5,125] bitcast(subtract) exponential = f32[127,5,125] exponential(bitcasted_subtract) constant_zero = f32[] constant(0) second_reduce = f32[127,5] reduce(exponential, constant_zero), dimensions={2}, to_apply=add_computation second_broadcast = f32[127,5,125] broadcast(second_reduce), dimensions={0,1} ROOT divide = f32[127,5,125] divide(exponential, second_broadcast) } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_TRUE(fusion_rewriter_.Run(module.get()).value()); EXPECT_TRUE(verifier().Run(module.get()).status().ok()); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch( m::Fusion(m::Bitcast(m::Fusion(m::Parameter()) .WithPredicate(HasBlockLevelFusionConfig))) .WithPredicate(HasBlockLevelFusionConfig))); } TEST_F(SoftmaxRewriterTritonTest, CanNotFuseTwoDiamondsWithExtraUsageForFirstDiamondRoot) { const std::string hlo_string = R"( HloModule softmax max_computation { arg_0 = f32[] parameter(0) arg_1 = f32[] parameter(1) ROOT maximum = f32[] maximum(arg_0, arg_1) } add_computation { arg_0.1 = f32[] parameter(0) arg_1.1 = f32[] parameter(1) ROOT add = f32[] add(arg_0.1, arg_1.1) } ENTRY main { param_0 = f32[127,125]{1,0} parameter(0) constant_neg_inf = f32[] constant(-inf) reduce = f32[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation broadcast = f32[127,125]{1,0} broadcast(reduce), dimensions={0} subtract = f32[127,125]{1,0} subtract(param_0, broadcast) exponential = f32[127,125]{1,0} exponential(subtract) constant_zero = f32[] constant(0) second_reduce = f32[127]{0} reduce(exponential, constant_zero), dimensions={1}, to_apply=add_computation second_broadcast = f32[127,125]{1,0} broadcast(second_reduce), dimensions={0} divide = f32[127,125]{1,0} divide(exponential, second_broadcast) ROOT tuple = (f32[127,125]{1,0}, f32[127,125]{1,0}) tuple(divide, subtract) } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_TRUE(fusion_rewriter_.Run(module.get()).value()); EXPECT_TRUE(verifier().Run(module.get()).status().ok()); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch(m::Tuple( m::Fusion(m::Fusion()).WithPredicate(HasBlockLevelFusionConfig), m::Fusion(m::Parameter()).WithPredicate(HasBlockLevelFusionConfig)))); } TEST_F(SoftmaxRewriterTritonTest, CanNotFuseTwoDiamondsWithExtraUsageForSecondDiamondProducer) { const std::string hlo_string = R"( HloModule softmax max_computation { arg_0 = f32[] parameter(0) arg_1 = f32[] parameter(1) ROOT maximum = f32[] maximum(arg_0, arg_1) } add_computation { arg_0.1 = f32[] parameter(0) arg_1.1 = f32[] parameter(1) ROOT add = f32[] add(arg_0.1, arg_1.1) } ENTRY main { param_0 = f32[127,125]{1,0} parameter(0) constant_neg_inf = f32[] constant(-inf) reduce = f32[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation broadcast = f32[127,125]{1,0} broadcast(reduce), dimensions={0} subtract = f32[127,125]{1,0} subtract(param_0, broadcast) exponential = f32[127,125]{1,0} exponential(subtract) constant_zero = f32[] constant(0) second_reduce = f32[127]{0} reduce(exponential, constant_zero), dimensions={1}, to_apply=add_computation second_broadcast = f32[127,125]{1,0} broadcast(second_reduce), dimensions={0} divide = f32[127,125]{1,0} divide(exponential, 
second_broadcast) ROOT tuple = (f32[127,125]{1,0}, f32[127,125]{1,0}) tuple(divide, exponential) } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_TRUE(fusion_rewriter_.Run(module.get()).value()); EXPECT_TRUE(verifier().Run(module.get()).status().ok()); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch(m::Tuple( m::Fusion(m::Fusion()).WithPredicate(HasBlockLevelFusionConfig), m::Fusion(m::Parameter()).WithPredicate(HasBlockLevelFusionConfig)))); } TEST_F(SoftmaxRewriterTritonTest, CanFuseSoftmaxDiamondWithTritonIncompatibleProducer) { const std::string hlo_string = R"( HloModule softmax max_computation { arg_0 = f32[] parameter(0) arg_1 = f32[] parameter(1) ROOT maximum = f32[] maximum(arg_0, arg_1) } ENTRY main { param_0 = f16[127,125]{1,0} parameter(0) exponential = f16[127,125] exponential(param_0) convert = f32[127,125] convert(exponential) constant_neg_inf = f32[] constant(-inf) reduce = f32[127]{0} reduce(convert, constant_neg_inf), dimensions={1}, to_apply=max_computation broadcast = f32[127,125]{1,0} broadcast(reduce), dimensions={0} ROOT subtract = f32[127,125]{1,0} subtract(convert, broadcast) })"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_TRUE(fusion_rewriter_.Run(module.get()).value()); EXPECT_TRUE(verifier().Run(module.get()).status().ok()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Fusion(m::Exp(m::Parameter())) .WithPredicate(HasBlockLevelFusionConfig))); } TEST_F(SoftmaxRewriterTritonTest, CanNotFuseSoftmaxDiamondWithNonFusibleBitcastBetweenReduceAndProducer) { const std::string hlo_string = R"( HloModule softmax max_computation { arg_0 = f32[] parameter(0) arg_1 = f32[] parameter(1) ROOT maximum = f32[] maximum(arg_0, arg_1) } ENTRY main { param_0 = f32[1,127,5,25]{3,2,1,0} parameter(0) bitcast_0 = f32[127,125] bitcast(param_0) bitcast_1 = f32[127,125] bitcast(param_0) constant_neg_inf = f32[] constant(-inf) reduce = f32[127]{0} reduce(bitcast_0, constant_neg_inf), dimensions={1}, to_apply=max_computation broadcast = f32[127,125]{1,0} broadcast(reduce), dimensions={0} ROOT subtract = f32[127,125]{1,0} subtract(bitcast_1, broadcast) } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_FALSE(fusion_rewriter_.Run(module.get()).value()); } TEST_F(SoftmaxRewriterTritonTest, CanFuseSoftmaxDiamondWithBitcastProducerFollowedByBitcastsOnEachUse) { const std::string hlo_string = R"( HloModule softmax max_computation { arg_0 = f32[] parameter(0) arg_1 = f32[] parameter(1) ROOT maximum = f32[] maximum(arg_0, arg_1) } ENTRY main { param_0 = f32[1,1,127,125]{3,2,1,0} parameter(0) bitcast_parent = f32[127,125]{1,0} bitcast(param_0) bitcast_0 = f32[127,125]{1,0} bitcast(bitcast_parent) bitcast_1 = f32[127,125]{1,0} bitcast(bitcast_parent) constant_neg_inf = f32[] constant(-inf) reduce = f32[127]{0} reduce(bitcast_0, constant_neg_inf), dimensions={1}, to_apply=max_computation broadcast = f32[127,125]{1,0} broadcast(reduce), dimensions={0} ROOT subtract = f32[127,125]{1,0} subtract(bitcast_1, broadcast) } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_TRUE(fusion_rewriter_.Run(module.get()).value()); EXPECT_TRUE(verifier().Run(module.get()).status().ok()); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch( m::Fusion(m::Parameter()).WithPredicate(HasBlockLevelFusionConfig))); } TEST_F(SoftmaxRewriterTritonTest, RewriterBailsOutOnPreAmpereCudaGpu) { const std::string hlo_string = R"( HloModule softmax 
max_computation { arg_0 = f32[] parameter(0) arg_1 = f32[] parameter(1) ROOT maximum = f32[] maximum(arg_0, arg_1) } ENTRY main { param_0 = bf16[127,125]{1,0} parameter(0) param_0_f32 = f32[127,125]{1,0} convert(param_0) constant_neg_inf = f32[] constant(-inf) reduce = f32[127]{0} reduce(param_0_f32, constant_neg_inf), dimensions={1}, to_apply=max_computation broadcast = f32[127,125]{1,0} broadcast(reduce), dimensions={0} ROOT subtract = f32[127,125]{1,0} subtract(param_0_f32, broadcast) })"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_THAT( SoftmaxRewriterTriton( TestGpuDeviceInfo::RTXA6000DeviceInfo( se::CudaComputeCapability{se::CudaComputeCapability::VOLTA, 0}), ShapeSizeBytesFunction()) .Run(module.get()), tsl::testing::StatusIs( tsl::error::FAILED_PRECONDITION, ::testing::HasSubstr("Triton support is only enabled for Ampere GPUs " "(compute capability 8.0) and up, but got"))); } TEST_F(SoftmaxRewriterTritonTest, RewriterSucceedsOnNonCudaGpu) { const std::string hlo_string = R"( HloModule softmax max_computation { arg_0 = f32[] parameter(0) arg_1 = f32[] parameter(1) ROOT maximum = f32[] maximum(arg_0, arg_1) } ENTRY main { param_0 = bf16[127,125]{1,0} parameter(0) param_0_f32 = f32[127,125]{1,0} convert(param_0) constant_neg_inf = f32[] constant(-inf) reduce = f32[127]{0} reduce(param_0_f32, constant_neg_inf), dimensions={1}, to_apply=max_computation broadcast = f32[127,125]{1,0} broadcast(reduce), dimensions={0} ROOT subtract = f32[127,125]{1,0} subtract(param_0_f32, broadcast) })"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_TRUE(SoftmaxRewriterTriton(TestGpuDeviceInfo::AMDMI210DeviceInfo(), ShapeSizeBytesFunction()) .Run(module.get()) .ok()); } TEST_F(SoftmaxRewriterTritonTest, CanFuseBinaryElementwiseProducerIntoDiamondWhenBothOperandsAreTheSame) { const std::string hlo_string = R"( HloModule fusible_diamond max_computation { arg_0 = f32[] parameter(0) arg_1 = f32[] parameter(1) ROOT maximum = f32[] maximum(arg_0, arg_1) } ENTRY main { param_0 = f32[127,125]{1,0} parameter(0) multiply = f32[127,125]{1,0} multiply(param_0, param_0) constant_neg_inf = f32[] constant(-inf) reduce = f32[127]{0} reduce(multiply, constant_neg_inf), dimensions={1}, to_apply=max_computation broadcast = f32[127,125]{1,0} broadcast(reduce), dimensions={0} ROOT subtract = f32[127,125]{1,0} subtract(multiply, broadcast) })"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_TRUE(fusion_rewriter_.Run(module.get()).value()); EXPECT_TRUE(verifier().Run(module.get()).status().ok()); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch( m::Fusion(m::Parameter()).WithPredicate(HasBlockLevelFusionConfig))); } TEST_F( SoftmaxRewriterTritonTest, CanFuseIntermediateBinaryElementwiseWithinDiamondWhenBothOperandsAreTheSame) { const std::string hlo_string = R"( HloModule fusible_diamond max_computation { arg_0 = f32[] parameter(0) arg_1 = f32[] parameter(1) ROOT maximum = f32[] maximum(arg_0, arg_1) } ENTRY main { param_0 = f32[127,125]{1,0} parameter(0) constant_neg_inf = f32[] constant(-inf) reduce = f32[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation multiply = f32[127]{0} multiply(reduce, reduce) broadcast = f32[127,125]{1,0} broadcast(multiply), dimensions={0} ROOT subtract = f32[127,125]{1,0} subtract(param_0, broadcast) } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_TRUE(fusion_rewriter_.Run(module.get()).value()); 
EXPECT_TRUE(verifier().Run(module.get()).status().ok()); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch( m::Fusion(m::Parameter()).WithPredicate(HasBlockLevelFusionConfig))); } TEST_F(SoftmaxRewriterTritonTest, CanFuseBinaryElementwiseWhenBothOperandsAreTheSameBetweenDiamonds) { const std::string hlo_string = R"( HloModule fusible_diamonds max_computation { arg_0 = f32[] parameter(0) arg_1 = f32[] parameter(1) ROOT maximum = f32[] maximum(arg_0, arg_1) } add_computation { arg_0.1 = f32[] parameter(0) arg_1.1 = f32[] parameter(1) ROOT add = f32[] add(arg_0.1, arg_1.1) } ENTRY main { param_0 = f32[127,125]{1,0} parameter(0) constant_neg_inf = f32[] constant(-inf) reduce = f32[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation broadcast = f32[127,125]{1,0} broadcast(reduce), dimensions={0} subtract = f32[127,125]{1,0} subtract(param_0, broadcast) multiply = f32[127,125]{1,0} multiply(subtract, subtract) constant_zero = f32[] constant(0) second_reduce = f32[127]{0} reduce(multiply, constant_zero), dimensions={1}, to_apply=add_computation second_broadcast = f32[127,125]{1,0} broadcast(second_reduce), dimensions={0} ROOT subtract_second = f32[127,125]{1,0} subtract(multiply, second_broadcast) } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_TRUE(fusion_rewriter_.Run(module.get()).value()); EXPECT_TRUE(verifier().Run(module.get()).status().ok()); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch( m::Fusion(m::Parameter()).WithPredicate(HasBlockLevelFusionConfig))); } TEST_F(SoftmaxRewriterTritonTest, CanFuseBinaryElementwiseConsumerWhereBothOperandsAreTheSameIntoDiamond) { const std::string hlo_string = R"( HloModule fusible_diamond max_computation { arg_0 = f32[] parameter(0) arg_1 = f32[] parameter(1) ROOT maximum = f32[] maximum(arg_0, arg_1) } add_computation { arg_0.1 = f32[] parameter(0) arg_1.1 = f32[] parameter(1) ROOT add = f32[] add(arg_0.1, arg_1.1) } ENTRY main { param_0 = f32[127,125]{1,0} parameter(0) constant_neg_inf = f32[] constant(-inf) reduce = f32[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation broadcast = f32[127,125]{1,0} broadcast(reduce), dimensions={0} subtract = f32[127,125]{1,0} subtract(param_0, broadcast) ROOT multiply = f32[127,125]{1,0} multiply(subtract, subtract) } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_TRUE(fusion_rewriter_.Run(module.get()).value()); EXPECT_TRUE(verifier().Run(module.get()).status().ok()); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch( m::Fusion(m::Parameter()).WithPredicate(HasBlockLevelFusionConfig))); } TEST_F( SoftmaxRewriterTritonTest, DoesNotFuseIntermediateBinaryElementwiseWithBothSplatOperandsIntoDiamond) { const std::string hlo_string = R"( HloModule nonfusible_splat max_computation { arg_0 = f32[] parameter(0) arg_1 = f32[] parameter(1) ROOT maximum = f32[] maximum(arg_0, arg_1) } ENTRY main { constant_0 = f32[] constant(0.333333343) splat_0 = f32[127,125]{1,0} broadcast(constant_0), dimensions={} constant_1 = f32[] constant(0.66666) splat_1 = f32[127,125]{1,0} broadcast(constant_1), dimensions={} param_0 = f32[127,125]{1,0} parameter(0) multiply_splats = f32[127,125]{1,0} multiply(splat_0, splat_1) multiply_splat_param = f32[127,125]{1,0} multiply(multiply_splats, param_0) constant_neg_inf = f32[] constant(-inf) reduce = f32[127]{0} reduce(multiply_splat_param, constant_neg_inf), dimensions={1}, to_apply=max_computation broadcast 
= f32[127,125]{1,0} broadcast(reduce), dimensions={0} ROOT subtract = f32[127,125]{1,0} subtract(param_0, broadcast) } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_FALSE(fusion_rewriter_.Run(module.get()).value()); } TEST_F( SoftmaxRewriterTritonTest, DoesNotFuseIntermediateBinaryElementwiseWithSameSplatOperandsIntoDiamond) { const std::string hlo_string = R"( HloModule nonfusible_splat_diamond max_computation { arg_0 = f32[] parameter(0) arg_1 = f32[] parameter(1) ROOT maximum = f32[] maximum(arg_0, arg_1) } ENTRY main { constant_0 = f32[] constant(0.333333343) splat = f32[127,125]{1,0} broadcast(constant_0), dimensions={} param_0 = f32[127,125]{1,0} parameter(0) multiply = f32[127,125]{1,0} multiply(splat, splat) add = f32[127,125]{1,0} add(param_0, multiply) constant_neg_inf = f32[] constant(-inf) reduce = f32[127]{0} reduce(add, constant_neg_inf), dimensions={1}, to_apply=max_computation broadcast = f32[127,125]{1,0} broadcast(reduce), dimensions={0} ROOT subtract = f32[127,125]{1,0} subtract(param_0, broadcast) } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); SoftmaxRewriterTriton fusion_rewriter(device_info_, ShapeSizeBytesFunction()); EXPECT_FALSE(fusion_rewriter_.Run(module.get()).value()); } TEST_F(SoftmaxRewriterTritonTest, CanFuseRMSNormDiamond) { const std::string hlo_string = R"( HloModule rms_norm add_computation { arg_0 = f32[] parameter(0) arg_1 = f32[] parameter(1) ROOT add.1 = f32[] add(arg_0, arg_1) } ENTRY main.30 { param_0 = f32[10,10,10,128]{3,2,1,0} parameter(0) multiply_param = f32[10,10,10,128]{3,2,1,0} multiply(param_0, param_0) constant_0 = f32[] constant(0) reduce = f32[10,10,10]{2,1,0} reduce(multiply_param, constant_0), dimensions={3}, to_apply=add_computation constant_1 = f32[] constant(0.333333343) splat = f32[10,10,10]{2,1,0} broadcast(constant_1), dimensions={} multiply_splat = f32[10,10,10]{2,1,0} multiply(reduce, splat) epsilon = f32[] constant(1e-06) splat_epsilon = f32[10,10,10]{2,1,0} broadcast(epsilon), dimensions={} add = f32[10,10,10]{2,1,0} add(multiply_splat, splat_epsilon) rsqrt = f32[10,10,10]{2,1,0} rsqrt(add) broadcast = f32[10,10,10,128]{3,2,1,0} broadcast(rsqrt), dimensions={0,1,2} ROOT multiply = f32[10,10,10,128]{3,2,1,0} multiply(param_0, broadcast) } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_TRUE(fusion_rewriter_.Run(module.get()).value()); EXPECT_TRUE(verifier().Run(module.get()).status().ok()); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch( m::Fusion(m::Parameter()).WithPredicate(HasBlockLevelFusionConfig))); } TEST_F( SoftmaxRewriterTritonTest, CanFuseAndEmitBinaryElementwiseWhereTheFirstOperandIsASplatConstantBetweenDiamonds) { const std::string hlo_string = R"( HloModule fusible_diamonds add_computation { arg_0.1 = f32[] parameter(0) arg_1.1 = f32[] parameter(1) ROOT add = f32[] add(arg_0.1, arg_1.1) } ENTRY main { param_0 = f32[127,125]{1,0} parameter(0) constant_neg_inf = f32[] constant(-inf) reduce = f32[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=add_computation broadcast = f32[127,125]{1,0} broadcast(reduce), dimensions={0} subtract = f32[127,125]{1,0} subtract(param_0, broadcast) constant = f32[] constant(0.333333343) broadcast_splat = f32[127,125]{1,0} broadcast(constant), dimensions={} multiply = f32[127,125]{1,0} multiply(broadcast_splat, subtract) constant_zero = f32[] constant(0) second_reduce = f32[127]{0} reduce(multiply, constant_zero), dimensions={1}, to_apply=add_computation 
second_broadcast = f32[127,125]{1,0} broadcast(second_reduce), dimensions={0} ROOT second_subtract = f32[127,125]{1,0} subtract(multiply, second_broadcast) } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_TRUE(fusion_rewriter_.Run(module.get()).value()); EXPECT_TRUE(verifier().Run(module.get()).status().ok()); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch( m::Fusion(m::Parameter()).WithPredicate(HasBlockLevelFusionConfig))); } TEST_F( SoftmaxRewriterTritonTest, CanFuseAndEmitBinaryElementwiseWhereTheSecondOperandIsASplatConstantBetweenDiamonds) { const std::string hlo_string = R"( HloModule fusible_diamonds add_computation { arg_0.1 = f32[] parameter(0) arg_1.1 = f32[] parameter(1) ROOT add = f32[] add(arg_0.1, arg_1.1) } ENTRY main { param_0 = f32[127,125]{1,0} parameter(0) constant_neg_inf = f32[] constant(-inf) reduce = f32[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=add_computation broadcast = f32[127,125]{1,0} broadcast(reduce), dimensions={0} subtract = f32[127,125]{1,0} subtract(param_0, broadcast) constant = f32[] constant(0.333333343) broadcast_splat = f32[127,125]{1,0} broadcast(constant), dimensions={} multiply = f32[127,125]{1,0} multiply(subtract, broadcast_splat) constant_zero = f32[] constant(0) second_reduce = f32[127]{0} reduce(multiply, constant_zero), dimensions={1}, to_apply=add_computation second_broadcast = f32[127,125]{1,0} broadcast(second_reduce), dimensions={0} ROOT second_subtract = f32[127,125]{1,0} subtract(multiply, second_broadcast) } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_TRUE(fusion_rewriter_.Run(module.get()).value()); EXPECT_TRUE(verifier().Run(module.get()).status().ok()); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch( m::Fusion(m::Parameter()).WithPredicate(HasBlockLevelFusionConfig))); } TEST_F( SoftmaxRewriterTritonTest, CanFuseBinaryElementwiseWhereTheFirstOperandIsASplatConstantWithinDiamond) { const std::string hlo_string = R"( HloModule fusible_diamond max_computation { arg_0 = f32[] parameter(0) arg_1 = f32[] parameter(1) ROOT maximum = f32[] maximum(arg_0, arg_1) } ENTRY main { param_0 = f32[127,125]{1,0} parameter(0) constant_neg_inf = f32[] constant(-inf) reduce = f32[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation constant = f32[] constant(0.333333343) broadcast_splat = f32[127]{0} broadcast(constant), dimensions={} multiply = f32[127]{0} multiply(broadcast_splat, reduce) broadcast = f32[127,125]{1,0} broadcast(multiply), dimensions={0} ROOT subtract = f32[127,125]{1,0} subtract(param_0, broadcast) } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_TRUE(fusion_rewriter_.Run(module.get()).value()); EXPECT_TRUE(verifier().Run(module.get()).status().ok()); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch( m::Fusion(m::Parameter()).WithPredicate(HasBlockLevelFusionConfig))); } TEST_F(SoftmaxRewriterTritonTest, CanFuseBinaryElementwiseConsumerWhereTheFirstOperandIsASplatConstant) { const std::string hlo_string = R"( HloModule fusible_diamond add_computation { arg_0.1 = f32[] parameter(0) arg_1.1 = f32[] parameter(1) ROOT add = f32[] add(arg_0.1, arg_1.1) } ENTRY main { param_0 = f32[127,125]{1,0} parameter(0) constant_neg_inf = f32[] constant(-inf) reduce = f32[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=add_computation broadcast = f32[127,125]{1,0} broadcast(reduce), dimensions={0} subtract = 
f32[127,125]{1,0} subtract(param_0, broadcast) constant = f32[] constant(0.333333343) broadcast_splat = f32[127,125]{1,0} broadcast(constant), dimensions={} ROOT multiply = f32[127,125]{1,0} multiply(broadcast_splat, subtract) })"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_TRUE(fusion_rewriter_.Run(module.get()).value()); EXPECT_TRUE(verifier().Run(module.get()).status().ok()); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch( m::Fusion(m::Parameter()).WithPredicate(HasBlockLevelFusionConfig))); } TEST_F(SoftmaxRewriterTritonTest, CanFuseBinaryElementwiseOperationWhereOneOperandIsASharedSplatProducer) { const std::string hlo_string = R"( HloModule nonfusible_diamond max_computation { arg_0 = f32[] parameter(0) arg_1 = f32[] parameter(1) ROOT max = f32[] maximum(arg_0, arg_1) } ENTRY main { param_0 = f32[127,125]{1,0} parameter(0) constant_2 = f32[] constant(0.333333343) broadcast_splat = f32[127,125]{1,0} broadcast(constant_2), dimensions={} param_1 = f32[127,125]{1,0} parameter(1) multiply_splat = f32[127,125]{1,0} multiply(broadcast_splat, param_1) multiply = f32[127,125]{1,0} multiply(param_0, broadcast_splat) constant_neg_inf = f32[] constant(-inf) reduce = f32[127]{0} reduce(multiply, constant_neg_inf), dimensions={1}, to_apply=max_computation broadcast = f32[127,125]{1,0} broadcast(reduce), dimensions={0} ROOT subtract = f32[127,125]{1,0} subtract(param_0, broadcast) })"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_TRUE(fusion_rewriter_.Run(module.get()).value()); EXPECT_TRUE(verifier().Run(module.get()).status().ok()); VLOG(2) << module->ToString(); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch( m::Fusion(m::Parameter()).WithPredicate(HasBlockLevelFusionConfig))); } TEST_F( SoftmaxRewriterTritonTest, DoesNotFuseBinaryElementwiseOperationWhereFirstOperandIsASplatAndSecondOperandIsASharedSplatProducer) { const std::string hlo_string = R"( HloModule nonfusible_diamond add_computation { arg_0.1 = f32[] parameter(0) arg_1.1 = f32[] parameter(1) ROOT add = f32[] add(arg_0.1, arg_1.1) } ENTRY main { param_0 = f32[127,125]{1,0} parameter(0) constant_2 = f32[] constant(0.333333343) broadcast_splat_shared = f32[127,125]{1,0} broadcast(constant_2), dimensions={} param_1 = f32[127,125]{1,0} parameter(1) multiply_splat_shared = f32[127,125]{1,0} multiply(broadcast_splat_shared, param_1) constant_3 = f32[] constant(0.5) broadcast_splat = f32[127,125]{1,0} broadcast(constant_3), dimensions={} multiply_splat = f32[127,125]{1,0} multiply(broadcast_splat, broadcast_splat_shared) multiply = f32[127,125]{1,0} multiply(param_0, multiply_splat) constant_neg_inf = f32[] constant(-inf) reduce = f32[127]{0} reduce(multiply, constant_neg_inf), dimensions={1}, to_apply=add_computation broadcast = f32[127,125]{1,0} broadcast(reduce), dimensions={0} ROOT subtract = f32[127,125]{1,0} subtract(param_0, broadcast) })"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_FALSE(fusion_rewriter_.Run(module.get()).value()); } TEST_F(SoftmaxRewriterTritonTest, FusionDecisionIsCapturedExplicitly) { const std::string hlo_string = R"( HloModule softmax max_computation { arg_0 = f32[] parameter(0) arg_1 = f32[] parameter(1) ROOT maximum = f32[] maximum(arg_0, arg_1) } ENTRY main { param_0 = f32[127,125]{1,0} parameter(0) identity_f8 = f8e5m2[] parameter(1) identity = f32[] convert(identity_f8) reduce = f32[127]{0} reduce(param_0, identity), dimensions={1}, to_apply=max_computation broadcast = 
f32[127,125]{1,0} broadcast(reduce), dimensions={0} ROOT subtract = f32[127,125]{1,0} subtract(param_0, broadcast) } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); SoftmaxRewriterTriton softmax_rewriter_triton(device_info_, ShapeSizeBytesFunction()); int unmatched = 0, matched = 0; for (HloInstruction* instruction : module->entry_computation()->MakeInstructionPostOrder()) { DiamondMatchingDecision decision = softmax_rewriter_triton.MatchesTritonCompatibleClosedReductionDiamond( instruction); if (std::holds_alternative<FusionDecision>(decision)) { std::string actual_decision = std::get<FusionDecision>(decision).Explain(); EXPECT_THAT( actual_decision, AnyOf( HasSubstr("Root is not elementwise binary"), HasSubstr("identity is not a constant or a supported convert"))); unmatched++; } else { matched++; } } EXPECT_EQ(unmatched, 6); EXPECT_EQ(matched, 0); } TEST_F( SoftmaxRewriterTritonTest, FusesBinaryElementwiseIfIntermediateDiamondOpWithBroadcastAlongReductionDimAsParameter) { const std::string hlo_string = R"( HloModule h1 add_computation { y = f32[] parameter(1) x = f32[] parameter(0) ROOT add = f32[] add(x, y) } ENTRY main { p0 = f32[32]{0} parameter(0) p1 = f32[32,16]{1,0} parameter(1) c = f32[] constant(0) r0 = f32[32]{0} reduce(p1, c), dimensions={1}, to_apply=add_computation b0 = f32[32,16]{1,0} broadcast(r0), dimensions={0} b1 = f32[32,16]{1,0} broadcast(p0), dimensions={0} add0 = f32[32,16]{1,0} add(b1, p1) ROOT add1 = f32[32,16]{1,0} add(add0, b0) })"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_TRUE(fusion_rewriter_.Run(module.get()).value()); } TEST_F( SoftmaxRewriterTritonTest, FusesBinaryElementwiseIfIntermediateDiamondOpWithBroadcastAlongBatchDimAsParameter) { const std::string hlo_string = R"( HloModule h1 add_computation { y = f32[] parameter(1) x = f32[] parameter(0) ROOT add = f32[] add(x, y) } ENTRY main { p0 = f32[16]{0} parameter(0) p1 = f32[32,16]{1,0} parameter(1) c = f32[] constant(0) r0 = f32[32]{0} reduce(p1, c), dimensions={1}, to_apply=add_computation b0 = f32[32,16]{1,0} broadcast(r0), dimensions={0} b1 = f32[32,16]{1,0} broadcast(p0), dimensions={1} add0 = f32[32,16]{1,0} add(b1, p1) ROOT add1 = f32[32,16]{1,0} add(add0, b0) })"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_TRUE(fusion_rewriter_.Run(module.get()).value()); } TEST_F( SoftmaxRewriterTritonTest, FusesBinaryElementwiseIfIntermediateDiamondOpWithMultiDimTensorBroadcastAlongBatchDimAsParameter) { const std::string hlo_string = R"( HloModule h1 add_computation { y = f32[] parameter(1) x = f32[] parameter(0) ROOT add = f32[] add(x, y) } ENTRY main { p0 = f32[32,16]{1,0} parameter(0) p1 = f32[64,32,16]{2,1,0} parameter(1) c = f32[] constant(0) r0 = f32[64,32]{1,0} reduce(p1, c), dimensions={2}, to_apply=add_computation b0 = f32[64,32,16]{2,1,0} broadcast(r0), dimensions={0,1} b1 = f32[64,32,16]{2,1,0} broadcast(p0), dimensions={1,2} add0 = f32[64,32,16]{2,1,0} add(b1, p1) ROOT add1 = f32[64,32,16]{2,1,0} add(add0, b0) })"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_TRUE(fusion_rewriter_.Run(module.get()).value()); } TEST_F( SoftmaxRewriterTritonTest, FusesBinaryElementwiseIfIntermediateDiamondOpWithZeroDimTensorBroadcastAsParameter) { const std::string hlo_string = R"( HloModule h1 add_computation { y = f32[] parameter(1) x = f32[] parameter(0) ROOT add = f32[] add(x, y) } ENTRY main { parameter_0 = f32[] parameter(0) parameter_1 = f32[64,32,16]{2,1,0} parameter(1) c = f32[] constant(0) reduce_0 = 
f32[64,32]{1,0} reduce(parameter_1, c), dimensions={2}, to_apply=add_computation broadcast_0 = f32[64,32,16]{2,1,0} broadcast(reduce_0), dimensions={0,1} broadcast_1 = f32[64,32,16]{2,1,0} broadcast(parameter_0), dimensions={} add_0 = f32[64,32,16]{2,1,0} add(broadcast_1, parameter_1) ROOT add1 = f32[64,32,16]{2,1,0} add(add_0, broadcast_0) })"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_TRUE(fusion_rewriter_.Run(module.get()).value()); } TEST_F( SoftmaxRewriterTritonTest, FusesBinaryElementwiseIfIntermediateDiamondOpIsBroadcastOf1DParameterAlongNonReductionDimensions) { const std::string hlo_string = R"( HloModule h1 add_computation { y = f32[] parameter(1) x = f32[] parameter(0) ROOT add = f32[] add(x, y) } ENTRY main { parameter_0 = f32[16] parameter(0) parameter_1 = f32[64,32,16]{2,1,0} parameter(1) c = f32[] constant(0) reduce_0 = f32[64,32]{1,0} reduce(parameter_1, c), dimensions={2}, to_apply=add_computation broadcast_0 = f32[64,32,16]{2,1,0} broadcast(reduce_0), dimensions={0,1} broadcast_1 = f32[64,32,16]{2,1,0} broadcast(parameter_0), dimensions={2} add_0 = f32[64,32,16]{2,1,0} add(broadcast_1, parameter_1) ROOT add1 = f32[64,32,16]{2,1,0} add(add_0, broadcast_0) })"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_TRUE(fusion_rewriter_.Run(module.get()).value()); } TEST_F( SoftmaxRewriterTritonTest, DoesNotFuseBinaryElementwiseIfIntermediateDiamondOpIsBroadcastOf1DParameterAlongBothBatchAndReductionDimensions) { const std::string hlo_string = R"( HloModule h1 add_computation { y = f32[] parameter(1) x = f32[] parameter(0) ROOT add = f32[] add(x, y) } ENTRY main { parameter_0 = f32[64] parameter(0) parameter_1 = f32[64,32,16]{2,1,0} parameter(1) c = f32[] constant(0) reduce_0 = f32[64,32]{1,0} reduce(parameter_1, c), dimensions={2}, to_apply=add_computation broadcast_0 = f32[64,32,16]{2,1,0} broadcast(reduce_0), dimensions={0,1} broadcast_1 = f32[64,32,16]{2,1,0} broadcast(parameter_0), dimensions={0} add_0 = f32[64,32,16]{2,1,0} add(broadcast_1, parameter_1) ROOT add1 = f32[64,32,16]{2,1,0} add(add_0, broadcast_0) })"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_FALSE(fusion_rewriter_.Run(module.get()).value()); } TEST_F( SoftmaxRewriterTritonTest, DoesNotFuseBinaryElementwiseIfIntermediateDiamondOpWithBroadcastAlongBatchAndReductionDimAsParameter) { const std::string hlo_string = R"( HloModule h1 add_computation { y = f32[] parameter(1) x = f32[] parameter(0) ROOT add = f32[] add(x, y) } ENTRY main { p0 = f32[8]{0} parameter(0) p1 = f32[32,8,16]{2,1,0} parameter(1) c = f32[] constant(0) r0 = f32[32,8]{1,0} reduce(p1, c), dimensions={2}, to_apply=add_computation b0 = f32[32,8,16]{2,1,0} broadcast(r0), dimensions={0,1} b1 = f32[32,8,16]{2,1,0} broadcast(p0), dimensions={1} add0 = f32[32,8,16]{2,1,0} add(b1, p1) ROOT add1 = f32[32,8,16]{2,1,0} add(add0, b0) })"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_FALSE(fusion_rewriter_.Run(module.get()).value()); } TEST_F( SoftmaxRewriterTritonTest, DoesNotFuseBinaryElementwiseIfIntermediateDiamondOpWithPartialBroadcastToBatchDim) { const std::string hlo_string = R"( HloModule h1 add_computation { y = f32[] parameter(1) x = f32[] parameter(0) ROOT add = f32[] add(x, y) } ENTRY main { p0 = f32[16,64]{1,0} parameter(0) p1 = f32[8,16,32,64]{3,2,1,0} parameter(1) c = f32[] constant(0) r0 = f32[8,16,32]{2,1,0} reduce(p1, c), dimensions={3}, to_apply=add_computation b0 = f32[8,16,32,64]{3,2,1,0} broadcast(r0), dimensions={0,1,2} b1 = 
f32[8,16,32,64]{3,2,1,0} broadcast(p0), dimensions={1,3} add0 = f32[8,16,32,64]{3,2,1,0} add(b1, p1) ROOT add1 = f32[8,16,32,64]{3,2,1,0} add(add0, b0) } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_FALSE(fusion_rewriter_.Run(module.get()).value()); } TEST_F( SoftmaxRewriterTritonTest, DoesNotFuseBinaryElementwiseIfIntermediateDiamondOpWithMultiDimBroadcastAlongBatchDimAsParameter) { const std::string hlo_string = R"( HloModule h1 add_computation { y = f32[] parameter(1) x = f32[] parameter(0) ROOT add = f32[] add(x, y) } ENTRY main { p0 = f32[32,16]{1,0} parameter(0) p1 = f32[128,64,32,16]{3,2,1,0} parameter(1) c = f32[] constant(0) r0 = f32[128,64,32]{2,1,0} reduce(p1, c), dimensions={3}, to_apply=add_computation b0 = f32[128,64,32,16]{3,2,1,0} broadcast(r0), dimensions={0,1,2} b1 = f32[128,64,32,16]{3,2,1,0} broadcast(p0), dimensions={2,3} add0 = f32[128,64,32,16]{3,2,1,0} add(b1, p1) ROOT add1 = f32[128,64,32,16]{3,2,1,0} add(add0, b0) })"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_FALSE(fusion_rewriter_.Run(module.get()).value()); } TEST_F(SoftmaxRewriterTritonTest, DoesNotFuseIfResultingFusionCannotBeTiled) { const std::string hlo_string = R"( HloModule softmax max_computation { arg_0 = f32[] parameter(0) arg_1 = f32[] parameter(1) ROOT maximum = f32[] maximum(arg_0, arg_1) } ENTRY main { param_0 = f32[8,2097152] parameter(0) constant_neg_inf = f32[] constant(-inf) reduce = f32[8]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation broadcast = f32[8,2097152] broadcast(reduce), dimensions={0} ROOT subtract = f32[8,2097152] subtract(param_0, broadcast) } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_FALSE(fusion_rewriter_.Run(module.get()).value()); } TEST_F(SoftmaxRewriterTritonTest, DoNotFuseNormalizationWithVeryLongRowsIfProfitabilityCheckIsEnabled) { const std::string hlo_string = R"( HloModule softmax max_computation { arg_0 = f32[] parameter(0) arg_1 = f32[] parameter(1) ROOT maximum = f32[] maximum(arg_0, arg_1) } ENTRY main { param_0 = f32[8,262144] parameter(0) constant_neg_inf = f32[] constant(-inf) reduce = f32[8]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation broadcast = f32[8,262144] broadcast(reduce), dimensions={0} ROOT subtract = f32[8,262144] subtract(param_0, broadcast) })"; { SoftmaxRewriterTriton fusion_rewriter_without_cost_model{ device_info_, ShapeSizeBytesFunction(), false}; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_TRUE(fusion_rewriter_without_cost_model.Run(module.get()).value()); EXPECT_TRUE(verifier().Run(module.get()).status().ok()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Fusion(m::Parameter()) .WithPredicate(HasBlockLevelFusionConfig))); } { SoftmaxRewriterTriton fusion_rewriter_with_cost_model{ device_info_, ShapeSizeBytesFunction(), true}; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_FALSE(fusion_rewriter_with_cost_model.Run(module.get()).value()); } } TEST_F(SoftmaxRewriterTritonTest, DoesNotCrashOnScalarBroadcast) { const std::string hlo_string = R"( HloModule softmax max_computation { arg_0 = f32[] parameter(0) arg_1 = f32[] parameter(1) ROOT maximum = f32[] maximum(arg_0, arg_1) } ENTRY main { param_0 = f32[127,125]{1,0} parameter(0) param_1 = f32[] parameter(1) broadcast_from_scalar = f32[127] broadcast(param_1), dimensions={} constant_neg_inf = f32[] constant(-inf) reduce = f32[127]{0} reduce(param_0, 
constant_neg_inf), dimensions={1}, to_apply=max_computation add = f32[127]{0} add(broadcast_from_scalar, reduce) broadcast = f32[127,125]{1,0} broadcast(add), dimensions={0} subtract = f32[127,125]{1,0} subtract(param_0, broadcast) ROOT abs = f32[127,125]{1,0} abs(subtract) } )"; auto module = ParseAndReturnVerifiedModule(hlo_string).value(); EXPECT_TRUE(fusion_rewriter_.Run(module.get()).value()); EXPECT_TRUE(verifier().Run(module.get()).status().ok()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Fusion(m::Parameter(), m::Parameter()) .WithPredicate(HasBlockLevelFusionConfig))); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/softmax_rewriter_triton.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/softmax_rewriter_triton_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
853f3f85-8a15-440b-a243-417ea3be940b
cpp
tensorflow/tensorflow
priority_fusion
third_party/xla/xla/service/gpu/transforms/priority_fusion.cc
third_party/xla/xla/service/gpu/transforms/priority_fusion_test.cc
#include "xla/service/gpu/transforms/priority_fusion.h" #include <cstddef> #include <cstdint> #include <functional> #include <iterator> #include <map> #include <memory> #include <string> #include <utility> #include <variant> #include <vector> #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/meta/type_traits.h" #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" #include "absl/strings/string_view.h" #include "absl/synchronization/mutex.h" #include "absl/time/time.h" #include "llvm/ADT/STLExtras.h" #include "mlir/IR/MLIRContext.h" #include "xla/debug_options_flags.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/dump.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/fusion_deduplication_cache.h" #include "xla/service/gpu/fusion_process_dump.pb.h" #include "xla/service/gpu/fusions/triton/triton_support.h" #include "xla/service/gpu/gpu_fusible.h" #include "xla/service/gpu/hlo_fusion_analysis.h" #include "xla/service/gpu/hlo_traversal.h" #include "xla/service/gpu/ir_emission_utils.h" #include "xla/service/gpu/model/fusion_analysis_cache.h" #include "xla/service/gpu/model/gpu_hlo_cost_analysis.h" #include "xla/service/gpu/model/gpu_indexing_performance_model.h" #include "xla/service/gpu/model/gpu_performance_model.h" #include "xla/service/gpu/model/gpu_performance_model_base.h" #include "xla/service/gpu/model/tiled_hlo_computation.h" #include "xla/service/gpu/model/triton_emitter_constraints.h" #include "xla/service/hlo_graph_dumper.h" #include "xla/service/instruction_fusion.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/stream_executor/device_description.h" #include "xla/xla_data.pb.h" #include "tsl/platform/blocking_counter.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" #include "tsl/platform/status.h" #include "tsl/platform/statusor.h" #include "tsl/platform/threadpool.h" namespace xla { namespace gpu { namespace { bool IsFusible(const HloInstruction& instr) { if (!instr.IsFusible()) { return false; } if (instr.IsElementwise()) { return true; } switch (instr.opcode()) { case HloOpcode::kFusion: return instr.fusion_kind() != HloInstruction::FusionKind::kCustom; case HloOpcode::kCopy: case HloOpcode::kIota: case HloOpcode::kConstant: case HloOpcode::kReduce: case HloOpcode::kBitcast: case HloOpcode::kBroadcast: case HloOpcode::kConcatenate: case HloOpcode::kDynamicSlice: case HloOpcode::kDynamicUpdateSlice: case HloOpcode::kGather: case HloOpcode::kPad: case HloOpcode::kReduceWindow: case HloOpcode::kReshape: case HloOpcode::kReverse: case HloOpcode::kScatter: case HloOpcode::kSlice: case HloOpcode::kTranspose: return true; default: return false; } } GpuBackendConfig GetTritonGpuBackendConfig( const BlockLevelParameters& block_level_parameters) { GpuBackendConfig gpu_backend_config; gpu_backend_config.mutable_fusion_backend_config()->set_kind( std::string(kTritonFusionKind)); *gpu_backend_config.mutable_fusion_backend_config() ->mutable_block_level_fusion_config() = block_level_parameters.ToBlockLevelFusionConfig(); return gpu_backend_config; } class PriorityFusionQueue { using Priority = absl::Duration; using CanFuseCallback = std::function<FusionDecision( HloInstruction* , int64_t )>; public: PriorityFusionQueue(HloComputation* computation, const GpuHloCostAnalysis::Options& cost_analysis_options, const 
se::DeviceDescription* device_info, FusionProcessDumpProto* fusion_process_dump, tsl::thread::ThreadPool* thread_pool, mlir::MLIRContext* mlir_context, HloFusionAnalysisCache& fusion_analysis_cache, FusionDeduplicationCache& fusion_deduplication_cache, bool triton_softmax_priority_fusion_enabled) : computation_(computation), device_info_(device_info), cost_analysis_(cost_analysis_options, *device_info), gpu_indexing_performance_model_(device_info, &fusion_analysis_cache, cost_analysis_options.shape_size, mlir_context), fusion_process_dump_(fusion_process_dump), thread_pool_(thread_pool), mlir_context_(mlir_context), fusion_analysis_cache_(fusion_analysis_cache), fusion_deduplication_cache_(fusion_deduplication_cache), triton_softmax_priority_fusion_enabled_( triton_softmax_priority_fusion_enabled) { VLOG(2) << "Running full HLO cost analysis for " << computation_->name(); TF_CHECK_OK(computation_->Accept(&cost_analysis_)); dump_fusion_visualization_ = computation->parent() ->config() .debug_options() .xla_dump_fusion_visualization(); std::vector<HloInstruction*> instructions; for (auto* instruction : computation->MakeInstructionPostOrder()) { TF_CHECK_OK(UpdatePerformanceModelCache(instruction)); if (instruction->opcode() == HloOpcode::kParameter || instruction->user_count() == 0 || !instruction->IsFusible() || instruction->opcode() == HloOpcode::kTuple || instruction->opcode() == HloOpcode::kGetTupleElement) { continue; } instructions.push_back(instruction); } ComputeAndSetPriorities(instructions); } void ComputeAndSetPriorities( const std::vector<HloInstruction*>& instructions) { std::vector<Priority> priorities = ComputePriorities(instructions); for (auto [instruction, priority] : llvm::zip(instructions, priorities)) { auto key = std::make_pair(priority, instruction->unique_id()); auto reverse_it = reverse_map_.find(instruction); if (reverse_it != reverse_map_.end()) { const PriorityQueue::iterator& queue_it = reverse_it->second; if (key == queue_it->first) { continue; } producer_priority_queue_.erase(queue_it); reverse_map_.erase(reverse_it); } if (priority < absl::ZeroDuration()) { continue; } auto emplace_result = producer_priority_queue_.emplace(key, instruction); reverse_map_.emplace(instruction, emplace_result.first); } } std::vector<Priority> ComputePriorities( const std::vector<HloInstruction*>& instructions) { auto schedule_or_run = [this](std::function<void()> fn) { if (thread_pool_) { thread_pool_->Schedule(std::move(fn)); } else { fn(); } }; tsl::BlockingCounter counter(instructions.size()); std::vector<Priority> priorities(instructions.size()); for (size_t i = 0; i < instructions.size(); ++i) { schedule_or_run([&, i] { priorities[i] = CalculateProducerPriority(instructions[i]); counter.DecrementCount(); }); } counter.Wait(); return priorities; } bool DequeueNextProducer() { current_producer_ = nullptr; current_consumers_.clear(); while (!producer_priority_queue_.empty() && current_consumers_.empty()) { auto next_it = std::prev(producer_priority_queue_.end()); current_producer_ = next_it->second; producer_priority_queue_.erase(next_it); reverse_map_.erase(current_producer_); current_consumers_ = current_producer_->users(); if (current_producer_->opcode() == HloOpcode::kBitcast) { llvm::erase_if(current_consumers_, [&](HloInstruction* consumer) { return !CanFuseCached(current_producer_, consumer); }); } } return !current_consumers_.empty(); } absl::Status UpdatePerformanceModelCache(HloInstruction* producer) { bool is_triton_fusion = IsGenericTritonFusion(*producer); if 
(!IsFusible(*producer) && !is_triton_fusion) { return absl::OkStatus(); } if (gpu_performance_model_cache_.Get(*producer)) { return absl::OkStatus(); } EstimateRunTimeData runtime_data; if (is_triton_fusion) { TF_ASSIGN_OR_RETURN( runtime_data, gpu_indexing_performance_model_.EstimateRunTimeForTriton(producer)); } else { auto config = GpuPerformanceModelOptions::PriorityFusion( &fusion_analysis_cache_, &gpu_performance_model_cache_); runtime_data = GpuPerformanceModel::EstimateRunTimeForInstruction( producer, *device_info_, &cost_analysis_, config); } gpu_performance_model_cache_.Set(*producer, runtime_data); return absl::OkStatus(); } absl::Status UpdatePriorities() { for (auto instruction : to_update_priority_) { TF_RETURN_IF_ERROR(cost_analysis_.RevisitInstruction(instruction)); } for (auto producer : to_update_priority_) { TF_RETURN_IF_ERROR(UpdatePerformanceModelCache(producer)); } ComputeAndSetPriorities(std::vector<HloInstruction*>{ to_update_priority_.begin(), to_update_priority_.end()}); to_update_priority_.clear(); operands_to_new_consumers_.clear(); operands_to_removed_consumers_runtimes_.clear(); return absl::OkStatus(); } void PreFusion(HloInstruction* producer, HloInstruction* consumer) { if (dump_fusion_visualization_) { RegisterFusionState( *computation_, absl::StrCat("About to fuse |", producer->name(), "| into |", consumer->name(), "| inside PriorityFusion"), *consumer, producer); } } void InvalidateCaches(HloInstruction* instruction) { can_fuse_cache_.erase(instruction); for (const HloInstruction* operand : instruction->operands()) { auto it = can_fuse_cache_.find(operand); if (it != can_fuse_cache_.end()) { it->second.erase(instruction); } } block_level_parameters_cache_.erase(instruction); for (const HloInstruction* operand : instruction->operands()) { auto it = block_level_parameters_cache_.find(operand); if (it != block_level_parameters_cache_.end()) { it->second.erase(instruction); } } gpu_performance_model_cache_.Invalidate(*instruction); fusion_analysis_cache_.Invalidate(*instruction); fusion_info_cache_.Invalidate(instruction); } void UpdateRuntimes( GpuPerformanceModel::RunTimes& runtimes, const HloInstruction* consumer, const absl::flat_hash_map<const HloInstruction*, absl::Duration>& original_consumers) { auto it = original_consumers.find(consumer); if (it != original_consumers.end()) { runtimes.time_fused += it->second; auto consumer_cache_result = gpu_performance_model_cache_.Get(*consumer); CHECK(consumer_cache_result.has_value()); runtimes.time_unfused += (*consumer_cache_result).exec_time; } } void ComputeRuntimesOfRemovedConsumers() { for (const auto& pair : operands_to_new_consumers_) { auto operand = pair.first; if (!reverse_map_.contains(operand)) { continue; } if (!gpu_performance_model_cache_.ContainsConsumers(*operand)) { continue; } const auto& original_consumers = gpu_performance_model_cache_.GetAllConsumers(*operand); GpuPerformanceModel::RunTimes runtimes; for (auto consumer : current_consumers()) { UpdateRuntimes(runtimes, consumer, original_consumers); } UpdateRuntimes(runtimes, current_producer(), original_consumers); auto operand_cache_result = gpu_performance_model_cache_.Get(*operand); runtimes.time_unfused += (*operand_cache_result).exec_time + GpuPerformanceModel::kKernelLaunchOverhead; operands_to_removed_consumers_runtimes_.emplace(operand, runtimes); } } void OnFusingInstruction(HloInstruction* fusion, HloInstruction* original_producer, HloInstruction* original_consumer) { if (fusion_process_dump_) { auto* fusion_step = 
fusion_process_dump_->add_fusion_steps()->mutable_fusion(); fusion_step->set_fusion_name(std::string(fusion->name())); fusion_step->set_producer_name(std::string(original_producer->name())); fusion_step->set_consumer_name(std::string(original_consumer->name())); } if (dump_fusion_visualization_) { RegisterFusionState( *computation_, absl::StrCat("Fused |", original_producer->name(), "| into |", fusion->name(), "| inside PriorityFusion"), *fusion); } if (fusion != original_consumer) { RemoveInstruction(original_consumer); } for (HloInstruction* operand : fusion->operands()) { if (operand == original_producer || operand->opcode() == HloOpcode::kConstant || operand->opcode() == HloOpcode::kGetTupleElement) { continue; } if (!operand->IsFusible()) { continue; } to_update_priority_.insert(operand); operands_to_new_consumers_[operand].push_back(fusion); } to_update_priority_.insert(fusion); } void RemoveInstruction(HloInstruction* instruction) { to_update_priority_.erase(instruction); fusion_analysis_cache_.Invalidate(*instruction); auto reverse_it = reverse_map_.find(instruction); if (reverse_it == reverse_map_.end()) { return; } producer_priority_queue_.erase(reverse_it->second); reverse_map_.erase(reverse_it); } absl::flat_hash_map<const HloInstruction*, BlockLevelParameters> GetBlockLevelParametersMap(const HloInstruction* producer) { auto it = block_level_parameters_cache_.find(producer); if (it == block_level_parameters_cache_.end()) { return {}; } return it->second; } HloInstruction* current_producer() { return current_producer_; } const std::vector<HloInstruction*>& current_consumers() { return current_consumers_; } private: Priority CalculateProducerPriority(HloInstruction* producer) { if (producer->opcode() == HloOpcode::kBitcast) { return absl::InfiniteDuration(); } if (producer->opcode() == HloOpcode::kConstant) { return -absl::InfiniteDuration(); } if (auto fusion_decision = CanFuseWithAllNonBitcastUsers(producer); !fusion_decision) { if (fusion_process_dump_) { absl::MutexLock lock(&fusion_process_dump_mutex_); auto* step = fusion_process_dump_->add_fusion_steps() ->mutable_producer_ineligible(); step->set_producer_name(std::string(producer->name())); step->set_reason(fusion_decision.Explain()); } return -absl::InfiniteDuration(); } auto removed_consumers_runtime_it = operands_to_removed_consumers_runtimes_.find(producer); bool is_incremental_update = removed_consumers_runtime_it != operands_to_removed_consumers_runtimes_.end(); absl::Span<HloInstruction* const> fused_consumers = is_incremental_update ? 
operands_to_new_consumers_.find(producer)->second : absl::MakeConstSpan(producer->users()); GpuPerformanceModel::RunTimes run_times = GpuPerformanceModel::EstimateRunTimesForPriorityFusion( producer, *device_info_, &cost_analysis_, GpuPerformanceModelOptions::PriorityFusion( &fusion_analysis_cache_, &gpu_performance_model_cache_), fused_consumers); Priority current_priority; if (is_incremental_update) { const GpuPerformanceModel::RunTimes& removed_consumers_runtime = removed_consumers_runtime_it->second; run_times.time_unfused -= removed_consumers_runtime.time_unfused; run_times.time_fused -= removed_consumers_runtime.time_fused; const PriorityQueue::iterator& queue_it = FindOrDie(reverse_map_, producer); current_priority = queue_it->first.first; } if (fusion_process_dump_) { absl::MutexLock lock(&fusion_process_dump_mutex_); auto* step = fusion_process_dump_->add_fusion_steps()->mutable_update_priority(); step->set_producer_name(std::string(producer->name())); for (auto* consumer : producer->users()) { step->add_consumer_names(std::string(consumer->name())); } step->set_us_fused(absl::ToDoubleMicroseconds(run_times.time_fused)); step->set_us_unfused(absl::ToDoubleMicroseconds(run_times.time_unfused)); } return current_priority + run_times.time_unfused - run_times.time_fused; } FusionDecision IsTritonSupported(const HloInstruction& instruction) { if (instruction.opcode() != HloOpcode::kFusion) { return IsTritonSupportedInstruction( instruction, device_info_->gpu_compute_capability()); } for (const HloInstruction* instruction : instruction.fused_instructions_computation()->instructions()) { if (auto codegen_decision = IsTritonSupportedInstruction( *instruction, device_info_->gpu_compute_capability()); !codegen_decision) { return codegen_decision; } } return FusionDecision::Allow(); } TiledRunTimeDataOrError GetTiledRunTimeDataCached( const HloInstruction* producer, const HloInstruction* consumer) { FusionDeduplicationCache::FusionId fusion_id = [&]() { absl::MutexLock lock(&fusion_deduplication_cache_mutex_); return fusion_deduplication_cache_.GetFusionId(*producer, *consumer); }(); { absl::MutexLock lock(&tiled_run_time_data_cache_mutex_); auto it = tiled_run_time_data_cache_.find(fusion_id); if (it != tiled_run_time_data_cache_.end()) { return it->second; } } auto fusion = HloFusionAdaptor::ForProducerConsumer(producer, consumer); absl::StatusOr<TiledRunTimeDataOrError> result_or_status = gpu_indexing_performance_model_.TryFindBestTilingForFusion(*fusion); TiledRunTimeDataOrError tiled_run_time_data_or_error = [&]() -> TiledRunTimeDataOrError { if (result_or_status.ok()) { return *result_or_status; } else { return FusionDecision::Forbid( absl::StrCat("TiledRunTimeDataOrError return status: ", result_or_status.status().message())); } }(); if (const auto* fusion_decision = std::get_if<FusionDecision>(&tiled_run_time_data_or_error)) { tiled_run_time_data_or_error = FusionDecision::Forbid( absl::StrCat("Fusion can not be tiled with SymbolicTileAnalysis: ", fusion_decision->Explain())); } absl::MutexLock lock(&tiled_run_time_data_cache_mutex_); tiled_run_time_data_cache_.emplace(fusion_id, tiled_run_time_data_or_error); return tiled_run_time_data_or_error; } FusionDecision CanFuseTriton(HloInstruction* producer, HloInstruction* consumer) { if (!triton_softmax_priority_fusion_enabled_) { return FusionDecision::Forbid("triton softmax fusion is not enabled"); } if (IsGenericTritonFusion(*producer)) { if (!IsFusible(*consumer)) { return FusionDecision::Forbid("the consumer is not fusible"); } if 
(auto fusion_decision = IsTritonSupported(*consumer); !fusion_decision) { return fusion_decision; } } else { if (!IsFusible(*producer)) { return FusionDecision::Forbid("the producer is not fusible"); } if (auto fusion_decision = IsTritonSupported(*producer); !fusion_decision) { return fusion_decision; } } TiledRunTimeDataOrError tiled_run_time_data_or_error = GetTiledRunTimeDataCached(producer, consumer); if (const auto* fusion_decision = std::get_if<FusionDecision>(&tiled_run_time_data_or_error)) { return *fusion_decision; } TiledRunTimeData tiled_run_time_data = std::get<TiledRunTimeData>(std::move(tiled_run_time_data_or_error)); gpu_performance_model_cache_.Set( *producer, *consumer, tiled_run_time_data.runtime_data.exec_time); { absl::MutexLock lock(&block_level_parameters_cache_mutex_); block_level_parameters_cache_[producer][consumer] = tiled_run_time_data.block_level_parameters; } return FusionDecision::Allow(); } FusionDecision CanFuse(HloInstruction* producer, HloInstruction* consumer) { if (IsGenericTritonFusion(*producer) || IsGenericTritonFusion(*consumer)) { return CanFuseTriton(producer, consumer); } if (!IsFusible(*producer)) { return FusionDecision::Forbid("the producer is not fusible"); } if (!IsFusible(*consumer)) { return FusionDecision::Forbid("the consumer is not fusible"); } if (consumer->opcode() == HloOpcode::kBitcast) { return FusionDecision::Forbid( "not fusing into a single bitcast as consumer"); } if (auto can_fuse = CanEmitInputFusedScatter(*producer, *consumer); !can_fuse) { return can_fuse; } auto contains_significant_reduce = [&](const HloInstruction* instr) { auto fusion = HloFusionAdaptor::ForInstruction(instr); return HloAnyOf(*fusion, [](auto node) { if (!(node.opcode() == HloOpcode::kReduce && node.shape().IsArray())) { return false; } int64_t reduction_size = ShapeUtil::ElementsIn(node.instruction().operand(0)->shape()) / ShapeUtil::ElementsIn(node.shape()); return reduction_size >= 16; }); }; if (contains_significant_reduce(producer) && contains_significant_reduce(consumer)) { return FusionDecision::Forbid( "both the producer and the consumer contain a reduce"); } const auto& analysis = fusion_analysis_cache_.Get(*producer); if (analysis.GetEmitterFusionKind() == HloFusionAnalysis::EmitterFusionKind::kReduction) { const auto& analysis_fused = fusion_analysis_cache_.Get(*producer, *consumer); if (analysis_fused.GetEmitterFusionKind() == HloFusionAnalysis::EmitterFusionKind::kLoop) { return FusionDecision::Forbid( "fusion into output of a reduce fusion would create a loop fusion"); } } if (auto fits_budget = FusionFitsInBudget( *consumer, *producer, *device_info_, true, &fusion_info_cache_); !fits_budget) { return fits_budget; } if (cost_analysis_.ProducerConsumerMergedTooLarge(*producer, *consumer)) { return FusionDecision::Forbid( "the fusion would result in an overly large code duplication"); } if (producer == producer->parent()->root_instruction()) { return FusionDecision::Forbid( "not fusing into the output of the root instruction"); } return InstructionFusion::ShouldFuseInPlaceOp(producer, consumer); } FusionDecision CanFuseCached(HloInstruction* producer, HloInstruction* consumer) { { absl::MutexLock lock(&can_fuse_cache_mutex_); auto& producer_cache = can_fuse_cache_[producer]; auto it = producer_cache.find(consumer); if (it != producer_cache.end()) { return it->second; } } auto fusion_decision = CanFuse(producer, consumer); { absl::MutexLock lock(&can_fuse_cache_mutex_); can_fuse_cache_[producer].insert_or_assign(consumer, fusion_decision); } 
return fusion_decision; } FusionDecision CanFuseWithAllNonBitcastUsers(HloInstruction* producer) { if (producer->users().empty()) { return FusionDecision::Forbid("No users to fuse"); } bool has_non_bitcast_user = false; for (const auto& user : producer->users()) { if (user->opcode() == HloOpcode::kBitcast) { continue; } has_non_bitcast_user = true; if (auto fusion_decision = CanFuseCached(producer, user); !fusion_decision) { VLOG(10) << "Cannot fuse " << producer->name() << " with " << user->name() << ", because: " << fusion_decision.Explain(); return fusion_decision; } } if (!has_non_bitcast_user) { return FusionDecision::Forbid( "not fusing because there are only bitcast users"); } return FusionDecision::Allow(); } HloComputation* computation_; const se::DeviceDescription* device_info_; GpuHloCostAnalysis cost_analysis_; GpuPerformanceModelWithIndexingAnalysis gpu_indexing_performance_model_; using PriorityQueue = std::map<std::pair<Priority, int>, HloInstruction*>; PriorityQueue producer_priority_queue_; absl::flat_hash_map<HloInstruction*, PriorityQueue::iterator> reverse_map_; HloInstruction* current_producer_; std::vector<HloInstruction*> current_consumers_; absl::flat_hash_set<HloInstruction*> to_update_priority_; absl::flat_hash_map<HloInstruction*, std::vector<HloInstruction*>> operands_to_new_consumers_; absl::flat_hash_map<HloInstruction*, GpuPerformanceModel::RunTimes> operands_to_removed_consumers_runtimes_; FusionProcessDumpProto* fusion_process_dump_; absl::Mutex fusion_process_dump_mutex_; tsl::thread::ThreadPool* thread_pool_; mlir::MLIRContext* mlir_context_; HloFusionAnalysisCache& fusion_analysis_cache_; FusionDeduplicationCache& fusion_deduplication_cache_; absl::Mutex fusion_deduplication_cache_mutex_; absl::flat_hash_map< const HloInstruction*, absl::flat_hash_map<const HloInstruction*, FusionDecision>> can_fuse_cache_; absl::Mutex can_fuse_cache_mutex_; absl::flat_hash_map< const HloInstruction*, absl::flat_hash_map<const HloInstruction*, BlockLevelParameters>> block_level_parameters_cache_; absl::Mutex block_level_parameters_cache_mutex_; absl::flat_hash_map<FusionDeduplicationCache::FusionId, TiledRunTimeDataOrError> tiled_run_time_data_cache_; absl::Mutex tiled_run_time_data_cache_mutex_; GpuPerformanceModelCache gpu_performance_model_cache_; FusionInfoCache fusion_info_cache_; bool triton_softmax_priority_fusion_enabled_; bool dump_fusion_visualization_; }; } bool IsSmallConstant(const HloInstruction* instr) { return instr->opcode() == HloOpcode::kConstant && instr->shape().IsArray() && ShapeUtil::ElementsIn(instr->shape()) <= 1; } bool PriorityFusion::ConsumeFuel(HloInstruction* producer, HloInstruction* consumer) { return xla::ConsumeFuel(name(), [&] { return absl::StrFormat("Not fusing producer %s with consumer %s", producer->name(), consumer->name()); }); }; absl::StatusOr<bool> PriorityFusion::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { bool dump_enabled = DumpingEnabledForHloPass(name(), module->config().debug_options()); if (dump_enabled) { fusion_process_dump_ = std::make_unique<FusionProcessDumpProto>(); *fusion_process_dump_->mutable_gpu_device_info() = device_info_.ToGpuProto(); } auto fusible_computations = GetFusibleComputations(*module, execution_threads); for (auto* computation : fusible_computations) { for (auto* instruction : computation->instructions()) { module->SetAndUniquifyInstrName(instruction, absl::StrCat(instruction->name(), ".0")); } } if (dump_enabled) { 
fusion_process_dump_->set_hlo_module_before_fusion( module->ToString(HloPrintOptions::ShortParsable())); } bool triton_softmax_priority_fusion_enabled = module->config() .debug_options() .xla_gpu_experimental_enable_triton_softmax_priority_fusion(); FusionDeduplicationCache fusion_deduplication_cache = FusionDeduplicationCache::Create(*module); int changed = false; for (auto* computation : fusible_computations) { CHECK(!computation->IsFusionComputation()); auto fusion_queue = std::make_unique<PriorityFusionQueue>( computation, cost_analysis_options_, &device_info_, fusion_process_dump_.get(), thread_pool_, &mlir_context_, fusion_analysis_cache_, fusion_deduplication_cache, triton_softmax_priority_fusion_enabled); while (fusion_queue->DequeueNextProducer()) { auto producer = fusion_queue->current_producer(); absl::flat_hash_map<const HloInstruction*, BlockLevelParameters> block_level_parameters_map = fusion_queue->GetBlockLevelParametersMap(producer); for (auto* consumer : fusion_queue->current_consumers()) { if (consumer->opcode() == HloOpcode::kBitcast) { continue; } if (!ConsumeFuel(producer, consumer)) continue; VLOG(5) << "next: " << consumer->name() << "(" << consumer << ") + " << producer->name() << "(" << producer << ")"; int64_t consumer_operand_index = consumer->operand_index(producer); fusion_queue->PreFusion(producer, consumer); auto fusion_instruction = Fuse(producer, consumer); fusion_deduplication_cache.UpdateFusedInstructionId( *fusion_instruction, *producer, *consumer, consumer_operand_index); fusion_queue->OnFusingInstruction(fusion_instruction, producer, consumer); auto backend_config_it = block_level_parameters_map.find(consumer); if (backend_config_it != block_level_parameters_map.end()) { TF_RETURN_IF_ERROR(fusion_instruction->set_backend_config( GetTritonGpuBackendConfig(backend_config_it->second))); } changed = true; } fusion_queue->ComputeRuntimesOfRemovedConsumers(); if (producer->user_count() == 0) { fusion_queue->InvalidateCaches(producer); producer->DetachFromOperandsAndUsers(); fusion_queue->RemoveInstruction(producer); TF_RETURN_IF_ERROR(computation->RemoveInstruction(producer)); } for (auto* consumer : fusion_queue->current_consumers()) { fusion_queue->InvalidateCaches(consumer); } TF_RETURN_IF_ERROR(fusion_queue->UpdatePriorities()); } std::vector<HloInstruction*> constants; for (auto* instruction : computation->instructions()) { if (IsSmallConstant(instruction)) { constants.push_back(instruction); } } for (auto* constant : constants) { auto users = constant->users(); for (auto* user : users) { if ((IsFusible(*user) || IsGenericTritonFusion(*user)) && CanEmitInputFusedScatter(*constant, *user)) { Fuse(constant, user); changed = true; } } } } fusion_analysis_cache_.Clear(); if (dump_enabled) { DumpPerModuleProtobufToFile(*module, *fusion_process_dump_, module->config().debug_options(), "priority_fusion_dump"); } return changed; } HloInstruction::FusionKind PriorityFusion::ChooseKind( const HloInstruction* producer, const HloInstruction* consumer) { const auto& analysis = fusion_analysis_cache_.Get(*producer, *consumer); switch (analysis.GetEmitterFusionKind()) { case HloFusionAnalysis::EmitterFusionKind::kLoop: return HloInstruction::FusionKind::kLoop; case HloFusionAnalysis::EmitterFusionKind::kTriton: case HloFusionAnalysis::EmitterFusionKind::kCustomFusion: case HloFusionAnalysis::EmitterFusionKind::kCuDnn: return HloInstruction::FusionKind::kCustom; case HloFusionAnalysis::EmitterFusionKind::kConcatenate: case 
HloFusionAnalysis::EmitterFusionKind::kReduction: case HloFusionAnalysis::EmitterFusionKind::kTranspose: case HloFusionAnalysis::EmitterFusionKind::kInputSlices: case HloFusionAnalysis::EmitterFusionKind::kScatter: return HloInstruction::FusionKind::kInput; } } HloInstruction* PriorityFusion::Fuse(HloInstruction* producer, HloInstruction* consumer) { VLOG(2) << "Fusing " << producer->ToString() << " into " << consumer->ToString(); HloComputation* computation = consumer->parent(); auto kind = ChooseKind(producer, consumer); HloInstruction* fusion_instruction = consumer; if (fusion_instruction->opcode() != HloOpcode::kFusion) { fusion_instruction = computation->AddInstruction( HloInstruction::CreateFusion(consumer->shape(), kind, consumer)); TF_CHECK_OK(computation->ReplaceInstruction(consumer, fusion_instruction)); } else if (kind != fusion_instruction->fusion_kind()) { fusion_instruction->set_fusion_kind(kind); } fusion_instruction->set_called_computations_execution_thread( computation->execution_thread(), false); if (producer->opcode() == HloOpcode::kFusion) { fusion_instruction->MergeFusionInstruction(producer); } else { fusion_instruction->FuseInstruction(producer); } if (fusion_instruction != consumer) { VLOG(2) << " created new fusion: " << fusion_instruction->ToString(); } return fusion_instruction; } } }
#include "xla/service/gpu/transforms/priority_fusion.h" #include <stdint.h> #include <memory> #include <optional> #include <string> #include <utility> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/gpu_device_info_for_tests.h" #include "xla/service/gpu/gpu_fusible.h" #include "xla/service/gpu/hlo_fusion_analysis.h" #include "xla/service/gpu/model/gpu_hlo_cost_analysis.h" #include "xla/service/hlo_cost_analysis.h" #include "xla/service/pattern_matcher.h" #include "xla/service/pattern_matcher_gmock.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/tests/hlo_test_base.h" #include "xla/tests/verified_hlo_module.h" #include "tsl/platform/status_matchers.h" #include "tsl/platform/statusor.h" namespace m = ::xla::match; using ::testing::UnorderedElementsAre; using ::tsl::testing::IsOk; using ::tsl::testing::IsOkAndHolds; namespace xla { namespace gpu { class PriorityFusionTest : public HloTestBase { HloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() const { return [&](const Shape& shape) { constexpr int64_t kPointerSize = 8; return ShapeUtil::ByteSizeOf(shape, kPointerSize); }; } public: std::vector<HloFusionAnalysis::EmitterFusionKind> RunAndGetFusionKinds( absl::string_view hlo) { auto module = ParseAndReturnVerifiedModule(hlo).value(); EXPECT_THAT(priority_fusion_.Run(module.get()), IsOkAndHolds(true)); EXPECT_THAT(module->RemoveUnusedComputations(), IsOk()); std::vector<HloFusionAnalysis::EmitterFusionKind> kinds; for (auto computation : module->computations()) { if (!computation->FusionInstruction()) continue; auto device_info = TestGpuDeviceInfo::RTXA6000DeviceInfo(); auto analysis = HloFusionAnalysis::Create( *computation->FusionInstruction(), device_info); kinds.push_back(analysis.GetEmitterFusionKind()); } return kinds; } PriorityFusion priority_fusion_{ nullptr, TestGpuDeviceInfo::RTXA6000DeviceInfo(), GpuHloCostAnalysis::Options{ShapeSizeBytesFunction(), {}, {}, true}}; }; class PriorityFusionWithTritonEnabledTest : public PriorityFusionTest { public: DebugOptions GetDebugOptionsForTest() override { DebugOptions debug_options = PriorityFusionTest::GetDebugOptionsForTest(); debug_options .set_xla_gpu_experimental_enable_triton_softmax_priority_fusion(true); return debug_options; } }; TEST_F(PriorityFusionTest, FuseWithSharedArgument) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module ENTRY main { %p0 = f32[] parameter(0) %p1 = f32[] parameter(1) %subtract = f32[] subtract(%p0, %p1) %compare = pred[] compare(%subtract, %subtract), direction=NE %add = f32[] add(%p0, %p1) %abs = f32[] abs(%subtract) ROOT %select = f32[] select(%compare, %add, %abs) })") .value(); EXPECT_THAT(priority_fusion_.Run(module.get()), IsOkAndHolds(true)); HloInstruction* root = module->entry_computation()->root_instruction(); EXPECT_THAT(root, GmockMatch(m::Fusion())); EXPECT_EQ(root->fusion_kind(), HloInstruction::FusionKind::kLoop); } TEST_F(PriorityFusionTest, FusionFusionWithDuplication) { absl::string_view kHlo = R"( HloModule test_module square { p = f32[16384]{0} parameter(0) ROOT m = f32[16384]{0} multiply(p, p) } exp { p = f32[16384]{0} parameter(0) ROOT e = f32[16384]{0} exponential(p) } log { p = f32[16384]{0} parameter(0) ROOT l = f32[16384]{0} log(p) } ENTRY main { 
p = f32[16384]{0} parameter(0) s = f32[16384]{0} fusion(p), kind=kLoop, calls=square e = f32[16384]{0} fusion(s), kind=kLoop, calls=exp l = f32[16384]{0} fusion(s), kind=kInput, calls=log ROOT t = (f32[16384], f32[16384]) tuple(l, e) })"; RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"( CHECK: ENTRY CHECK-NEXT: %[[PARAM:.*]] = f32[16384]{0} parameter(0) CHECK-NEXT: %[[FUSION_0:.*]] = f32[16384]{0} fusion(%[[PARAM]]) CHECK-NEXT: %[[FUSION_1:.*]] = f32[16384]{0} fusion(%[[PARAM]]) CHECK-NEXT: ROOT {{.*}} tuple(%[[FUSION_0]], %[[FUSION_1]]) )"); } TEST_F(PriorityFusionTest, FuseBroadcastIntoBitcastConsumers) { absl::string_view kHlo = R"( HloModule test_module ENTRY main { param_0 = f32[96]{0} parameter(0) broadcast = f32[8,96,128,7]{3,2,1,0} broadcast(param_0), dimensions={1} bitcast.6079.2 = f32[8,24,4,128,7]{4,3,2,1,0} bitcast(broadcast) ROOT transpose.1990.2 = f32[8,24,128,7,4]{4,3,2,1,0} transpose(bitcast.6079.2), dimensions={0,1,3,4,2} } )"; RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"( CHECK: ENTRY CHECK-NEXT: %[[PARAM:.*]] = f32[96]{0} parameter(0) CHECK-NEXT: ROOT %{{.*}} fusion(%[[PARAM]]) )"); } TEST_F(PriorityFusionTest, FuseWideningConvertIntoConsumers) { absl::string_view kHlo = R"( HloModule test_module ENTRY main { p = f16[512]{0} parameter(0) a = f16[512]{0} add(p, p) c = f32[512]{0} convert(a) s = f32[512]{0} multiply(c, c) bc = s32[512]{0} bitcast(c) ROOT t = (f32[512], s32[512]) tuple(s, bc) })"; RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"( CHECK: ENTRY CHECK-NEXT: %[[PARAM:.*]] = f16[512]{0} parameter(0) CHECK-NEXT: %[[FUSION_F32:.*]] = f32[512]{0} fusion(%[[PARAM]]) CHECK-NEXT: %[[CONVERT_FUSION:.*]] = f32[512]{0} fusion(%[[PARAM]]) CHECK-NEXT: %[[BITCAST:.*]] = s32[512]{0} bitcast(%[[CONVERT_FUSION]]) CHECK-NEXT: ROOT %{{.*}} = (f32[512]{0}, s32[512]{0}) tuple(%[[FUSION_F32]], %[[BITCAST]]) )"); } TEST_F(PriorityFusionTest, FuseConvertIntoReduce) { absl::string_view kHlo = R"( HloModule test_module add { p0 = f32[] parameter(0) p1 = f32[] parameter(1) ROOT add.13235 = f32[] add(p0, p1) } ENTRY main { param_0_0.79 = bf16[1024,8192]{1,0} parameter(0) param_1_0.79 = bf16[1024,8192]{1,0} parameter(1) param_2.483 = f32[8192]{0} parameter(2) param_4.2892 = bf16[1024,8192]{1,0} parameter(3) convert.21854 = f32[1024,8192]{1,0} convert(param_0_0.79) convert.21855 = f32[1024,8192]{1,0} convert(param_1_0.79) constant_7773 = f32[] constant(0) broadcast.14555 = f32[1024,8192]{1,0} broadcast(param_2.483), dimensions={1} multiply.6906 = f32[1024,8192]{1,0} multiply(broadcast.14555, convert.21854) reduce.4813 = f32[1024]{0} reduce(multiply.6906, constant_7773), dimensions={1}, to_apply=add convert.13970 = bf16[1024]{0} convert(reduce.4813) convert.21534 = f32[1024,8192]{1,0} convert(param_4.2892) multiply.6910.clone.1 = f32[1024,8192]{1,0} multiply(broadcast.14555, convert.21534) reduce.4811.clone.1 = f32[1024]{0} reduce(multiply.6910.clone.1, constant_7773), dimensions={1}, to_apply=add convert.13967.clone.1 = bf16[1024]{0} convert(reduce.4811.clone.1) multiply.6908.clone.1 = f32[1024,8192]{1,0} multiply(broadcast.14555, convert.21855) reduce.4812.clone.1 = f32[1024]{0} reduce(multiply.6908.clone.1, constant_7773), dimensions={1}, to_apply=add convert.13969.clone.1 = bf16[1024]{0} convert(reduce.4812.clone.1) ROOT fusion.241 = (bf16[1024]{0}, bf16[1024]{0}, bf16[1024]{0}) tuple(convert.13970, convert.13967.clone.1, convert.13969.clone.1) })"; RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"( CHECK-COUNT-3: ROOT 
{{.*}} convert( CHECK: ENTRY %main CHECK-COUNT-3: fusion )"); } TEST_F(PriorityFusionTest, ReductionEpilogueFusionRegressionTest) { absl::string_view kHlo = R"( HloModule test_module add { rhs.407 = f32[] parameter(1) lhs.407 = f32[] parameter(0) ROOT add.24451 = f32[] add(lhs.407, rhs.407) } ENTRY main { param_1.15162 = f32[2752]{0} parameter(1) convert.44829 = bf16[2752]{0} convert(param_1.15162) bitcast.24686 = bf16[1,1,2752]{2,1,0} bitcast(convert.44829) convert.44468 = f32[1,1,2752]{2,1,0} convert(bitcast.24686) constant_13722 = bf16[] constant(1) convert.17451 = f32[] convert(constant_13722) broadcast.17565 = f32[1,1,2752]{2,1,0} broadcast(convert.17451), dimensions={} negate.167 = f32[1,1,2752]{2,1,0} negate(convert.44468) exponential.569 = f32[1,1,2752]{2,1,0} exponential(negate.167) add.1850 = f32[1,1,2752]{2,1,0} add(broadcast.17565, exponential.569) divide.1376 = f32[1,1,2752]{2,1,0} divide(broadcast.17565, add.1850) multiply.9709 = f32[1,1,2752]{2,1,0} multiply(convert.44468, divide.1376) param_0.15005 = f32[2752]{0} parameter(0) convert.44826 = bf16[2752]{0} convert(param_0.15005) bitcast.24683 = bf16[1,1,2752]{2,1,0} bitcast(convert.44826) convert.44467 = f32[1,1,2752]{2,1,0} convert(bitcast.24683) multiply.9708 = f32[1,1,2752]{2,1,0} multiply(multiply.9709, convert.44467) convert.16959 = bf16[1,1,2752]{2,1,0} convert(multiply.9708) fusion.3203 = bf16[2752]{0} bitcast(convert.16959) convert.15093 = f32[2752]{0} convert(fusion.3203) broadcast.13841 = f32[8192,2752]{1,0} broadcast(convert.15093), dimensions={1} param_0.15525 = bf16[8192,2752]{1,0} parameter(2) convert.13738 = f32[8192,2752]{1,0} convert(param_0.15525) multiply.6422 = f32[8192,2752]{1,0} multiply(broadcast.13841, convert.13738) constant_14382 = f32[] constant(0) fusion.339 = f32[8192]{0} reduce(multiply.6422, constant_14382), dimensions={1}, to_apply=add convert.44633 = bf16[8192]{0} convert(fusion.339) ROOT bitcast.24487 = bf16[1,1,8192]{2,1,0} bitcast(convert.44633) } )"; EXPECT_THAT( RunAndGetFusionKinds(kHlo), UnorderedElementsAre(HloFusionAnalysis::EmitterFusionKind::kLoop, HloFusionAnalysis::EmitterFusionKind::kReduction)); RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"( CHECK: ENTRY CHECK: ROOT {{.*}} bitcast({{.*}}fusion{{.*}}) )"); } TEST_F(PriorityFusionTest, DoNotChangeReductionFusionToLoopFusion) { auto module = *ParseAndReturnVerifiedModule(R"( HloModule test_module add { rhs.407 = f32[] parameter(1) lhs.407 = f32[] parameter(0) ROOT add.24451 = f32[] add(lhs.407, rhs.407) } fused_computation { p0 = f32[16,64]{1,0} parameter(0) zero = f32[] constant(0.0) ROOT reduce = f32[16]{0} reduce(p0, zero), dimensions={1}, to_apply=add } ENTRY main { param0 = f32[16,64]{1,0} parameter(0) fusion = f32[16]{0} fusion(param0), kind=kLoop, calls=fused_computation ROOT slice = f32[8]{0} slice(fusion), slice={[0:8]} })"); EXPECT_THAT(priority_fusion_.Run(module.get()), IsOkAndHolds(false)); } TEST_F(PriorityFusionTest, DoNotFuseTransposeIntoReduce) { absl::string_view kHlo = R"( HloModule test_module add { Arg_1.1046 = f32[] parameter(1) Arg_0.1045 = f32[] parameter(0) ROOT add.3303 = f32[] add(Arg_0.1045, Arg_1.1046) } ENTRY main { param_0.17323 = pred[2048,2048]{1,0} parameter(0) broadcast.22829 = pred[1,12,2048,2048]{3,2,1,0} broadcast(param_0.17323), dimensions={2,3} param_1.19761 = bf16[2048,24576]{1,0} parameter(1) convert.29880.clone.1 = f32[2048,24576]{1,0} convert(param_1.19761) constant_10033_clone_1 = bf16[] constant(0.02002) convert.30056.clone.1 = f32[] convert(constant_10033_clone_1) 
broadcast.18898.clone.1 = f32[2048,24576]{1,0} broadcast(convert.30056.clone.1), dimensions={} multiply.13451.clone.1 = f32[2048,24576]{1,0} multiply(convert.29880.clone.1, broadcast.18898.clone.1) tanh.798.clone.1 = f32[2048,24576]{1,0} tanh(multiply.13451.clone.1) constant_10244_clone_1 = bf16[] constant(50) convert.30039.clone.1 = f32[] convert(constant_10244_clone_1) broadcast.18310.clone.1 = f32[2048,24576]{1,0} broadcast(convert.30039.clone.1), dimensions={} multiply.12550.clone.1 = f32[2048,24576]{1,0} multiply(tanh.798.clone.1, broadcast.18310.clone.1) convert.29370.clone.1 = bf16[2048,24576]{1,0} convert(multiply.12550.clone.1) bitcast.1 = bf16[2048,2048,12]{2,1,0} bitcast(convert.29370.clone.1) transpose.6582 = bf16[12,2048,2048]{2,1,0} transpose(bitcast.1), dimensions={2,1,0} bitcast = bf16[1,12,2048,2048]{3,2,1,0} bitcast(transpose.6582) convert.33705 = f32[1,12,2048,2048]{3,2,1,0} convert(bitcast) constant_10212 = f32[] constant(-2.38197633e+38) broadcast.22828 = f32[1,12,2048,2048]{3,2,1,0} broadcast(constant_10212), dimensions={} select.589 = f32[1,12,2048,2048]{3,2,1,0} select(broadcast.22829, convert.33705, broadcast.22828) bitcast.22075 = f32[12,2048,2048]{2,1,0} bitcast(select.589) constant_10192 = f32[] constant(-inf) reduce.1614 = f32[12,2048]{1,0} reduce(bitcast.22075, constant_10192), dimensions={2}, to_apply=add predarg = pred[1,1,2048,2048]{3,2,1,0} parameter(2) bitcast.11069 = pred[2048,2048]{1,0} bitcast(predarg) broadcast.22825 = pred[1,12,2048,2048]{3,2,1,0} broadcast(bitcast.11069), dimensions={2,3} transpose.6580 = bf16[12,2048,2048]{2,1,0} transpose(bitcast.1), dimensions={2,1,0} bitcast.2 = bf16[1,12,2048,2048]{3,2,1,0} bitcast(transpose.6580) convert.33703 = f32[1,12,2048,2048]{3,2,1,0} convert(bitcast.2) constant_10213 = f32[] constant(-2.38197633e+38) broadcast.22824 = f32[1,12,2048,2048]{3,2,1,0} broadcast(constant_10213), dimensions={} select.587 = f32[1,12,2048,2048]{3,2,1,0} select(broadcast.22825, convert.33703, broadcast.22824) broadcast.22819 = f32[1,12,2048,2048]{3,2,1,0} broadcast(reduce.1614), dimensions={1,2} subtract.1129 = f32[1,12,2048,2048]{3,2,1,0} subtract(select.587, broadcast.22819) exponential.418 = f32[1,12,2048,2048]{3,2,1,0} exponential(subtract.1129) bitcast.22074 = f32[12,2048,2048]{2,1,0} bitcast(exponential.418) constant_10490 = f32[] constant(0) reduce.1613 = f32[12,2048]{1,0} reduce(bitcast.22074, constant_10490), dimensions={2}, to_apply=add constant_468 = f32[] constant(-2.38197633e+38) broadcast.22833 = pred[1,12,2048,2048]{3,2,1,0} broadcast(bitcast.11069), dimensions={2,3} transpose.6584 = bf16[12,2048,2048]{2,1,0} transpose(bitcast.1), dimensions={2,1,0} bitcast.3 = bf16[1,12,2048,2048]{3,2,1,0} bitcast(transpose.6584) convert.33707 = f32[1,12,2048,2048]{3,2,1,0} convert(bitcast.3) broadcast.22832 = f32[1,12,2048,2048]{3,2,1,0} broadcast(constant_468), dimensions={} select.591 = f32[1,12,2048,2048]{3,2,1,0} select(broadcast.22833, convert.33707, broadcast.22832) broadcast.22821 = f32[1,12,2048,2048]{3,2,1,0} broadcast(reduce.1614), dimensions={1,2} subtract.1131 = f32[1,12,2048,2048]{3,2,1,0} subtract(select.591, broadcast.22821) exponential.420 = f32[1,12,2048,2048]{3,2,1,0} exponential(subtract.1131) broadcast.18351 = f32[1,12,2048,2048]{3,2,1,0} broadcast(reduce.1613), dimensions={1,2} divide.340 = f32[1,12,2048,2048]{3,2,1,0} divide(exponential.420, broadcast.18351) ROOT convert.29418 = bf16[1,12,2048,2048]{3,2,1,0} convert(divide.340) })"; using Kind = HloFusionAnalysis::EmitterFusionKind; EXPECT_THAT( 
RunAndGetFusionKinds(kHlo), UnorderedElementsAre(Kind::kLoop, Kind::kLoop, Kind::kLoop, Kind::kReduction, Kind::kReduction, Kind::kTranspose, Kind::kTranspose, Kind::kTranspose)); } TEST_F(PriorityFusionTest, DoNotFuseReduceIntoReduce) { absl::string_view kHlo = R"( HloModule test_module add { p0 = f32[] parameter(0) p1 = f32[] parameter(1) ROOT add.13235 = f32[] add(p0, p1) } ENTRY main { p0 = f32[8,4,128,226]{3,2,1,0} parameter(0) c0 = f32[] constant(0) r0 = f32[8,4,128]{2,1,0} reduce(p0, c0), dimensions={3}, to_apply=add ROOT r1 = f32[8,4]{1,0} reduce(r0, c0), dimensions={2}, to_apply=add })"; RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"( CHECK: ROOT {{.*}} reduce( CHECK: ROOT {{.*}} reduce( )"); } TEST_F(PriorityFusionTest, ConvertFusedIntoReduce) { absl::string_view kHlo = R"( HloModule test_module add { p0 = f32[] parameter(0) p1 = f32[] parameter(1) ROOT add.13235 = f32[] add(p0, p1) } ENTRY main { param_0_0.79 = bf16[1024,8192]{1,0} parameter(0) param_1_0.79 = bf16[1024,8192]{1,0} parameter(1) param_2.483 = f32[8192]{0} parameter(2) param_4.2892 = bf16[1024,8192]{1,0} parameter(3) convert.21854 = f32[1024,8192]{1,0} convert(param_0_0.79) convert.21855 = f32[1024,8192]{1,0} convert(param_1_0.79) constant_7773 = f32[] constant(0) broadcast.14555 = f32[1024,8192]{1,0} broadcast(param_2.483), dimensions={1} multiply.6906 = f32[1024,8192]{1,0} multiply(broadcast.14555, convert.21854) reduce.4813 = f32[1024]{0} reduce(multiply.6906, constant_7773), dimensions={1}, to_apply=add convert.13970 = bf16[1024]{0} convert(reduce.4813) convert.21534 = f32[1024,8192]{1,0} convert(param_4.2892) multiply.6910.clone.1 = f32[1024,8192]{1,0} multiply(broadcast.14555, convert.21534) reduce.4811.clone.1 = f32[1024]{0} reduce(multiply.6910.clone.1, constant_7773), dimensions={1}, to_apply=add convert.13967.clone.1 = bf16[1024]{0} convert(reduce.4811.clone.1) multiply.6908.clone.1 = f32[1024,8192]{1,0} multiply(broadcast.14555, convert.21855) reduce.4812.clone.1 = f32[1024]{0} reduce(multiply.6908.clone.1, constant_7773), dimensions={1}, to_apply=add convert.13969.clone.1 = bf16[1024]{0} convert(reduce.4812.clone.1) ROOT fusion.241 = (bf16[1024]{0}, bf16[1024]{0}, bf16[1024]{0}) tuple(convert.13970, convert.13967.clone.1, convert.13969.clone.1) })"; RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"( CHECK-COUNT-3: ROOT {{.*}} convert( CHECK: ENTRY %main CHECK-COUNT-3: fusion( CHECK-NOT: fusion( )"); } TEST_F(PriorityFusionTest, DoNotFuseDynamicUpdateSliceIntoReduce) { GTEST_SKIP() << "b/294198633"; absl::string_view kHlo = R"( HloModule test_module add { Arg_1.1046 = f32[] parameter(1) Arg_0.1045 = f32[] parameter(0) ROOT add.3303 = f32[] add(Arg_0.1045, Arg_1.1046) } ENTRY main { param_0.10549 = f32[4,2112]{1,0} parameter(0) param_5.2561 = pred[] parameter(5) broadcast.19725 = pred[4,1]{1,0} broadcast(param_5.2561), dimensions={} param_1.11587 = pred[4]{0} parameter(1) constant_5837 = f32[] constant(1) broadcast.19723 = f32[4]{0} broadcast(constant_5837), dimensions={} param_2.5952 = f32[4,8000]{1,0} parameter(2) param_3.4004 = f32[4]{0} parameter(3) broadcast.19718 = f32[4,8000]{1,0} broadcast(param_3.4004), dimensions={0} subtract.1112 = f32[4,8000]{1,0} subtract(param_2.5952, broadcast.19718) exponential.418 = f32[4,8000]{1,0} exponential(subtract.1112) constant_6254 = f32[] constant(0) reduce.1154 = f32[4]{0} reduce(exponential.418, constant_6254), dimensions={1}, to_apply=add log.38 = f32[4]{0} log(reduce.1154) broadcast.19717 = f32[4,8000]{1,0} broadcast(log.38), 
dimensions={0} subtract.1111 = f32[4,8000]{1,0} subtract(subtract.1112, broadcast.19717) iota.170 = s32[4,1]{1,0} iota(), iota_dimension=0 constant_6281 = s32[] constant(0) broadcast.19735 = s32[4]{0} broadcast(constant_6281), dimensions={} param_4.3400 = s32[4,8000]{1,0} parameter(4) slice.3186 = s32[4,40]{1,0} slice(param_4.3400), slice={[0:4], [0:40]} iota.168 = s32[4,1]{1,0} iota(), iota_dimension=0 param_7.1596 = s32[4]{0} parameter(7) compare.341 = pred[4]{0} compare(param_7.1596, broadcast.19735), direction=LT constant_5833 = s32[] constant(40) broadcast.19731 = s32[4]{0} broadcast(constant_5833), dimensions={} add.8348 = s32[4]{0} add(param_7.1596, broadcast.19731) select.418 = s32[4]{0} select(compare.341, add.8348, param_7.1596) bitcast.20942 = s32[4,1]{1,0} bitcast(select.418) concatenate.1337 = s32[4,2]{1,0} concatenate(iota.168, bitcast.20942), dimensions={1} gather.43 = s32[4,1,1]{2,1,0} gather(slice.3186, concatenate.1337), offset_dims={1,2}, collapsed_slice_dims={}, start_index_map={0,1}, index_vector_dim=1, slice_sizes={1,1} bitcast.20941 = s32[4]{0} bitcast(gather.43) select.398 = s32[4]{0} select(param_1.11587, broadcast.19735, bitcast.20941) compare.334 = pred[4]{0} compare(select.398, broadcast.19735), direction=LT constant_6260 = s32[] constant(8000) broadcast.19720 = s32[4]{0} broadcast(constant_6260), dimensions={} add.8336 = s32[4]{0} add(select.398, broadcast.19720) select.396 = s32[4]{0} select(compare.334, add.8336, select.398) bitcast.20830 = s32[4,1]{1,0} bitcast(select.396) concatenate.1308 = s32[4,2]{1,0} concatenate(iota.170, bitcast.20830), dimensions={1} gather.41 = f32[4,1,1]{2,1,0} gather(subtract.1111, concatenate.1308), offset_dims={1,2}, collapsed_slice_dims={}, start_index_map={0,1}, index_vector_dim=1, slice_sizes={1,1} bitcast.20824 = f32[4]{0} bitcast(gather.41) select.389 = f32[4]{0} select(param_1.11587, broadcast.19723, bitcast.20824) bitcast.20823 = f32[4,1]{1,0} bitcast(select.389) param_6.1719 = s32[] parameter(6) constant_6323 = s32[] constant(2048) add.8549 = s32[] add(param_6.1719, constant_6323) compare.388 = pred[] compare(add.8549, constant_6281), direction=LT constant_5436 = s32[] constant(4160) add.8339 = s32[] add(param_6.1719, constant_5436) select.409 = s32[] select(compare.388, add.8339, add.8549) dynamic-slice.36 = f32[4,1]{1,0} dynamic-slice(param_0.10549, constant_6281, select.409), dynamic_slice_sizes={4,1} select.388 = f32[4,1]{1,0} select(broadcast.19725, bitcast.20823, dynamic-slice.36) ROOT dynamic-update-slice.307 = f32[4,2112]{1,0} dynamic-update-slice(param_0.10549, select.388, constant_6281, select.409) })"; RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"( CHECK: ROOT {{.*}} dynamic-update-slice( CHECK: %[[REDUCE:.*]] = {{.*}} reduce( CHECK: ROOT {{.*}} log(%[[REDUCE]]) CHECK: ENTRY CHECK-COUNT-2: fusion( )"); } TEST_F(PriorityFusionTest, DontFuseIntoFirstOperandOfScatter) { auto module = *ParseAndReturnVerifiedModule(R"( HloModule test_module add { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT add = s32[] add(lhs, rhs) } ENTRY FuseIntoScatter { p0 = s32[3,3] parameter(0) operand = s32[3,3] add(p0, p0) p1 = s32[2] parameter(1) indices = s32[2] add(p1, p1) p2 = s32[2,3] parameter(2) updates = s32[2,3] add(p2, p2) scatter = s32[3,3] scatter(operand, indices, updates), to_apply=add, update_window_dims={1}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1 ROOT add = s32[3,3] add(scatter, scatter) })"); EXPECT_THAT(priority_fusion_.Run(module.get()), 
IsOkAndHolds(true)); HloInstruction* root = module->entry_computation()->root_instruction(); const HloInstruction* fusion = nullptr; ASSERT_THAT(root, GmockMatch(m::Add(m::Fusion(&fusion), m::Fusion()))); EXPECT_EQ(fusion->fusion_kind(), HloInstruction::FusionKind::kInput); EXPECT_THAT(fusion->fused_expression_root(), GmockMatch(m::Scatter(m::Parameter(), m::Add(), m::Add()))); } TEST_F(PriorityFusionTest, DontFuseConstantIntoFirstOperandOfScatter) { auto module = *ParseAndReturnVerifiedModule(R"( HloModule test_module add { lhs = s32[] parameter(0) rhs = s32[] parameter(1) ROOT add = s32[] add(lhs, rhs) } ENTRY FuseIntoScatter { operand = s32[1] constant({0}) indices = s32[24,1] parameter(0) constant = s32[] constant(1) updates = s32[24,1] broadcast(constant) ROOT scatter = s32[1] scatter(operand, indices, updates), to_apply=add, update_window_dims={1}, inserted_window_dims={}, scatter_dims_to_operand_dims={0}, index_vector_dim=1 })"); EXPECT_THAT(priority_fusion_.Run(module.get()), IsOkAndHolds(true)); HloInstruction* root = module->entry_computation()->root_instruction(); ASSERT_THAT(root, GmockMatch(m::Fusion(m::Constant(), m::Parameter()))); EXPECT_EQ(root->fusion_kind(), HloInstruction::FusionKind::kInput); EXPECT_THAT(root->fused_expression_root(), GmockMatch(m::Scatter(m::Parameter(), m::Parameter(), m::Broadcast(m::Constant())))); } TEST_F(PriorityFusionTest, DoNotFuseReduceIntoReduceEvenIfOccupancyIsHigh) { constexpr absl::string_view kHlo = R"( HloModule test_module add { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT add = f32[] add(lhs, rhs) } ENTRY main { p0 = f32[4,3584,128,168]{3,2,1,0} parameter(0) c = f32[] constant(0) r1 = f32[4,3584,128]{2,1,0} reduce(p0, c), dimensions={3}, to_apply=add ROOT r2 = f32[4,3584]{1,0} reduce(r1, c), dimensions={2}, to_apply=add })"; RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"( CHECK: ROOT {{.*}} reduce( CHECK: ROOT {{.*}} reduce( )"); } TEST_F(PriorityFusionTest, FuseReductionEpilogueWithMultipleUsers) { constexpr absl::string_view kHlo = R"( HloModule test_module add { x = f32[] parameter(0) y = f32[] parameter(1) ROOT add = f32[] add(x, y) } fused_computation { p0 = f32[64,16384]{1,0} parameter(0) c0 = f32[] constant(0) ROOT reduce.858 = f32[64]{0} reduce(p0, c0), dimensions={1}, to_apply=add } ENTRY main { p0 = f32[64,16384]{1,0} parameter(0) fusion = f32[64]{0} fusion(p0), kind=kInput, calls=fused_computation log = f32[64]{0} log(fusion) negate = f32[64]{0} custom-call(log), custom_call_target="negate" ROOT add = f32[64]{0} add(negate, log) } )"; RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), R"( CHECK: ENTRY CHECK: %[[PARAM:.*]] = {{.*}} parameter(0) CHECK: %[[FUSION:.*]] = {{.*}} fusion(%[[PARAM]]) CHECK: custom-call(%[[FUSION]]) )"); } TEST_F(PriorityFusionTest, EpilogueFusion) { absl::string_view kHlo = R"( HloModule test_module add { p0 = f32[] parameter(0) p1 = f32[] parameter(1) ROOT add.13235 = f32[] add(p0, p1) } fused_computation.1 { p0 = f32[8,4,128,226]{3,2,1,0} parameter(0) c0 = f32[] constant(0) ROOT r0 = f32[8,4,128]{2,1,0} reduce(p0, c0), dimensions={3}, to_apply=add } fused_computation.2 { p0 = f32[8,4,128]{2,1,0} parameter(0) r1 = f32[8,4,128]{2,1,0} log(p0) ROOT r2 = f32[8,4,128]{2,1,0} log(r1) } ENTRY main { p0 = f32[8,4,128,226]{3,2,1,0} parameter(0) f1 = f32[8,4,128]{2,1,0} fusion(p0), kind=kInput, calls=%fused_computation.1 ROOT fusion = f32[8,4,128]{2,1,0} fusion(f1), kind=kLoop, calls=%fused_computation.2 })"; RunAndFilecheckHloRewrite(kHlo, std::move(priority_fusion_), 
R"( CHECK: ROOT {{.*}} = f32[8,4,128]{2,1,0} fusion(%p{{.*}}), kind=kInput, calls=%fused_computation)"); } TEST_F(PriorityFusionTest, EpilogueFusionFails) { auto module = *ParseAndReturnVerifiedModule(R"( HloModule test_module add { p0 = f32[] parameter(0) p1 = f32[] parameter(1) ROOT add.13235 = f32[] add(p0, p1) } fused_computation.1 { p0 = f32[28672,4096]{1,0} parameter(0) c0 = f32[] constant(0) ROOT r = f32[28672]{0} reduce(p0, c0), dimensions={1}, to_apply=add } fused_computation.2 { p0 = f32[28672]{0} parameter(0) p1 = f32[28672]{0} parameter(1) ROOT a = f32[28672]{0} add(p0, p1) } ENTRY main { p0 = f32[28672,4096]{1,0} parameter(0) p1 = f32[28672]{0} parameter(1) f = f32[28672]{0} fusion(p0), kind=kInput, calls=%fused_computation.1 ROOT fusion = f32[28672]{0} fusion(f,p1), kind=kLoop, calls=%fused_computation.2 })"); EXPECT_THAT(priority_fusion_.Run(module.get()), IsOkAndHolds(false)); } TEST_F(PriorityFusionTest, DoNotFuseIntoRoot) { auto module = *ParseAndReturnVerifiedModule(R"( HloModule test_module ENTRY %main (p.0: u32[2], p.1: u32[]) -> u32[2] { %p.0 = u32[2]{0} parameter(0) %p.1 = u32[] parameter(1) ROOT %broadcast = u32[2]{0} broadcast(u32[] %p.1), dimensions={}, sharding={replicated} %add = u32[2]{0} add(u32[2]{0} %p.0, u32[2]{0} %broadcast) %tuple.1 = (u32[2]{0}) tuple(u32[2]{0} %add) %token.0 = token[] after-all() %outfeed.6 = token[] outfeed((u32[2]{0}) %tuple.1, token[] %token.0), outfeed_shape=(u32[2]{0}), sharding={maximal device=0} })"); EXPECT_THAT(priority_fusion_.Run(module.get()), IsOkAndHolds(false)); } TEST_F(PriorityFusionTest, DontFuseConcat) { auto module = *ParseAndReturnVerifiedModule(R"( HloModule module %maximum (param_0: f32[], param_1: f32[]) -> f32[] { %param_0 = f32[] parameter(0) %param_1 = f32[] parameter(1) ROOT %maximum = f32[] maximum(f32[] %param_0, f32[] %param_1) } %fused_concat (param_0: f32[1,4,401,8,8], param_1: f32[1,1,4,1023,8], param_2: bf16[1,4,1023,8,8]) -> f32[1,4,1424,8,8] { %param_2 = bf16[1,4,1023,8,8]{4,3,2,1,0} parameter(2) %convert = f32[1,4,1023,8,8]{4,3,2,1,0} convert(bf16[1,4,1023,8,8]{4,3,2,1,0} %param_2) %param_1 = f32[1,1,4,1023,8]{4,3,2,1,0} parameter(1) %bitcast = f32[4,1023,8]{2,1,0} bitcast(f32[1,1,4,1023,8]{4,3,2,1,0} %param_1) %broadcast = f32[1,4,1023,8,8]{4,3,2,1,0} broadcast(f32[4,1023,8]{2,1,0} %bitcast), dimensions={1,2,4} %add = f32[1,4,1023,8,8]{4,3,2,1,0} add(f32[1,4,1023,8,8]{4,3,2,1,0} %convert, f32[1,4,1023,8,8]{4,3,2,1,0} %broadcast) %param_0 = f32[1,4,401,8,8]{4,3,2,1,0} parameter(0) ROOT %concatenate = f32[1,4,1424,8,8]{4,3,2,1,0} concatenate(f32[1,4,1023,8,8]{4,3,2,1,0} %add, f32[1,4,401,8,8]{4,3,2,1,0} %param_0), dimensions={2} } %fused_reduce (param_0: f32[], param_1: f32[1,4,1424,8,8]) -> f32[4,8,8] { %param_1 = f32[1,4,1424,8,8]{4,3,2,1,0} parameter(1) %bitcast = f32[4,1424,8,8]{3,2,1,0} bitcast(f32[1,4,1424,8,8]{4,3,2,1,0} %param_1) %param_0 = f32[] parameter(0) ROOT %reduce = f32[4,8,8]{2,1,0} reduce(f32[4,1424,8,8]{3,2,1,0} %bitcast, f32[] %param_0), dimensions={1}, to_apply=%maximum } %fused_broadcast (param_0: f32[1,4,1424,8,8], param_1: f32[4,8,8]) -> f32[1,4,1424,8,8] { %param_0 = f32[1,4,1424,8,8]{4,3,2,1,0} parameter(0) %param_1 = f32[4,8,8]{2,1,0} parameter(1) %broadcast = f32[1,4,1424,8,8]{4,3,2,1,0} broadcast(f32[4,8,8]{2,1,0} %param_1), dimensions={1,3,4} ROOT %subtract = f32[1,4,1424,8,8]{4,3,2,1,0} subtract(f32[1,4,1424,8,8]{4,3,2,1,0} %param_0, f32[1,4,1424,8,8]{4,3,2,1,0} %broadcast) } ENTRY fusion { %param_0 = f32[1,4,401,8,8]{4,3,2,1,0} parameter(0) %param_1 = 
f32[1,1,4,1023,8]{4,3,2,1,0} parameter(1) %param_2 = bf16[1,4,1023,8,8]{4,3,2,1,0} parameter(2) %concat = f32[1,4,1424,8,8]{4,3,2,1,0} fusion(%param_0, %param_1, %param_2), kind=kLoop, calls=fused_concat %param_3 = f32[] parameter(3) %reduce = f32[4,8,8]{2,1,0} fusion(%param_3, %concat), kind=kLoop, calls=fused_reduce %param_4 = f32[4,8,8]{2,1,0} parameter(4) %broadcast = f32[1,4,1424,8,8]{4,3,2,1,0} fusion(%concat, %param_4), kind=kLoop, calls=fused_broadcast ROOT tuple = (f32[4,8,8]{2,1,0}, f32[1,4,1424,8,8]{4,3,2,1,0}) tuple(%reduce, %broadcast) } )"); EXPECT_THAT(priority_fusion_.Run(module.get()), IsOkAndHolds(false)); } TEST_F(PriorityFusionTest, FuseOnlySmallConstant) { auto module = *ParseAndReturnVerifiedModule(R"( HloModule module ENTRY main { param_0 = f32[32,32]{1,0} parameter(0) c_1 = f32[] constant(1) c_2 = f32[32,32] constant({...}) broadcast = f32[32,32]{1,0} broadcast(c_1), dimensions={} add = f32[32,32]{1,0} add(param_0, broadcast) ROOT mul = f32[32,32]{1,0} multiply(c_2, add) } )"); EXPECT_THAT(priority_fusion_.Run(module.get()), IsOkAndHolds(true)); HloInstruction* root = module->entry_computation()->root_instruction(); ASSERT_THAT(root, GmockMatch(m::Fusion(m::Constant(), m::Parameter()))); EXPECT_THAT(root->fused_expression_root(), GmockMatch(m::Multiply( m::Parameter(), m::Add(m::Parameter(), m::Broadcast(m::Constant()))))); } TEST_F(PriorityFusionTest, FuseSmallConstantIntoTritonFusion) { auto module = *ParseAndReturnVerifiedModule(R"( HloModule module add { Arg_0 = f32[] parameter(0) Arg_1 = f32[] parameter(1) ROOT add = f32[] add(Arg_0, Arg_1) } triton_computation { param_0 = f32[32,64] parameter(0) param_1 = f32[] parameter(1) ROOT reduce = f32[32] reduce(param_0, param_1), dimensions={1}, to_apply=add } ENTRY main { param_0 = f32[32,64] parameter(0) c_0 = f32[] constant(0) ROOT triton_softmax = f32[32] fusion(param_0, c_0), kind=kCustom, calls=triton_computation, backend_config={"fusion_backend_config": {"kind":"__triton","block_level_fusion_config":{"output_tile_sizes":["1"],"num_warps":"1"}}} })"); EXPECT_THAT(priority_fusion_.Run(module.get()), IsOkAndHolds(true)); HloInstruction* root = module->entry_computation()->root_instruction(); ASSERT_THAT(root, GmockMatch(m::Fusion(m::Parameter()))); EXPECT_THAT(root->fused_expression_root(), GmockMatch(m::Reduce(m::Parameter(), m::Constant()))); } TEST_F(PriorityFusionTest, DoNotFuseProducerConsumerMergedTooLarge) { auto module = *ParseAndReturnVerifiedModule(R"( HloModule module fused_computation.1 { iota.9.7 = s32[3,1,1]{2,1,0} iota(), iota_dimension=0 param_3.29 = s32[] parameter(2) pad.2.7 = s32[3,1,2]{2,1,0} pad(iota.9.7, param_3.29), padding=0_0x0_0x0_1 param_2.39 = s32[] parameter(1) broadcast.76.1 = s32[3,1,2]{2,1,0} broadcast(param_2.39), dimensions={} compare.9.1 = pred[3,1,2]{2,1,0} compare(pad.2.7, broadcast.76.1), direction=GE param_1.73 = s32[2]{0} parameter(0) broadcast.78.1 = s32[3,2]{1,0} broadcast(param_1.73), dimensions={1} bitcast.1 = s32[3,2]{1,0} bitcast(pad.2.7) compare.10.1 = pred[3,2]{1,0} compare(bitcast.1, broadcast.78.1), direction=LE bitcast.2 = pred[3,1,2]{2,1,0} bitcast(compare.10.1) ROOT and.3.1 = pred[3,1,2]{2,1,0} and(compare.9.1, bitcast.2) } and { x = pred[] parameter(0) y = pred[] parameter(1) ROOT and = pred[] and(x, y) } fused_computation.2 { param0 = pred[3,1,2]{2,1,0} parameter(0) slice = pred[1,1,2]{2,1,0} slice(param0), slice={[0:1], [0:1], [0:2]} bitcast = pred[2]{0} bitcast(slice) init = pred[] constant(true) reduce = pred[2]{0} reduce(param0, init), dimensions={0,1}, 
to_apply=and and = pred[2]{0} and(bitcast, reduce) pad = pred[3]{0} pad(and, init), padding=0_1 broadcast = pred[3,2]{1,0} broadcast(pad), dimensions={0} bitcast2 = pred[6]{0} bitcast(broadcast) broadcast2 = pred[2,3]{1,0} broadcast(pad), dimensions={1} bitcast3 = pred[6]{0} bitcast(broadcast2) ROOT and2 = pred[6]{0} and(bitcast2, bitcast3) } ENTRY main { p0 = s32[2]{0} parameter(0) p1 = s32[] parameter(1) p2 = s32[] parameter(2) fusion1 = pred[3,1,2]{2,1,0} fusion(p0, p1, p2), kind=kLoop, calls=fused_computation.1 ROOT fusion2 = pred[6]{0} fusion(fusion1), kind=kInput, calls=fused_computation.2 } )"); auto& debug_options = module->mutable_config().mutable_debug_options(); debug_options.set_xla_gpu_mlir_emitter_level(3); EXPECT_THAT(priority_fusion_.Run(module.get()), IsOkAndHolds(false)); } TEST_F(PriorityFusionWithTritonEnabledTest, CanMergeTritonFusionWithBothProducerAndConsumer) { const std::string kHloText = R"( HloModule t add { Arg_0 = f32[] parameter(0) Arg_1 = f32[] parameter(1) ROOT add = f32[] add(Arg_0, Arg_1) } producer_computation { parameter_0 = f32[125]{0} parameter(0) ROOT broadcast = f32[125,127]{1,0} broadcast(parameter_0), dimensions={0} } consumer_computation { parameter_0 = f32[125,127]{1,0} parameter(0) parameter_1 = f32[125,127]{1,0} parameter(1) ROOT multiply = f32[125,127]{1,0} multiply(parameter_1, parameter_0) } triton_softmax_computation { parameter_0 = f32[125,127]{1,0} parameter(0) multiply_0 = f32[125,127]{1,0} multiply(parameter_0, parameter_0) constant_0 = f32[] constant(0) reduce_0 = f32[125]{0} reduce(multiply_0, constant_0), dimensions={1}, to_apply=add broadcast_4 = f32[125,127]{1,0} broadcast(reduce_0), dimensions={0} ROOT multiply = f32[125,127]{1,0} multiply(multiply_0, broadcast_4) } ENTRY main { param_0 = f32[125]{0} parameter(0) param_1 = f32[125,127]{1,0} parameter(1) producer_fusion = f32[125,127]{1,0} fusion(param_0), kind=kLoop, calls=producer_computation triton_softmax = f32[125,127]{1,0} fusion(producer_fusion), kind=kCustom, calls=triton_softmax_computation, backend_config={"fusion_backend_config": {"kind":"__triton","block_level_fusion_config":{"output_tile_sizes":["1","127"],"num_warps":"1"}}} ROOT consumer_fusion = f32[125,127]{1,0} fusion(param_1, triton_softmax), kind=kLoop, calls=consumer_computation })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHloText)); EXPECT_TRUE(priority_fusion_.Run(module.get()).value()); EXPECT_TRUE(verifier().Run(module.get()).status().ok()); HloInstruction* root = module->entry_computation()->root_instruction(); EXPECT_THAT(root, GmockMatch(m::Fusion(m::Parameter(), m::Parameter()))); EXPECT_EQ(root->fusion_kind(), HloInstruction::FusionKind::kCustom); ASSERT_TRUE(IsGenericTritonFusion(*root)); EXPECT_TRUE(root->backend_config<GpuBackendConfig>() ->fusion_backend_config() .has_block_level_fusion_config()); EXPECT_EQ(root->backend_config<GpuBackendConfig>() ->fusion_backend_config() .block_level_fusion_config() .output_tile_sizes_size(), 2); } TEST_F(PriorityFusionWithTritonEnabledTest, FuseTritonProducerWithTwoConsumers) { const std::string kHloText = R"( HloModule t add { Arg_0 = f32[] parameter(0) Arg_1 = f32[] parameter(1) ROOT add = f32[] add(Arg_0, Arg_1) } producer_computation { parameter_0 = f32[125]{0} parameter(0) ROOT broadcast = f32[125,127] broadcast(parameter_0), dimensions={0} } consumer_computation.1 { parameter_0 = f32[125,127] parameter(0) ROOT log = f32[125,127] log(parameter_0) } consumer_computation.2 { parameter_0 = f32[125,127] parameter(0) ROOT exp = 
f32[125,127] exponential(parameter_0) } ENTRY main { param_0 = f32[125]{0} parameter(0) producer_fusion = f32[125,127] fusion(param_0), kind=kCustom, calls=producer_computation, backend_config={"fusion_backend_config": {"kind":"__triton","block_level_fusion_config":{"output_tile_sizes":["1","127"],"num_warps":"1"}}} consumer_fusion.1 = f32[125,127] fusion(producer_fusion), kind=kLoop, calls=consumer_computation.1 consumer_fusion.2 = f32[125,127] fusion(producer_fusion), kind=kLoop, calls=consumer_computation.2 ROOT tuple = (f32[125,127], f32[125,127]) tuple(consumer_fusion.1, consumer_fusion.2) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHloText)); EXPECT_TRUE(priority_fusion_.Run(module.get()).value()); EXPECT_TRUE(verifier().Run(module.get()).status().ok()); HloInstruction* root = module->entry_computation()->root_instruction(); HloInstruction *fusion1, *fusion2; EXPECT_THAT(root, GmockMatch(m::Tuple(m::Fusion(&fusion1, m::Parameter()), m::Fusion(&fusion2, m::Parameter())))); EXPECT_TRUE(IsGenericTritonFusion(*fusion1)); TF_ASSERT_OK_AND_ASSIGN(auto backend_config1, fusion1->backend_config<GpuBackendConfig>()); EXPECT_TRUE( backend_config1.fusion_backend_config().has_block_level_fusion_config()); EXPECT_EQ(backend_config1.fusion_backend_config() .block_level_fusion_config() .output_tile_sizes_size(), 2); EXPECT_TRUE(IsGenericTritonFusion(*fusion2)); TF_ASSERT_OK_AND_ASSIGN(auto backend_config2, fusion2->backend_config<GpuBackendConfig>()); EXPECT_TRUE( backend_config2.fusion_backend_config().has_block_level_fusion_config()); EXPECT_EQ(backend_config2.fusion_backend_config() .block_level_fusion_config() .output_tile_sizes_size(), 2); } TEST_F(PriorityFusionWithTritonEnabledTest, TritonProducerNotSupported_DoNotFuse) { const std::string kHloText = R"( HloModule t producer_computation { parameter_0 = c64[] parameter(0) broadcast = c64[125,127] broadcast(parameter_0), dimensions={} ROOT real = f32[125,127] real(broadcast) } triton_computation { parameter_0 = f32[125,127] parameter(0) parameter_1 = f32[125,127] parameter(1) ROOT add = f32[125,127] add(parameter_0, parameter_1) } ENTRY main { param_0 = c64[] parameter(0) param_1 = f32[125,127] parameter(1) producer_fusion = f32[125,127] fusion(param_0), kind=kLoop, calls=producer_computation ROOT triton_fusion = f32[125,127] fusion(producer_fusion, param_1), kind=kCustom, calls=triton_computation, backend_config={"fusion_backend_config": {"kind":"__triton","block_level_fusion_config":{"output_tile_sizes":["1","127"],"num_warps":"1"}}} })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHloText)); EXPECT_FALSE(priority_fusion_.Run(module.get()).value()); } TEST_F(PriorityFusionWithTritonEnabledTest, TritonConsumerNotSupported_DoNotFuse) { const std::string kHloText = R"( HloModule t triton_computation { parameter_0 = f32[] parameter(0) ROOT boardcast = f32[125,127] broadcast(parameter_0), dimensions={} } consumer_computation { parameter_0 = c64[] parameter(0) parameter_1 = f32[125,127] parameter(1) broadcast = c64[125,127] broadcast(parameter_0), dimensions={} real = f32[125,127] real(broadcast) ROOT add = f32[125,127] add(real, parameter_1) } ENTRY main { param_0 = f32[] parameter(1) param_1 = c64[] parameter(0) triton_fusion = f32[125,127] fusion(param_0), kind=kCustom, calls=triton_computation, backend_config={"fusion_backend_config": {"kind":"__triton","block_level_fusion_config":{"output_tile_sizes":["1","127"],"num_warps":"1"}}} ROOT consumer_fusion = f32[125,127] fusion(param_1, 
triton_fusion), kind=kLoop, calls=consumer_computation })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHloText)); EXPECT_FALSE(priority_fusion_.Run(module.get()).value()); } TEST_F(PriorityFusionTest, DoNotFuseInsideReducer) { auto module = *ParseAndReturnVerifiedModule(R"( %reducer { p0 = f32[] parameter(0) p1 = f32[] parameter(1) add = f32[] add(p0, p1) ROOT max = f32[] maximum(add, p0) } %fused_reduce { p0 = f32[256] parameter(0) p1 = f32[] parameter(1) ROOT reduce = f32[] reduce(p0, p1), dimensions={0}, to_apply=%reducer } ENTRY fusion { p0 = f32[256] parameter(0) p1 = f32[] parameter(1) ROOT %reduce = f32[] fusion(p0, p1), kind=kInput, calls=fused_reduce } )"); EXPECT_THAT(priority_fusion_.Run(module.get()), IsOkAndHolds(false)); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/priority_fusion.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/priority_fusion_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
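The record above pairs the priority-fusion pass source with its unit tests (see the two URLs and the commit hash just above). As a reading aid, here is a minimal sketch of how those tests drive the pass; it mirrors the PriorityFusionTest fixture in the test code above and is a hedged fragment, not a standalone program: it assumes the headers included by that test file and an HloTestBase-derived fixture supplying ParseAndReturnVerifiedModule(), and the variable names are illustrative only.

// Construct the pass the way the fixture above does: a null thread pool,
// the RTX A6000 device description, and a pointer-size-based shape-size
// function wrapped in GpuHloCostAnalysis::Options.
auto shape_size = [](const xla::Shape& shape) {
  constexpr int64_t kPointerSize = 8;
  return xla::ShapeUtil::ByteSizeOf(shape, kPointerSize);
};

xla::gpu::PriorityFusion priority_fusion{
    nullptr,  // thread pool, as in the tests
    xla::gpu::TestGpuDeviceInfo::RTXA6000DeviceInfo(),
    xla::gpu::GpuHloCostAnalysis::Options{shape_size, {}, {}, true}};

// `module` would come from ParseAndReturnVerifiedModule(hlo_text).value()
// inside a test body; Run reports whether any producer/consumer pair was fused.
absl::StatusOr<bool> changed = priority_fusion.Run(module.get());

The tests above then assert on the result with IsOkAndHolds(true/false) and inspect the rewritten module with pattern matchers or FileCheck.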
f24af8d1-7211-4c76-950a-dff58c823db9
cpp
tensorflow/tensorflow
windowed_einsum_handler
third_party/xla/xla/service/gpu/transforms/windowed_einsum_handler.cc
third_party/xla/xla/service/gpu/transforms/windowed_einsum_handler_test.cc
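The code field that follows contains the windowed-einsum handler pass. Its first helper, ShiftDequantizationF8, looks for an FP8 dequantization feeding the windowed einsum loop, that is, a convert from an FP8 type scaled by a broadcast scalar through either a divide or a multiply, and then moves that dequantization inside the while body. As a reading aid, here is a minimal sketch of just that pattern match, lifted from the matcher combinators used in the code below; the helper name MatchFp8Dequant is hypothetical, and the sketch assumes the pattern_matcher.h and hlo_instruction.h headers that the file below already includes.

namespace m = xla::match;

// True if `instr` has the shape
//   binary = divide-or-multiply(convert(f8_operand), broadcast(scale)),
// capturing the pieces the real pass reuses when it replays the scaling
// inside the while loop. The full pass additionally walks through
// bitcast/broadcast/copy/reshape/transpose unaries first and checks that
// the operand types are FP8 and the scales are scalars.
bool MatchFp8Dequant(xla::HloInstruction* instr, xla::HloInstruction** binary,
                     xla::HloInstruction** f8_operand,
                     xla::HloInstruction** scale) {
  return xla::Match(
      instr, m::AnyOf<xla::HloInstruction>(
                 m::Divide(binary, m::Convert(m::Op(f8_operand)),
                           m::Broadcast(m::Op(scale))),
                 m::MultiplyAnyOrder(binary, m::Convert(m::Op(f8_operand)),
                                     m::Broadcast(m::Op(scale)))));
}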
#include "xla/service/gpu/transforms/windowed_einsum_handler.h" #include <cstdint> #include <string> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_set.h" #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/dfs_hlo_visitor_with_default.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_query.h" #include "xla/literal.h" #include "xla/literal_util.h" #include "xla/service/algebraic_simplifier.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/hlo_constant_folding.h" #include "xla/service/hlo_creation_utils.h" #include "xla/service/pattern_matcher.h" #include "xla/service/shape_inference.h" #include "xla/service/while_loop_unroller.h" #include "xla/util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" #include "tsl/platform/statusor.h" namespace xla::gpu { namespace { namespace m = match; absl::StatusOr<bool> ShiftDequantizationF8(HloComputation* while_body) { HloInstruction* while_instr = while_body->WhileCallInstruction(); if (!while_instr || while_instr->operand(0)->user_count() != 1) { return false; } HloInstruction* param_tuple = while_instr->mutable_operand(0); std::array<HloInstruction*, 2> binaries, operands, scales; std::array<std::vector<HloInstruction*>, 2> unaries; for (int k = 0; k < 2; ++k) { HloInstruction* operand = param_tuple->mutable_operand(k); while (operand->opcode() == HloOpcode::kBitcast || operand->opcode() == HloOpcode::kBroadcast || operand->opcode() == HloOpcode::kCopy || operand->opcode() == HloOpcode::kReshape || operand->opcode() == HloOpcode::kTranspose) { unaries[k].emplace_back(operand); operand = operand->mutable_operand(0); } std::reverse(unaries[k].begin(), unaries[k].end()); if (!Match(operand, m::AnyOf<HloInstruction>( m::Divide(&binaries[k], m::Convert(m::Op(&operands[k])), m::Broadcast(m::Op(&scales[k]))), m::MultiplyAnyOrder(&binaries[k], m::Convert(m::Op(&operands[k])), m::Broadcast(m::Op(&scales[k])))))) { VLOG(5) << "Unable to identify FP8 dequantization pattern."; return false; } } std::array<PrimitiveType, 2> operand_types{ operands[0]->shape().element_type(), operands[1]->shape().element_type()}; if (!((operand_types[0] == F8E4M3FN && operand_types[1] == F8E4M3FN) || (operand_types[0] == F8E4M3FN && operand_types[1] == F8E5M2) || (operand_types[0] == F8E5M2 && operand_types[1] == F8E4M3FN))) { VLOG(5) << "Unsupported types."; return false; } for (int k = 0; k < 2; ++k) { if (binaries[k]->shape().element_type() != BF16 && binaries[k]->shape().element_type() != F16 && binaries[k]->shape().element_type() != F32) { VLOG(5) << "Unsupported types."; return false; } } if (!ShapeUtil::IsScalar(scales[0]->shape()) || !ShapeUtil::IsScalar(scales[1]->shape())) { VLOG(5) << "Scaling factors must be scalars."; return false; } HloComputation* while_condition = while_instr->while_condition(); HloInstruction* while_root = while_body->root_instruction(); std::array<HloInstruction*, 2> dots, gtes, dyn_slices{nullptr, nullptr}, coll_perms{nullptr, nullptr}; if (Match(while_root, m::Tuple(m::CollectivePermute( &coll_perms[1], m::CollectivePermute( &coll_perms[0], m::GetTupleElement(&gtes[0], m::Parameter(), 0))), m::GetTupleElement(&gtes[1], m::Parameter(), 1), m::DynamicUpdateSlice( m::DynamicUpdateSlice().WithOperand( 1, 
m::Dot(&dots[0], m::Op(), m::Op())), m::Dot(&dots[1], m::Op(), m::Op()), m::Op(), m::Op(), m::Op()), m::Op(), m::Op())) && dots[0]->operand(0) == gtes[0] && dots[0]->operand(1) == gtes[1] && dots[1]->operand(1) == gtes[1]) { VLOG(5) << "Identified all-gather windowed einsum pattern."; } else if (Match( while_root, m::Tuple(m::GetTupleElement(&gtes[0], m::Parameter(), 0), m::GetTupleElement(&gtes[1], m::Parameter(), 1), m::AddAnyOrder( m::Dot(&dots[0], m::DynamicSlice(&dyn_slices[0]), m::Op()), m::Op()), m::CollectivePermute(m::AddAnyOrder( m::Dot(&dots[1], m::DynamicSlice(&dyn_slices[1]), m::Op()), m::Op())), m::Op())) && dots[0]->operand(1) == gtes[1] && dots[1]->operand(1) == gtes[1]) { VLOG(5) << "Identified reduce-scatter windowed einsum pattern."; } else { VLOG(5) << "Unable to identify valid windowed einsum pattern."; return false; } for (int k = 0; k < 2; ++k) { for (HloInstruction* unary : unaries[k]) { Shape new_shape = ShapeUtil::MakeShapeWithDenseLayout( operands[k]->shape().element_type(), unary->shape().dimensions(), unary->shape().layout().minor_to_major()); operands[k] = unary->AddInstruction(unary->CloneWithNewOperands( ShapeUtil::MakeShapeWithDenseLayout( operands[k]->shape().element_type(), unary->shape().dimensions(), unary->shape().layout().minor_to_major()), {operands[k]})); } } for (int k = 0; k < 2; ++k) { TF_RETURN_IF_ERROR( param_tuple->ReplaceOperandWithDifferentShape(k, operands[k])); ShapeUtil::UpdateTupleShape(operands[k]->shape(), k, param_tuple->mutable_shape()); param_tuple->AppendOperand(scales[k]); ShapeUtil::AppendShapeToTuple(scales[k]->shape(), param_tuple->mutable_shape()); } for (HloComputation* while_comp : {while_body, while_condition}) { while_comp->ReplaceParameter( 0, HloInstruction::CreateParameter( 0, param_tuple->shape(), while_comp->parameter_instruction(0)->name())); } HloInstruction* body_param = while_body->parameter_instruction(0); for (int k = 0; k < 2; ++k) { TF_ASSIGN_OR_RETURN(HloInstruction * operand_f8, MakeGetTupleElementHlo(body_param, k)); if (while_root->operand(k) == gtes[k]) { TF_RETURN_IF_ERROR( while_root->ReplaceOperandWithDifferentShape(k, operand_f8)); ShapeUtil::UpdateTupleShape(operand_f8->shape(), k, while_root->mutable_shape()); } TF_ASSIGN_OR_RETURN( HloInstruction * operand_scale, MakeGetTupleElementHlo( body_param, body_param->shape().tuple_shapes_size() - 2 + k)); while_root->AppendOperand(operand_scale); ShapeUtil::AppendShapeToTuple(operand_scale->shape(), while_root->mutable_shape()); HloInstruction* operand_f32 = MakeConvertToHlo(operand_f8, gtes[k]->shape().element_type()); HloInstruction* broadcast_scale = MakeBroadcastHlo(operand_scale, {}, operand_f32->shape()); TF_ASSIGN_OR_RETURN( HloInstruction * operand_scaled, MakeBinaryHlo(binaries[k]->opcode(), operand_f32, broadcast_scale)); for (int l = 0; l < 2; ++l) { if (dots[l]->operand(k) == gtes[k]) { TF_RETURN_IF_ERROR(dots[l]->ReplaceOperandWith(k, operand_scaled)); } if (dyn_slices[l] && dyn_slices[l]->operand(0) == gtes[k]) { TF_RETURN_IF_ERROR( dyn_slices[l]->ReplaceOperandWith(0, operand_scaled)); } } if (coll_perms[0] && coll_perms[0]->operand(0) == gtes[k]) { std::array<HloInstruction*, 2> coll_perms_f8{nullptr, nullptr}; coll_perms_f8[0] = while_body->AddInstruction(coll_perms[0]->CloneWithNewOperands( operand_f8->shape(), {operand_f8})); coll_perms_f8[1] = while_body->AddInstruction(coll_perms[1]->CloneWithNewOperands( coll_perms_f8[0]->shape(), {coll_perms_f8[0]})); HloInstruction* coll_perm0_f32 = MakeConvertToHlo(coll_perms_f8[0], 
gtes[k]->shape().element_type()); TF_ASSIGN_OR_RETURN(HloInstruction * x_scaled, MakeBinaryHlo(binaries[k]->opcode(), coll_perm0_f32, broadcast_scale)); TF_RETURN_IF_ERROR(dots[1]->ReplaceOperandWith(0, x_scaled)); TF_RETURN_IF_ERROR( while_root->ReplaceOperandWithDifferentShape(0, coll_perms_f8[1])); ShapeUtil::UpdateTupleShape(coll_perms_f8[1]->shape(), 0, while_root->mutable_shape()); } } HloInstruction* new_while_instr = while_instr->AddInstruction( while_instr->CloneWithNewShape(while_root->shape())); TF_RETURN_IF_ERROR( while_instr->ReplaceAllUsesWithDifferentShape(new_while_instr)); while_instr->while_body()->SetWhileCallInstruction(new_while_instr); TF_RETURN_IF_ERROR(while_instr->parent()->RemoveInstruction(while_instr)); if (coll_perms[0]) { TF_RETURN_IF_ERROR(while_body->RemoveInstruction(coll_perms[1])); TF_RETURN_IF_ERROR(while_body->RemoveInstruction(coll_perms[0])); } TF_RETURN_IF_ERROR(while_body->RemoveInstruction(gtes[0])); TF_RETURN_IF_ERROR(while_body->RemoveInstruction(gtes[1])); VLOG(5) << "FP8 dequantization moved into while loop."; return true; } int64_t NumberOfInstructionsInComp(const HloComputation* comp, HloOpcode op) { int64_t total_count = 0; for (const HloInstruction* inst : comp->instructions()) { if (inst->opcode() == op) { ++total_count; } } return total_count; } absl::Status UpdateDotAndConsumerConfig(HloInstruction* dot, int64_t stream_id) { auto dot_gpu_config = dot->backend_config<gpu::GpuBackendConfig>(); HloInstruction* updater = dot->users()[0]; auto updater_gpu_config = updater->backend_config<gpu::GpuBackendConfig>(); dot_gpu_config->set_operation_queue_id(stream_id); if (!absl::c_linear_search(updater_gpu_config->wait_on_operation_queues(), stream_id)) { updater_gpu_config->mutable_wait_on_operation_queues()->Add(stream_id); } TF_RETURN_IF_ERROR(dot->set_backend_config(dot_gpu_config.value())); TF_RETURN_IF_ERROR(updater->set_backend_config(updater_gpu_config.value())); return absl::OkStatus(); } absl::Status SetForceDelayForInstruction(HloInstruction* instr, bool force_delay) { auto gpu_config = instr->backend_config<gpu::GpuBackendConfig>(); gpu_config->set_force_earliest_schedule(force_delay); TF_RETURN_IF_ERROR(instr->set_backend_config(gpu_config.value())); return absl::OkStatus(); } static int64_t GetAgActivationCacheIndex(const HloInstruction* while_loop) { const HloInstruction* loop_tuple = while_loop->operand(0); const Shape& tuple_shape = loop_tuple->shape(); CHECK(tuple_shape.IsTuple()); return tuple_shape.tuple_shapes_size() - 1; } absl::Status ProcessWindowedEinsumLoopForActivationCaching( WindowedEinsumHandler::WindowedEinsumAgLoops& ag_loop) { HloInstruction* loop = ag_loop.loop; HloComputation* while_body = loop->while_body(); HloInstruction* input_gte; for (HloInstruction* gte : while_body->parameter_instruction(0)->users()) { if (gte->tuple_index() == 0) { input_gte = gte; } } HloInstruction* root = while_body->root_instruction(); HloInstruction* input_tuple = while_body->parameter_instruction(0); const Shape& input_shape = input_tuple->shape(); int64_t full_cache_buffer_index = GetAgActivationCacheIndex(loop); HloInstruction* full_buffer_output_gte = while_body->AddInstruction(HloInstruction::CreateGetTupleElement( ShapeUtil::GetTupleElementShape(input_shape, full_cache_buffer_index), input_tuple, full_cache_buffer_index)); HloInstruction* new_full_buffer_output = nullptr; HloInstruction* dus_boundary_constant; HloInstruction* first_cp_output; for (HloInstruction* gte_user : input_gte->users()) { if (gte_user->opcode() == 
HloOpcode::kCollectivePermute) { first_cp_output = gte_user; break; } } for (HloInstruction* inst : while_body->MakeInstructionPostOrder()) { HloInstruction* slice_indices; if (Match(inst, m::DynamicUpdateSlice( m::GetTupleElement(m::Parameter()), m::Op(), m::Constant(&dus_boundary_constant), m::Reshape(m::DynamicSlice(&slice_indices, m::Op(), m::Op())), m::Op()))) { slice_indices = while_body->AddInstruction(HloInstruction::CreateReshape( dus_boundary_constant->shape(), slice_indices)); VLOG(5) << "Created slice op for first slice: " << slice_indices->ToString(); full_buffer_output_gte = while_body->AddInstruction(HloInstruction::CreateDynamicUpdateSlice( full_buffer_output_gte->shape(), full_buffer_output_gte, input_gte, {dus_boundary_constant, slice_indices, dus_boundary_constant})); } if (Match(inst, m::DynamicUpdateSlice( m::DynamicUpdateSlice(), m::Op(), m::Constant(), m::Reshape(m::DynamicSlice(&slice_indices, m::Op(), m::Op())), m::Op()))) { slice_indices = while_body->AddInstruction(HloInstruction::CreateReshape( dus_boundary_constant->shape(), slice_indices)); VLOG(5) << "Created slice op for second slice: " << slice_indices->ToString(); new_full_buffer_output = while_body->AddInstruction(HloInstruction::CreateDynamicUpdateSlice( full_buffer_output_gte->shape(), full_buffer_output_gte, first_cp_output, {dus_boundary_constant, slice_indices, dus_boundary_constant})); } HloInstruction* slice_index; HloInstruction* ds_index_constant; HloInstruction* remainder; HloInstruction* ds_param; if (Match(inst, m::Dot(m::Op(), m::DynamicSlice(&ds_param))) && Match(ds_param->operand(0), m::GetTupleElement(m::Parameter(), 1))) { for (int64_t ds_op_i = 1; ds_op_i < ds_param->operands().size(); ds_op_i++) { if (!Match( ds_param->mutable_operand(ds_op_i), m::Reshape(&slice_index, m::DynamicSlice(m::Constant(), m::Op(&remainder)))) && !Match(ds_param->mutable_operand(ds_op_i), m::Constant(&ds_index_constant))) { return absl::OkStatus(); } } if (Match(remainder, m::Remainder(m::Add(m::GetTupleElement(), m::Op()), m::Op()))) { full_buffer_output_gte = while_body->AddInstruction(HloInstruction::CreateDynamicUpdateSlice( full_buffer_output_gte->shape(), full_buffer_output_gte, input_gte, {ds_index_constant, ds_index_constant, slice_index})); } if (Match(remainder, m::Remainder( m::Add(m::Add(m::GetTupleElement(), m::Op()), m::Op()), m::Op()))) { new_full_buffer_output = while_body->AddInstruction(HloInstruction::CreateDynamicUpdateSlice( full_buffer_output_gte->shape(), full_buffer_output_gte, first_cp_output, {ds_index_constant, ds_index_constant, slice_index})); } } } std::vector<HloInstruction*> original_operands(root->operands().begin(), root->operands().end()); original_operands.push_back(new_full_buffer_output); HloInstruction* new_output_tuple = while_body->AddInstruction( HloInstruction::CreateTuple(original_operands)); TF_RETURN_IF_ERROR( while_body->ReplaceInstructionWithDifferentShape(root, new_output_tuple)); return absl::OkStatus(); } bool HasReplicaGroups(const HloInstruction* inst) { return inst->replica_groups().size() > 0; } bool ShouldAddToChain(const HloInstruction* inst) { switch (inst->opcode()) { case HloOpcode::kTranspose: case HloOpcode::kReshape: case HloOpcode::kCopy: return inst->user_count() == 1; default: return false; } } absl::Status PostProcessUnrolledLoop(HloInstruction* loop, int64_t stream_id) { HloComputation* while_body = loop->while_body(); int64_t force_delay_cp_gte_index = while_body->name().find( WindowedEinsumHandler::kWindowedEinsumRsLoopName) == 0 ? 
2 : 0; for (HloInstruction* inst : while_body->MakeInstructionPostOrder()) { HloInstruction* matched_cp; if (Match(inst, m::CollectivePermute( &matched_cp, m::GetTupleElement(m::Parameter(), force_delay_cp_gte_index)))) { TF_RETURN_IF_ERROR( SetForceDelayForInstruction(matched_cp, true)); } if (inst->opcode() == HloOpcode::kDot) { TF_RETURN_IF_ERROR(UpdateDotAndConsumerConfig(inst, stream_id)); ++stream_id; } } return absl::OkStatus(); } struct MatchedGemmA2aResult { HloInstruction* producer_gemm; HloInstruction* lhs; HloInstruction* rhs; HloInstruction* a2a_replacement = nullptr; bool matched = false; }; class WindowedEinsumVisitor : public DfsHloRewriteVisitor { public: explicit WindowedEinsumVisitor( std::vector<WindowedEinsumHandler::WindowedEinsumAgLoops>& all_ag_loops) : all_ag_loops_(all_ag_loops) {} absl::StatusOr<bool> MatchA2aGemmWithIntermediateReshapes( HloInstruction* dot, HloInstruction** lhs, HloInstruction** rhs) { if (Match(dot, m::Dot(m::AllToAll(lhs).WithOneUse().WithPredicate( HasReplicaGroups), m::Op(rhs))) && !DynCast<HloAllToAllInstruction>((*lhs))->constrain_layout() && !(*lhs)->shape().IsTuple()) { return true; } std::vector<HloInstruction*> allowed_intermediate_ops( {dot->mutable_operand(0)}); HloAllToAllInstruction* matched_a2a = nullptr; while (true) { HloInstruction* curr = allowed_intermediate_ops.back(); if (ShouldAddToChain(curr)) { allowed_intermediate_ops.insert(allowed_intermediate_ops.end(), std::begin(curr->operands()), std::end(curr->operands())); } else if (curr->opcode() == HloOpcode::kAllToAll && curr->user_count() == 1) { matched_a2a = DynCast<HloAllToAllInstruction>(curr); allowed_intermediate_ops.pop_back(); break; } else { return false; } } CHECK(matched_a2a != nullptr); if (matched_a2a->constrain_layout() || matched_a2a->shape().IsTuple() || !HasReplicaGroups(matched_a2a) || !matched_a2a->split_dimension()) { return false; } int64_t split_dimension = *matched_a2a->split_dimension(); for (int64_t i = allowed_intermediate_ops.size() - 1; i >= 0; i--) { HloInstruction* current_op = allowed_intermediate_ops[i]; if (current_op->opcode() == HloOpcode::kReshape) { std::vector<std::pair<int64_t, int64_t>> unmodified_dims = ShapeUtil::DimensionsUnmodifiedByReshape( current_op->operand(0)->shape(), current_op->shape()); auto it = absl::c_find_if( unmodified_dims, [&split_dimension](std::pair<int64_t, int64_t>& dim_pair) { return dim_pair.first == split_dimension; }); if (it == unmodified_dims.end()) { VLOG(5) << "Split dimension of: " << matched_a2a->ToShortString() << " has been modified by reshapes. 
Skip process it for " "decomposition."; return false; } split_dimension = it->second; } else if (current_op->opcode() == HloOpcode::kTranspose) { const auto& transpose_dims = current_op->dimensions(); for (int64_t j = 0; j < transpose_dims.size(); j++) { if ((int64_t)transpose_dims[j] == split_dimension) { split_dimension = j; break; } } } } TF_RETURN_IF_ERROR(allowed_intermediate_ops.back()->ReplaceOperandWith( 0, matched_a2a->mutable_operand(0))); HloInstruction* new_a2a = matched_a2a->parent()->AddInstruction(HloInstruction::CreateAllToAll( allowed_intermediate_ops.front()->shape(), {allowed_intermediate_ops.front()}, matched_a2a->replica_groups(), false, hlo_query::NextChannelId(*matched_a2a->GetModule()), split_dimension)); TF_RETURN_IF_ERROR(dot->ReplaceOperandWith(0, new_a2a)); TF_RETURN_IF_ERROR( matched_a2a->parent()->RemoveInstructionAndUnusedOperands(matched_a2a)); MarkAsChanged(); *lhs = new_a2a; *rhs = dot->mutable_operand(1); return true; } absl::Status HandleDot(HloInstruction* dot) override { CHECK_EQ(dot->opcode(), HloOpcode::kDot); HloComputation* comp = dot->parent(); for (WindowedEinsumHandler::WindowedEinsumAgLoops& ag_loop : all_ag_loops_) { HloComputation* comp = dot->parent(); HloInstruction* loop = ag_loop.loop; HloInstruction* windowed_lhs = loop->mutable_operand(0)->mutable_operand(0); HloInstruction *all_gather, *binary, *scale = nullptr; auto all_gather_optionally_dequantized = m::AnyOf<HloInstruction>( m::AllGather(&all_gather, m::Divide(&binary, m::Convert(m::Op().Is(windowed_lhs)), m::Broadcast(m::Op(&scale)))), m::AllGather( &all_gather, m::MultiplyAnyOrder(&binary, m::Convert(m::Op().Is(windowed_lhs)), m::Broadcast(m::Op(&scale)))), m::AllGather(&all_gather, m::Op().Is(windowed_lhs))); if (!Match(dot, m::Dot(all_gather_optionally_dequantized, m::Op())) && !Match(dot, m::Dot(m::Op(), all_gather_optionally_dequantized))) { continue; } if (scale) { if (!ShapeUtil::IsScalar(scale->shape())) { continue; } if (windowed_lhs->shape().element_type() != F8E4M3FN && windowed_lhs->shape().element_type() != F8E5M2) { continue; } if (binary->shape().element_type() != BF16 && binary->shape().element_type() != F16 && binary->shape().element_type() != F32) { continue; } } if (!ag_loop.consumed) { Literal zero_literal = LiteralUtil::Zero(windowed_lhs->shape().element_type()); HloInstruction* zero = comp->AddInstruction( HloInstruction::CreateConstant(std::move(zero_literal))); Shape zero_bcast_shape = ShapeUtil::ChangeElementType( all_gather->shape(), windowed_lhs->shape().element_type()); HloInstruction* zero_bcast = MakeBroadcastHlo(zero, {}, zero_bcast_shape); loop->mutable_operand(0)->AppendOperand(zero_bcast); ShapeUtil::AppendShapeToTuple( zero_bcast->shape(), loop->mutable_operand(0)->mutable_shape()); for (HloComputation* while_comp : {loop->while_body(), loop->while_condition()}) { while_comp->ReplaceParameter( 0, HloInstruction::CreateParameter( 0, loop->mutable_operand(0)->shape(), while_comp->parameter_instruction(0)->name())); } *loop->mutable_shape() = loop->operand(0)->shape(); VLOG(5) << "Found all-gather that shares the same operand with a " "windowed einsum loop : " << loop->ToString(); TF_RETURN_IF_ERROR( ProcessWindowedEinsumLoopForActivationCaching(ag_loop)); ag_loop.consumed = true; } int64_t cache_output_index = dot->operand_index(all_gather); HloInstruction* new_gte = comp->AddInstruction(HloInstruction::CreateGetTupleElement( loop, GetAgActivationCacheIndex(loop))); HloInstruction* new_gte_scaled; if (scale) { HloInstruction* new_convert = 
MakeConvertToHlo(new_gte, binary->shape().element_type()); HloInstruction* bcast_scale = MakeBroadcastHlo(scale, {}, new_convert->shape()); TF_ASSIGN_OR_RETURN( new_gte_scaled, MakeBinaryHlo(binary->opcode(), new_convert, bcast_scale)); } TF_RETURN_IF_ERROR(dot->ReplaceOperandWith( cache_output_index, scale ? new_gte_scaled : new_gte)); if (all_gather->user_count() == 0) { TF_RETURN_IF_ERROR(comp->RemoveInstruction(all_gather)); } } HloInstruction* lhs; HloInstruction* rhs; std::vector<xla::ReplicaGroup> replica_groups; TF_ASSIGN_OR_RETURN(bool matched, MatchA2aGemmWithIntermediateReshapes(dot, &lhs, &rhs)); if (matched) { replica_groups = lhs->replica_groups(); int64_t group_size = replica_groups[0].replica_ids_size(); if (absl::c_find_if(replica_groups, [&](ReplicaGroup& group) { return group.replica_ids_size() != group_size; }) != replica_groups.end()) { VLOG(5) << "All-to-all split groups don't have the same number of " "replicas."; return absl::OkStatus(); } const DotDimensionNumbers& original_dot_dnums = dot->dot_dimension_numbers(); const PrecisionConfig& original_precision = dot->precision_config(); const auto& lhs_contracting_dims = dot->dot_dimension_numbers().lhs_contracting_dimensions(); const auto& rhs_contracting_dims = dot->dot_dimension_numbers().rhs_contracting_dimensions(); if (lhs_contracting_dims.size() != 1 || rhs_contracting_dims.size() != 1) { VLOG(5) << "Contracting dimensions have multiple elements, all-to-all " "sharding will be skipped."; return absl::OkStatus(); } int64_t lhs_contracting_dim = lhs_contracting_dims[0]; int64_t rhs_contracting_dim = rhs_contracting_dims[0]; HloAllToAllInstruction* a2a = DynCast<HloAllToAllInstruction>(lhs); int64_t contracting_dim_value = rhs->shape().dimensions()[rhs_contracting_dim]; std::vector<int64_t> lhs_slice_sizes(a2a->shape().rank(), 0); std::vector<int64_t> lhs_slice_increments(a2a->shape().rank(), 1); std::vector<int64_t> lhs_slice_max_range( a2a->shape().dimensions().begin(), a2a->shape().dimensions().end()); std::vector<int64_t> rhs_slice_sizes(rhs->shape().rank(), 0); std::vector<int64_t> rhs_slice_increments(rhs->shape().rank(), 1); std::vector<int64_t> rhs_slice_max_range( rhs->shape().dimensions().begin(), rhs->shape().dimensions().end()); HloInstruction* output_buffer = comp->AddInstruction(HloInstruction::CreateBroadcast( dot->shape(), comp->AddInstruction(HloInstruction::CreateConstant( LiteralUtil::Zero(dot->shape().element_type()))), {})); HloInstruction* a2a_operand = a2a->mutable_operand(0); if (contracting_dim_value % group_size) { VLOG(5) << absl::StrFormat( "Contracting dimension %d needs to be divisible by group_size %d", contracting_dim_value, group_size); return absl::OkStatus(); } int64_t size_per_split = contracting_dim_value / group_size; lhs_slice_max_range[lhs_contracting_dim] = size_per_split; rhs_slice_max_range[rhs_contracting_dim] = size_per_split; Shape lhs_slice_shape = a2a->shape(); Shape rhs_slice_shape = rhs->shape(); lhs_slice_shape.set_dimensions(lhs_contracting_dim, size_per_split); rhs_slice_shape.set_dimensions(rhs_contracting_dim, size_per_split); HloInstruction* lhs_slice; HloInstruction* rhs_slice; HloInstruction* partial_result = output_buffer; Shape partial_all_to_all_shape = lhs_slice_shape; TF_ASSIGN_OR_RETURN( Shape partial_dot_shape, ShapeInference::InferDotOpShape( partial_all_to_all_shape, rhs_slice_shape, original_dot_dnums, std::nullopt)); int64_t stream_id = hlo_query::NextChannelId(*a2a->GetModule()); for (int64_t i = 0; i < group_size; ++i) { lhs_slice = 
comp->AddInstruction(HloInstruction::CreateSlice( lhs_slice_shape, a2a_operand, lhs_slice_sizes, lhs_slice_max_range, lhs_slice_increments)); a2a->SetupDerivedInstruction(lhs_slice); lhs_slice_sizes[lhs_contracting_dim] = lhs_slice_max_range[lhs_contracting_dim]; lhs_slice_max_range[lhs_contracting_dim] += size_per_split; rhs_slice = comp->AddInstruction(HloInstruction::CreateSlice( rhs_slice_shape, rhs, rhs_slice_sizes, rhs_slice_max_range, rhs_slice_increments)); a2a->SetupDerivedInstruction(rhs_slice); rhs_slice_sizes[rhs_contracting_dim] = rhs_slice_max_range[rhs_contracting_dim]; rhs_slice_max_range[rhs_contracting_dim] += size_per_split; HloInstruction* partial_all_to_all = comp->AddInstruction(HloInstruction::CreateAllToAll( partial_all_to_all_shape, {lhs_slice}, a2a->device_list(), false, hlo_query::NextChannelId(*a2a->GetModule()), a2a->split_dimension())); a2a->SetupDerivedInstruction(partial_all_to_all); HloInstruction* partial_dot = comp->AddInstruction(HloInstruction::CreateDot( partial_dot_shape, partial_all_to_all, rhs_slice, original_dot_dnums, original_precision)); partial_result = comp->AddInstruction( HloInstruction::CreateBinary(partial_dot->shape(), HloOpcode::kAdd, partial_dot, partial_result)); a2a->SetupDerivedInstruction(partial_result); TF_RETURN_IF_ERROR( UpdateDotAndConsumerConfig(partial_dot, stream_id++)); } TF_RETURN_IF_ERROR(ReplaceInstruction(dot, partial_result)); } return absl::OkStatus(); } absl::StatusOr<MatchedGemmA2aResult> MatchGemmA2aWithIntermediateReshapes( HloInstruction* inst) { MatchedGemmA2aResult result; HloAllToAllInstruction* a2a = DynCast<HloAllToAllInstruction>(inst); if (!HasReplicaGroups(a2a) || a2a->constrain_layout() || a2a->shape().IsTuple()) { return result; } if (Match(a2a, m::AllToAll(m::Dot(&result.producer_gemm, m::Op(&result.lhs), m::Op(&result.rhs)) .WithOneUse()))) { result.matched = true; return result; } std::vector<HloInstruction*> allowed_intermediate_ops( {a2a->mutable_operand(0)}); HloInstruction* matched_dot = nullptr; while (true) { HloInstruction* curr = allowed_intermediate_ops.back(); if (ShouldAddToChain(curr)) { allowed_intermediate_ops.insert(allowed_intermediate_ops.end(), std::begin(curr->operands()), std::end(curr->operands())); } else if (curr->opcode() == HloOpcode::kDot && curr->user_count() == 1) { matched_dot = curr; allowed_intermediate_ops.pop_back(); break; } else { return result; } } CHECK(matched_dot != nullptr); int64_t split_dimension = *a2a->split_dimension(); for (int64_t i = 0; i < allowed_intermediate_ops.size(); i++) { HloInstruction* current_op = allowed_intermediate_ops[i]; if (current_op->opcode() == HloOpcode::kReshape) { std::vector<std::pair<int64_t, int64_t>> unmodified_dims = ShapeUtil::DimensionsUnmodifiedByReshape( current_op->operand(0)->shape(), current_op->shape()); auto it = absl::c_find_if( unmodified_dims, [&split_dimension](std::pair<int64_t, int64_t>& dim_pair) { return dim_pair.second == split_dimension; }); if (it == unmodified_dims.end()) { VLOG(5) << "Split dimension of: " << a2a->ToShortString() << " has been modified by reshapes. 
Skip process it for " "decomposition."; return result; } split_dimension = it->first; } else if (current_op->opcode() == HloOpcode::kTranspose) { const auto& transpose_dims = current_op->dimensions(); split_dimension = transpose_dims[split_dimension]; } } result.a2a_replacement = matched_dot->parent()->AddInstruction(HloInstruction::CreateAllToAll( matched_dot->shape(), {matched_dot}, a2a->replica_groups(), false, hlo_query::NextChannelId(*matched_dot->GetModule()), split_dimension)); TF_RETURN_IF_ERROR(allowed_intermediate_ops.back()->ReplaceOperandWith( 0, result.a2a_replacement)); inst->SetupDerivedInstruction(result.a2a_replacement); TF_RETURN_IF_ERROR( ReplaceInstruction(inst, allowed_intermediate_ops.front())); result.lhs = matched_dot->mutable_operand(0); result.rhs = matched_dot->mutable_operand(1); result.producer_gemm = matched_dot; result.matched = true; return result; } absl::Status HandleAllToAll(HloInstruction* inst) override { CHECK_EQ(inst->opcode(), HloOpcode::kAllToAll); HloComputation* comp = inst->parent(); std::vector<xla::ReplicaGroup> replica_groups; TF_ASSIGN_OR_RETURN(MatchedGemmA2aResult matched_result, MatchGemmA2aWithIntermediateReshapes(inst)); if (matched_result.matched) { HloInstruction* a2a = inst; if (matched_result.a2a_replacement) { a2a = matched_result.a2a_replacement; } replica_groups = a2a->replica_groups(); int64_t group_size = replica_groups[0].replica_ids_size(); if (absl::c_find_if(replica_groups, [&](ReplicaGroup& group) { return group.replica_ids_size() != group_size; }) != replica_groups.end()) { VLOG(5) << "All-to-all split groups don't have the same number of " "replicas."; return absl::OkStatus(); } const DotDimensionNumbers& original_dot_dnums = matched_result.producer_gemm->dot_dimension_numbers(); const PrecisionConfig& original_precision = matched_result.producer_gemm->precision_config(); const auto& lhs_contracting_dims = matched_result.producer_gemm->dot_dimension_numbers() .lhs_contracting_dimensions(); const auto& rhs_contracting_dims = matched_result.producer_gemm->dot_dimension_numbers() .rhs_contracting_dimensions(); if (lhs_contracting_dims.size() != 1 || rhs_contracting_dims.size() != 1) { VLOG(5) << "Contracting dimensions have multiple elements, all-to-all " "sharding will be skipped."; return absl::OkStatus(); } int64_t lhs_contracting_dim = lhs_contracting_dims[0]; int64_t rhs_contracting_dim = rhs_contracting_dims[0]; HloAllToAllInstruction* all_to_all = DynCast<HloAllToAllInstruction>(a2a); int64_t contracting_dim_value = matched_result.rhs->shape().dimensions()[rhs_contracting_dim]; std::vector<int64_t> lhs_slice_sizes(matched_result.lhs->shape().rank(), 0); std::vector<int64_t> lhs_slice_increments( matched_result.lhs->shape().rank(), 1); std::vector<int64_t> lhs_slice_max_range( matched_result.lhs->shape().dimensions().begin(), matched_result.lhs->shape().dimensions().end()); std::vector<int64_t> rhs_slice_sizes(matched_result.rhs->shape().rank(), 0); std::vector<int64_t> rhs_slice_increments( matched_result.rhs->shape().rank(), 1); std::vector<int64_t> rhs_slice_max_range( matched_result.rhs->shape().dimensions().begin(), matched_result.rhs->shape().dimensions().end()); HloInstruction* output_buffer = comp->AddInstruction(HloInstruction::CreateBroadcast( all_to_all->shape(), comp->AddInstruction(HloInstruction::CreateConstant( LiteralUtil::Zero(all_to_all->shape().element_type()))), {})); if (contracting_dim_value % group_size) { VLOG(5) << absl::StrFormat( "Contracting dimension %d needs to be divisible by group_size 
%d", contracting_dim_value, group_size); return absl::OkStatus(); } int64_t size_per_split = contracting_dim_value / group_size; lhs_slice_max_range[lhs_contracting_dim] = size_per_split; rhs_slice_max_range[rhs_contracting_dim] = size_per_split; Shape lhs_slice_shape = matched_result.lhs->shape(); Shape rhs_slice_shape = matched_result.rhs->shape(); lhs_slice_shape.set_dimensions(lhs_contracting_dim, size_per_split); rhs_slice_shape.set_dimensions(rhs_contracting_dim, size_per_split); HloInstruction* lhs_slice; HloInstruction* rhs_slice; HloInstruction* partial_result = output_buffer; Shape partial_all_to_all_shape = all_to_all->shape(); TF_ASSIGN_OR_RETURN( Shape partial_dot_shape, ShapeInference::InferDotOpShape( lhs_slice_shape, rhs_slice_shape, original_dot_dnums, std::nullopt)); int64_t stream_id = hlo_query::NextChannelId(*all_to_all->GetModule()); for (int64_t i = 0; i < group_size; ++i) { lhs_slice = comp->AddInstruction(HloInstruction::CreateSlice( lhs_slice_shape, matched_result.lhs, lhs_slice_sizes, lhs_slice_max_range, lhs_slice_increments)); all_to_all->SetupDerivedInstruction(lhs_slice); lhs_slice_sizes[lhs_contracting_dim] = lhs_slice_max_range[lhs_contracting_dim]; lhs_slice_max_range[lhs_contracting_dim] += size_per_split; rhs_slice = comp->AddInstruction(HloInstruction::CreateSlice( rhs_slice_shape, matched_result.rhs, rhs_slice_sizes, rhs_slice_max_range, rhs_slice_increments)); all_to_all->SetupDerivedInstruction(rhs_slice); rhs_slice_sizes[rhs_contracting_dim] = rhs_slice_max_range[rhs_contracting_dim]; rhs_slice_max_range[rhs_contracting_dim] += size_per_split; HloInstruction* partial_dot = comp->AddInstruction( HloInstruction::CreateDot(partial_dot_shape, lhs_slice, rhs_slice, original_dot_dnums, original_precision)); HloInstruction* partial_all_to_all = comp->AddInstruction(HloInstruction::CreateAllToAll( partial_all_to_all_shape, {partial_dot}, all_to_all->device_list(), false, hlo_query::NextChannelId(*all_to_all->GetModule()), all_to_all->split_dimension())); all_to_all->SetupDerivedInstruction(partial_all_to_all); partial_result = comp->AddInstruction(HloInstruction::CreateBinary( partial_all_to_all_shape, HloOpcode::kAdd, partial_all_to_all, partial_result)); all_to_all->SetupDerivedInstruction(partial_result); TF_RETURN_IF_ERROR( UpdateDotAndConsumerConfig(partial_dot, stream_id++)); } TF_RETURN_IF_ERROR(ReplaceInstruction(all_to_all, partial_result)); } return absl::OkStatus(); } private: std::vector<WindowedEinsumHandler::WindowedEinsumAgLoops>& all_ag_loops_; }; } absl::StatusOr<bool> WindowedEinsumHandler::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { XLA_VLOG_LINES( 5, "WindowedEinsumHandler::Run(), before:\n" + module->ToString()); bool changed = false; int64_t stream_id = hlo_query::NextChannelId(*module); std::vector<HloInstruction*> all_windowed_einsum_loops; for (HloComputation* comp : module->MakeNonfusionComputations(execution_threads)) { if (NumberOfInstructionsInComp(comp, HloOpcode::kDot) <= 1) { continue; } if (comp->name().find(kWindowedEinsumRsLoopName) == 0 || comp->name().find(kWindowedEinsumAgLoopName) == 0) { VLOG(5) << "Processing computation: " << comp->name(); TF_ASSIGN_OR_RETURN(changed, ShiftDequantizationF8(comp)); if (comp->name().find(kWindowedEinsumAgLoopName) == 0) { all_ag_loops_.push_back( WindowedEinsumAgLoops(comp->WhileCallInstruction())); } all_windowed_einsum_loops.push_back(comp->WhileCallInstruction()); } } for (HloComputation* comp : 
module->MakeNonfusionComputations(execution_threads)) { WindowedEinsumVisitor visitor(all_ag_loops_); TF_RETURN_IF_ERROR(comp->Accept(&visitor)); changed |= visitor.changed(); } if (!all_windowed_einsum_loops.empty()) { TF_ASSIGN_OR_RETURN(bool applied_algsimp, AlgebraicSimplifier(AlgebraicSimplifierOptions()) .Run(module, execution_threads)); changed |= applied_algsimp; TF_ASSIGN_OR_RETURN(bool applied_cf, HloConstantFolding().Run(module, execution_threads)); changed |= applied_cf; } for (HloInstruction* loop : all_windowed_einsum_loops) { VLOG(5) << "Processing " << loop->ToString() << " for unrolling."; std::string original_body_name = std::string(loop->while_body()->name()); std::string original_cond_name = std::string(loop->while_condition()->name()); TF_ASSIGN_OR_RETURN( UnrollResult result, WhileLoopUnroller::UnrollAndReturnReplacement( loop, -1, true, false)); if (result.unrolled) { result.new_while_op->while_body()->SetAndSanitizeName( absl::StrCat("unrolled_", original_body_name)); result.new_while_op->while_condition()->SetAndSanitizeName( absl::StrCat("unrolled_", original_cond_name)); xla::FrontendAttributes attributes; (*attributes.mutable_map())["skip-simplify-while-loops_trip-count-one"] = "true"; result.new_while_op->add_frontend_attributes(attributes); TF_RETURN_IF_ERROR( PostProcessUnrolledLoop(result.new_while_op, stream_id)); } changed |= result.unrolled; } XLA_VLOG_LINES(5, "WindowedEinsumHandler::Run(), after:\n" + module->ToString()); return changed; } }
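// A minimal sketch of invoking this pass standalone, assuming the standard
// HloPassPipeline API; the wrapper function name below is illustrative only
// and is not part of the XLA codebase. The unit test that follows exercises
// the pass directly via WindowedEinsumHandler::Run.
//
//   #include "xla/hlo/pass/hlo_pass_pipeline.h"
//   #include "xla/service/gpu/transforms/windowed_einsum_handler.h"
//
//   // Runs the windowed-einsum rewrite on `module` and reports whether the
//   // module changed.
//   absl::StatusOr<bool> RunWindowedEinsumHandler(xla::HloModule* module) {
//     xla::HloPassPipeline pipeline("windowed-einsum-handler");
//     pipeline.AddPass<xla::gpu::WindowedEinsumHandler>();
//     return pipeline.Run(module);
//   }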
#include "xla/service/gpu/transforms/windowed_einsum_handler.h" #include <cstdint> #include <memory> #include <string> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/pattern_matcher.h" #include "xla/service/pattern_matcher_gmock.h" #include "xla/tests/filecheck.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/statusor.h" namespace xla::gpu { namespace { namespace m = ::xla::match; using WindowedEinsumHandlerTest = HloTestBase; HloInstruction* FindInstructionByName(HloComputation* comp, std::string name) { for (auto inst : comp->instructions()) { if (inst->name() == name) { return inst; } } return nullptr; } TEST_F(WindowedEinsumHandlerTest, AgLoopsHaveStreamIds) { constexpr absl::string_view kHloString = R"( HloModule pjit__unnamed_wrapped_function_, entry_computation_layout={(bf16[1,512,24576]{2,1,0}, bf16[24576,24576]{1,0})->bf16[2048,24576]{1,0}}, num_partitions=4 windowed_dot_general_body_ag.1 { param = (bf16[512,24576]{1,0}, bf16[24576,24576]{1,0}, bf16[2048,24576]{1,0}, bf16[2048,24576]{1,0}, u32[]) parameter(0) get-tuple-element = bf16[512,24576]{1,0} get-tuple-element(param), index=0 collective-permute.send_first_lhs_shard = bf16[512,24576]{1,0} collective-permute(get-tuple-element), channel_id=2, source_target_pairs={{0,3},{1,0},{2,1},{3,2}}, backend_config={"operation_queue_id":"0","wait_on_operation_queues":[]} get-tuple-element.lhs = bf16[24576,24576]{1,0} get-tuple-element(param), index=1 get-tuple-element.rhs = bf16[2048,24576]{1,0} get-tuple-element(param), index=2 dot.2 = bf16[512,24576]{1,0} dot(get-tuple-element, get-tuple-element.lhs), lhs_contracting_dims={1}, rhs_contracting_dims={0}, backend_config={"operation_queue_id":"0","wait_on_operation_queues":[]} constant.1 = s32[4]{0} constant({0, 512, 1024, 1536}) get-tuple-element.4 = u32[] get-tuple-element(param), index=4 partition-id = u32[] partition-id() add = u32[] add(get-tuple-element.4, partition-id) constant = u32[] constant(4) remainder = u32[] remainder(add, constant) dynamic-slice = s32[1]{0} dynamic-slice(constant.1, remainder), dynamic_slice_sizes={1} reshape.4 = s32[] reshape(dynamic-slice) constant.2 = s32[] constant(0) dynamic-update-slice = bf16[2048,24576]{1,0} dynamic-update-slice(get-tuple-element.rhs, dot.2, reshape.4, constant.2), backend_config={"operation_queue_id":"0","wait_on_operation_queues":[]} dot.3 = bf16[512,24576]{1,0} dot(collective-permute.send_first_lhs_shard, get-tuple-element.lhs), lhs_contracting_dims={1}, rhs_contracting_dims={0} constant.3 = u32[] constant(1) add.1 = u32[] add(get-tuple-element.4, constant.3) add.2 = u32[] add(add.1, partition-id) remainder.1 = u32[] remainder(add.2, constant) dynamic-slice.1 = s32[1]{0} dynamic-slice(constant.1, remainder.1), dynamic_slice_sizes={1} reshape.5 = s32[] reshape(dynamic-slice.1) dynamic-update-slice.1 = bf16[2048,24576]{1,0} dynamic-update-slice(dynamic-update-slice, dot.3, reshape.5, constant.2) get-tuple-element.3 = bf16[2048,24576]{1,0} get-tuple-element(param), index=3 add.3 = u32[] add(add.1, constant.3) ROOT tuple = (bf16[512,24576]{1,0}, bf16[24576,24576]{1,0}, bf16[2048,24576]{1,0}, bf16[2048,24576]{1,0}, u32[]) tuple(collective-permute.send_first_lhs_shard, get-tuple-element.lhs, dynamic-update-slice.1, get-tuple-element.3, add.3) } windowed_dot_general_cond_ag { param.1 = (bf16[512,24576]{1,0}, 
bf16[24576,24576]{1,0}, bf16[2048,24576]{1,0}, bf16[2048,24576]{1,0}, u32[]) parameter(0) get-tuple-element.5 = u32[] get-tuple-element(param.1), index=4 constant.8 = u32[] constant(4) ROOT compare = pred[] compare(get-tuple-element.5, constant.8), direction=LT } ENTRY test_main { param.4 = bf16[1,512,24576]{2,1,0} parameter(0), sharding={devices=[1,4,1]<=[4]} reshape.8 = bf16[512,24576]{1,0} reshape(param.4) param.5 = bf16[24576,24576]{1,0} parameter(1), sharding={devices=[1,4]<=[4]} constant.18 = bf16[] constant(0) broadcast = bf16[2048,24576]{1,0} broadcast(constant.18), dimensions={} constant.20 = u32[] constant(0) tuple.2 = (bf16[512,24576]{1,0}, bf16[24576,24576]{1,0}, bf16[2048,24576]{1,0}, bf16[2048,24576]{1,0}, u32[]) tuple(reshape.8, param.5, broadcast, broadcast, constant.20) while = (bf16[512,24576]{1,0}, bf16[24576,24576]{1,0}, bf16[2048,24576]{1,0}, bf16[2048,24576]{1,0}, u32[]) while(tuple.2), condition=windowed_dot_general_cond_ag, body=windowed_dot_general_body_ag.1 ROOT get-tuple-element.13 = bf16[2048,24576]{1,0} get-tuple-element(while), index=2 } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHloString)); WindowedEinsumHandler gpu_handler; bool changed; TF_ASSERT_OK_AND_ASSIGN(changed, gpu_handler.Run(module.get())); EXPECT_TRUE(changed); HloInstruction* ag_loop = module->entry_computation()->root_instruction()->mutable_operand(0); HloComputation* ag_loop_body = ag_loop->while_body(); int64_t dot_count = 0; for (HloInstruction* inst : ag_loop_body->MakeInstructionPostOrder()) { if (inst->opcode() == HloOpcode::kDot) { dot_count++; EXPECT_GT(inst->backend_config<GpuBackendConfig>()->operation_queue_id(), 0); } } EXPECT_EQ(dot_count, 4); HloInstruction* cp1 = FindInstructionByName( ag_loop_body, "collective-permute.send_first_lhs_shard.3"); EXPECT_TRUE( cp1->backend_config<GpuBackendConfig>()->force_earliest_schedule()); } TEST_F(WindowedEinsumHandlerTest, RsLoopsHaveStreamIds) { constexpr absl::string_view kHloString = R"( HloModule pjit__unnamed_wrapped_function_, entry_computation_layout={(bf16[24576,24576]{1,0}, bf16[512,24576]{1,0}, bf16[2048,24576]{1,0})->bf16[512,24576]{1,0}}, num_partitions=4 windowed_dot_general_body_rs_clone.1 { param.2 = (bf16[2048,24576]{1,0}, bf16[24576,24576]{1,0}, bf16[512,24576]{1,0}, bf16[512,24576]{1,0}, u32[]) parameter(0) get-tuple-element.6 = bf16[2048,24576]{1,0} get-tuple-element(param.2), index=0 get-tuple-element.7 = bf16[24576,24576]{1,0} get-tuple-element(param.2), index=1 get-tuple-element.9 = bf16[512,24576]{1,0} get-tuple-element(param.2), index=2 collective-permute.send_second_lhs_shard = bf16[512,24576]{1,0} collective-permute(get-tuple-element.9), channel_id=4, source_target_pairs={{0,2},{1,3},{2,0},{3,1}}, backend_config={"operation_queue_id":"0","wait_on_operation_queues":[]} constant.10 = s32[4]{0} constant({0, 512, 1024, 1536}) get-tuple-element.11 = u32[] get-tuple-element(param.2), index=4 constant.12 = u32[] constant(2) add.8 = u32[] add(get-tuple-element.11, constant.12) constant.13 = u32[] constant(1) add.9 = u32[] add(add.8, constant.13) partition-id.3 = u32[] partition-id() add.10 = u32[] add(add.9, partition-id.3) constant.9 = u32[] constant(4) remainder.3 = u32[] remainder(add.10, constant.9) dynamic-slice.4 = s32[1]{0} dynamic-slice(constant.10, remainder.3), dynamic_slice_sizes={1} reshape.7 = s32[] reshape(dynamic-slice.4) constant.11 = s32[] constant(0) dynamic-slice.5 = bf16[512,24576]{1,0} dynamic-slice(get-tuple-element.6, reshape.7, constant.11), 
dynamic_slice_sizes={512,24576} dot.7 = bf16[512,24576]{1,0} dot(dynamic-slice.5, get-tuple-element.7), lhs_contracting_dims={1}, rhs_contracting_dims={0}, backend_config={"operation_queue_id":"0","wait_on_operation_queues":[]} add.11 = bf16[512,24576]{1,0} add(collective-permute.send_second_lhs_shard, dot.7), backend_config={"operation_queue_id":"0","wait_on_operation_queues":[]} get-tuple-element.10 = bf16[512,24576]{1,0} get-tuple-element(param.2), index=3 add.6 = u32[] add(get-tuple-element.11, partition-id.3) remainder.2 = u32[] remainder(add.6, constant.9) dynamic-slice.2 = s32[1]{0} dynamic-slice(constant.10, remainder.2), dynamic_slice_sizes={1} reshape.6 = s32[] reshape(dynamic-slice.2) dynamic-slice.3 = bf16[512,24576]{1,0} dynamic-slice(get-tuple-element.6, reshape.6, constant.11), dynamic_slice_sizes={512,24576} dot.5 = bf16[512,24576]{1,0} dot(dynamic-slice.3, get-tuple-element.7), lhs_contracting_dims={1}, rhs_contracting_dims={0}, backend_config={"operation_queue_id":"0","wait_on_operation_queues":[]} add.7 = bf16[512,24576]{1,0} add(get-tuple-element.10, dot.5), backend_config={"operation_queue_id":"0","wait_on_operation_queues":[]} collective-permute.2 = bf16[512,24576]{1,0} collective-permute(add.7), channel_id=5, source_target_pairs={{0,2},{1,3},{2,0},{3,1}} ROOT tuple.1 = (bf16[2048,24576]{1,0}, bf16[24576,24576]{1,0}, bf16[512,24576]{1,0}, bf16[512,24576]{1,0}, u32[]) tuple(get-tuple-element.6, get-tuple-element.7, add.11, collective-permute.2, add.8) } windowed_dot_general_cond_rs { param.3 = (bf16[2048,24576]{1,0}, bf16[24576,24576]{1,0}, bf16[512,24576]{1,0}, bf16[512,24576]{1,0}, u32[]) parameter(0) get-tuple-element.12 = u32[] get-tuple-element(param.3), index=4 constant.17 = u32[] constant(4) ROOT compare.1 = pred[] compare(get-tuple-element.12, constant.17), direction=LT } ENTRY main.9_spmd { param.6 = bf16[24576,24576]{1,0} parameter(0), sharding={devices=[4,1]<=[4]} param.7 = bf16[512,24576]{1,0} parameter(1) param.8 = bf16[2048,24576]{1,0} parameter(2) constant.20 = u32[] constant(0) tuple.3 = (bf16[2048,24576]{1,0}, bf16[24576,24576]{1,0}, bf16[512,24576]{1,0}, bf16[512,24576]{1,0}, u32[]) tuple(param.8, param.6, param.7, param.7, constant.20) while.1 = (bf16[2048,24576]{1,0}, bf16[24576,24576]{1,0}, bf16[512,24576]{1,0}, bf16[512,24576]{1,0}, u32[]) while(tuple.3), condition=windowed_dot_general_cond_rs, body=windowed_dot_general_body_rs_clone.1 ROOT get-tuple-element.14 = bf16[512,24576]{1,0} get-tuple-element(while.1), index=2 } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHloString)); WindowedEinsumHandler gpu_handler; bool changed; TF_ASSERT_OK_AND_ASSIGN(changed, gpu_handler.Run(module.get())); EXPECT_TRUE(changed); HloInstruction* rs_loop = module->entry_computation()->root_instruction()->mutable_operand(0); HloComputation* rs_loop_body = rs_loop->while_body(); int64_t dot_count = 0; for (HloInstruction* inst : rs_loop_body->MakeInstructionPostOrder()) { if (inst->opcode() == HloOpcode::kDot) { dot_count++; EXPECT_GT(inst->backend_config<GpuBackendConfig>()->operation_queue_id(), 0); } } EXPECT_EQ(dot_count, 4); } TEST_F(WindowedEinsumHandlerTest, AgLoopsMultipleConsumersAreChained) { constexpr absl::string_view kHloString = R"( HloModule pjit__unnamed_wrapped_function_, entry_computation_layout={(bf16[2,512,24576]{2,1,0}, bf16[24576,24576]{1,0}, bf16[24576,24576]{1,0})->bf16[2,2048,24576]{2,1,0}}, num_partitions=4 windowed_dot_general_body_ag { param.1 = (bf16[2,512,24576]{2,1,0}, 
bf16[24576,24576]{1,0}, bf16[2,2048,24576]{2,1,0}, bf16[2,2048,24576]{2,1,0}, u32[]) parameter(0) get-tuple-element.lhs = bf16[2,512,24576]{2,1,0} get-tuple-element(param.1), index=0 collective-permute.send_first_lhs_shard = bf16[2,512,24576]{2,1,0} collective-permute(get-tuple-element.lhs), channel_id=2, source_target_pairs={{0,3},{1,0},{2,1},{3,2}} collective-permute.send_second_lhs_shard = bf16[2,512,24576]{2,1,0} collective-permute(collective-permute.send_first_lhs_shard), channel_id=3, source_target_pairs={{0,3},{1,0},{2,1},{3,2}} get-tuple-element.rhs = bf16[24576,24576]{1,0} get-tuple-element(param.1), index=1 get-tuple-element.3 = bf16[2,2048,24576]{2,1,0} get-tuple-element(param.1), index=2 dot = bf16[2,512,24576]{2,1,0} dot(get-tuple-element.lhs, get-tuple-element.rhs), lhs_contracting_dims={2}, rhs_contracting_dims={0} constant.2 = s32[] constant(0) constant.3 = s32[4]{0} constant({0, 512, 1024, 1536}) get-tuple-element.5 = u32[] get-tuple-element(param.1), index=4 partition-id = u32[] partition-id() add = u32[] add(get-tuple-element.5, partition-id) constant.1 = u32[] constant(4) remainder = u32[] remainder(add, constant.1) dynamic-slice = s32[1]{0} dynamic-slice(constant.3, remainder), dynamic_slice_sizes={1} reshape = s32[] reshape(dynamic-slice) dynamic-update-slice = bf16[2,2048,24576]{2,1,0} dynamic-update-slice(get-tuple-element.3, dot, constant.2, reshape, constant.2) dot.1 = bf16[2,512,24576]{2,1,0} dot(collective-permute.send_first_lhs_shard, get-tuple-element.rhs), lhs_contracting_dims={2}, rhs_contracting_dims={0} constant.5 = u32[] constant(1) add.1 = u32[] add(get-tuple-element.5, constant.5) add.2 = u32[] add(add.1, partition-id) remainder.1 = u32[] remainder(add.2, constant.1) dynamic-slice.1 = s32[1]{0} dynamic-slice(constant.3, remainder.1), dynamic_slice_sizes={1} reshape.1 = s32[] reshape(dynamic-slice.1) dynamic-update-slice.1 = bf16[2,2048,24576]{2,1,0} dynamic-update-slice(dynamic-update-slice, dot.1, constant.2, reshape.1, constant.2) get-tuple-element.4 = bf16[2,2048,24576]{2,1,0} get-tuple-element(param.1), index=3 add.3 = u32[] add(add.1, constant.5) ROOT tuple = (bf16[2,512,24576]{2,1,0}, bf16[24576,24576]{1,0}, bf16[2,2048,24576]{2,1,0}, bf16[2,2048,24576]{2,1,0}, u32[]) tuple(collective-permute.send_second_lhs_shard, get-tuple-element.rhs, dynamic-update-slice.1, get-tuple-element.4, add.3) } windowed_dot_general_cond_ag { param = (bf16[2,512,24576]{2,1,0}, bf16[24576,24576]{1,0}, bf16[2,2048,24576]{2,1,0}, bf16[2,2048,24576]{2,1,0}, u32[]) parameter(0) get-tuple-element = u32[] get-tuple-element(param), index=4 constant = u32[] constant(4) ROOT compare = pred[] compare(get-tuple-element, constant), direction=LT } ENTRY main.12_spmd { param.4 = bf16[2,512,24576]{2,1,0} parameter(0), sharding={devices=[1,4,1]<=[4]} param.5 = bf16[24576,24576]{1,0} parameter(1), sharding={devices=[1,4]<=[4]} constant.22 = bf16[] constant(0) broadcast = bf16[2,2048,24576]{2,1,0} broadcast(constant.22), dimensions={} constant.24 = u32[] constant(0) tuple.2 = (bf16[2,512,24576]{2,1,0}, bf16[24576,24576]{1,0}, bf16[2,2048,24576]{2,1,0}, bf16[2,2048,24576]{2,1,0}, u32[]) tuple(param.4, param.5, broadcast, broadcast, constant.24) while = (bf16[2,512,24576]{2,1,0}, bf16[24576,24576]{1,0}, bf16[2,2048,24576]{2,1,0}, bf16[2,2048,24576]{2,1,0}, u32[]) while(tuple.2), condition=windowed_dot_general_cond_ag, body=windowed_dot_general_body_ag get-tuple-element.13 = bf16[2,2048,24576]{2,1,0} get-tuple-element(while), index=2 copy.1 = bf16[2,2048,24576]{2,1,0} 
copy(get-tuple-element.13) all-gather = bf16[2,2048,24576]{2,1,0} all-gather(param.4), channel_id=1, replica_groups={{0,1,2,3}}, dimensions={1}, use_global_device_ids=true param.6 = bf16[24576,24576]{1,0} parameter(2), sharding={devices=[1,4]<=[4]} ROOT dot.7 = bf16[2,2048,24576]{2,1,0} dot(all-gather, param.6), lhs_contracting_dims={2}, rhs_contracting_dims={0} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHloString)); WindowedEinsumHandler gpu_handler; bool changed; TF_ASSERT_OK_AND_ASSIGN(changed, gpu_handler.Run(module.get())); EXPECT_TRUE(changed); HloInstruction* inst = FindInstructionByName(module->entry_computation(), "dot.7"); EXPECT_EQ(inst->operand(0)->opcode(), HloOpcode::kGetTupleElement); EXPECT_EQ(inst->operand(0)->tuple_index(), 5); const HloInstruction* while_loop = inst->operand(0)->operand(0); EXPECT_EQ(while_loop->opcode(), HloOpcode::kWhile); HloComputation* while_body = while_loop->while_body(); int64_t dot_count = 0; for (HloInstruction* ins : while_body->MakeInstructionPostOrder()) { if (ins->opcode() == HloOpcode::kDot) { dot_count++; EXPECT_GT(ins->backend_config<GpuBackendConfig>()->operation_queue_id(), 0); } } EXPECT_EQ(dot_count, 4); HloInstruction* ag_loop = FindInstructionByName(module->entry_computation(), "while"); HloInstruction* ag_while_root = ag_loop->while_body()->root_instruction(); EXPECT_THAT( ag_while_root, GmockMatch(m::Tuple( m::Op(), m::Op(), m::Op(), m::Op(), m::Op(), m::DynamicUpdateSlice( m::DynamicUpdateSlice( m::GetTupleElement( m::Tuple(m::Op(), m::Op(), m::Op(), m::Op(), m::Op(), m::DynamicUpdateSlice( m::DynamicUpdateSlice( m::GetTupleElement(m::Parameter()) .WithPredicate( [](const HloInstruction* instr) { return instr->tuple_index() == 5; }), m::Op(), m::Op(), m::Op(), m::Op()), m::Op(), m::Op(), m::Op(), m::Op()))) .WithPredicate([](const HloInstruction* instr) { return instr->tuple_index() == 5; }), m::Op(), m::Op(), m::Op(), m::Op()), m::Op(), m::Op(), m::Op(), m::Op())))); EXPECT_EQ(FindInstructionByName(module->entry_computation(), "all-gather"), nullptr); } TEST_F(WindowedEinsumHandlerTest, A2aGemmHaveStreamIds) { constexpr absl::string_view kHloString = R"( HloModule pjit__unnamed_wrapped_function_, entry_computation_layout={(bf16[1,8192,32768]{2,1,0}, bf16[1,4,2048,8192]{3,2,1,0})->bf16[1,4,2048,32768]{3,2,1,0}}, num_partitions=8 ENTRY main.9_spmd { param0 = bf16[1,8192,32768]{2,1,0} parameter(0) param1 = bf16[1,4,2048,8192]{3,2,1,0} parameter(1) all-to-all = bf16[1,4,2048,8192]{3,2,1,0} all-to-all(param1), channel_id=4, replica_groups={{0,1,2,3},{4,5,6,7}}, dimensions={1} ROOT dot.12 = bf16[1,4,2048,32768]{3,2,1,0} dot(all-to-all, param0), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1} } )"; const char* kExpected = R"( CHECK: ENTRY CHECK-DAG: %[[P1:.*]] = bf16[1,4,2048,8192]{3,2,1,0} parameter(1) CHECK-DAG: %[[SLICE0:.*]] = bf16[1,4,2048,2048]{3,2,1,0} slice(bf16[1,4,2048,8192]{3,2,1,0} %[[P1]]), slice={[0:1], [0:4], [0:2048], [6144:8192]} CHECK: %[[A2A0:.*]] = bf16[1,4,2048,2048]{3,2,1,0} all-to-all(bf16[1,4,2048,2048]{3,2,1,0} %[[SLICE0]]), CHECK: replica_groups={ CHECK: {0,1,2,3},{4,5,6,7} CHECK: } CHECK: dimensions={1} CHECK-DAG: %[[P0:.*]] = bf16[1,8192,32768]{2,1,0} parameter(0) CHECK-DAG: %[[SLICE4:.*]] = bf16[1,2048,32768]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [6144:8192], [0:32768]} CHECK-DAG: %[[DOT0:.*]] = bf16[1,4,2048,32768]{3,2,1,0} dot(bf16[1,4,2048,2048]{3,2,1,0} %[[A2A0:.*]], 
bf16[1,2048,32768]{2,1,0} %[[SLICE4:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}, backend_config={"operation_queue_id":"8","wait_on_operation_queues":[],"force_earliest_schedule":false} CHECK-DAG: %[[SLICE1:.*]] = bf16[1,4,2048,2048]{3,2,1,0} slice(bf16[1,4,2048,8192]{3,2,1,0} %[[P1]]), slice={[0:1], [0:4], [0:2048], [4096:6144]} CHECK: %[[A2A1:.*]] = bf16[1,4,2048,2048]{3,2,1,0} all-to-all(bf16[1,4,2048,2048]{3,2,1,0} %[[SLICE1]]), CHECK: replica_groups={ CHECK: {0,1,2,3},{4,5,6,7} CHECK: } CHECK: dimensions={1} CHECK-DAG: %[[SLICE5:.*]] = bf16[1,2048,32768]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [4096:6144], [0:32768]} CHECK-DAG: %[[DOT1:.*]] = bf16[1,4,2048,32768]{3,2,1,0} dot(bf16[1,4,2048,2048]{3,2,1,0} %[[A2A1:.*]], bf16[1,2048,32768]{2,1,0} %[[SLICE5:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}, backend_config={"operation_queue_id":"7","wait_on_operation_queues":[],"force_earliest_schedule":false} CHECK-DAG: %[[SLICE2:.*]] = bf16[1,4,2048,2048]{3,2,1,0} slice(bf16[1,4,2048,8192]{3,2,1,0} %[[P1]]), slice={[0:1], [0:4], [0:2048], [2048:4096]} CHECK: %[[A2A2:.*]] = bf16[1,4,2048,2048]{3,2,1,0} all-to-all(bf16[1,4,2048,2048]{3,2,1,0} %[[SLICE2]]), CHECK: replica_groups={ CHECK: {0,1,2,3},{4,5,6,7} CHECK: } CHECK: dimensions={1} CHECK-DAG: %[[SLICE6:.*]] = bf16[1,2048,32768]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [2048:4096], [0:32768]} CHECK-DAG: %[[DOT2:.*]] = bf16[1,4,2048,32768]{3,2,1,0} dot(bf16[1,4,2048,2048]{3,2,1,0} %[[A2A2:.*]], bf16[1,2048,32768]{2,1,0} %[[SLICE6:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}, backend_config={"operation_queue_id":"6","wait_on_operation_queues":[],"force_earliest_schedule":false} CHECK-DAG: %[[SLICE3:.*]] = bf16[1,4,2048,2048]{3,2,1,0} slice(bf16[1,4,2048,8192]{3,2,1,0} %[[P1]]), slice={[0:1], [0:4], [0:2048], [0:2048]} CHECK: %[[A2A2:.*]] = bf16[1,4,2048,2048]{3,2,1,0} all-to-all(bf16[1,4,2048,2048]{3,2,1,0} %[[SLICE3]]), CHECK: replica_groups={ CHECK: {0,1,2,3},{4,5,6,7} CHECK: } CHECK: dimensions={1} CHECK-DAG: %[[SLICE7:.*]] = bf16[1,2048,32768]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [0:2048], [0:32768]} CHECK-DAG: %[[DOT3:.*]] = bf16[1,4,2048,32768]{3,2,1,0} dot(bf16[1,4,2048,2048]{3,2,1,0} %[[A2A3:.*]], bf16[1,2048,32768]{2,1,0} %[[SLICE7:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}, backend_config={"operation_queue_id":"5","wait_on_operation_queues":[],"force_earliest_schedule":false} CHECK-DAG: %[[CONSTANT:.*]] = bf16[] constant(0) CHECK-DAG: %[[BROADCAST:.*]] = bf16[1,4,2048,32768]{3,2,1,0} broadcast(bf16[] %[[CONSTANT:.*]]), dimensions={} CHECK-DAG: %[[ADD0:.*]] = bf16[1,4,2048,32768]{3,2,1,0} add(bf16[1,4,2048,32768]{3,2,1,0} %[[DOT0:.*]], bf16[1,4,2048,32768]{3,2,1,0} %[[BROADCAST:.*]]), backend_config={"operation_queue_id":"0","wait_on_operation_queues":["5"],"force_earliest_schedule":false} CHECK-DAG: %[[ADD1:.*]] = bf16[1,4,2048,32768]{3,2,1,0} add(bf16[1,4,2048,32768]{3,2,1,0} %[[DOT1:.*]], bf16[1,4,2048,32768]{3,2,1,0} %[[ADD0:.*]]), backend_config={"operation_queue_id":"0","wait_on_operation_queues":["6"],"force_earliest_schedule":false} CHECK-DAG: %[[ADD2:.*]] = bf16[1,4,2048,32768]{3,2,1,0} add(bf16[1,4,2048,32768]{3,2,1,0} %[[DOT2:.*]], bf16[1,4,2048,32768]{3,2,1,0} %[[ADD1:.*]]), 
backend_config={"operation_queue_id":"0","wait_on_operation_queues":["7"],"force_earliest_schedule":false} CHECK: ROOT {{.*}} = bf16[1,4,2048,32768]{3,2,1,0} add(bf16[1,4,2048,32768]{3,2,1,0} %[[DOT3:.*]], bf16[1,4,2048,32768]{3,2,1,0} %[[ADD2:.*]]) )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHloString)); WindowedEinsumHandler gpu_handler; bool changed; TF_ASSERT_OK_AND_ASSIGN(changed, gpu_handler.Run(module.get())); TF_ASSERT_OK_AND_ASSIGN(bool filecheck_matched, RunFileCheck(module->ToString(), kExpected)); EXPECT_TRUE(filecheck_matched); } TEST_F(WindowedEinsumHandlerTest, GemmA2aHaveStreamIds) { constexpr absl::string_view kHloString = R"( HloModule pjit__unnamed_wrapped_function_, entry_computation_layout={(bf16[1,8192,32768]{2,1,0}, bf16[1,4,2048,32768]{3,2,1,0})->bf16[1,4,2048,8192]{3,2,1,0}}, num_partitions=4 ENTRY main.9_spmd { param.9 = bf16[1,8192,32768]{2,1,0} parameter(0) param.10 = bf16[1,4,2048,32768]{3,2,1,0} parameter(1) dot.12 = bf16[1,4,2048,8192]{3,2,1,0} dot(param.10, param.9), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={2} ROOT all-to-all = bf16[1,4,2048,8192]{3,2,1,0} all-to-all(dot.12), channel_id=4, replica_groups={{0,1,2,3}}, dimensions={1} } )"; const char* kExpected = R"( CHECK: ENTRY CHECK-DAG: %[[P1:.*]] = bf16[1,4,2048,32768]{3,2,1,0} parameter(1) CHECK-DAG: %[[SLICE0:.*]] = bf16[1,4,2048,8192]{3,2,1,0} slice(bf16[1,4,2048,32768]{3,2,1,0} %[[P1]]), slice={[0:1], [0:4], [0:2048], [24576:32768]} CHECK-DAG: %[[P0:.*]] = bf16[1,8192,32768]{2,1,0} parameter(0) CHECK-DAG: %[[SLICE4:.*]] = bf16[1,8192,8192]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [0:8192], [24576:32768]} CHECK-DAG: %[[DOT0:.*]] = bf16[1,4,2048,8192]{3,2,1,0} dot(bf16[1,4,2048,8192]{3,2,1,0} %[[SLICE0:.*]], bf16[1,8192,8192]{2,1,0} %[[SLICE4:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={2}, backend_config={"operation_queue_id":"8","wait_on_operation_queues":[],"force_earliest_schedule":false} CHECK: %[[A2A0:.*]] = bf16[1,4,2048,8192]{3,2,1,0} all-to-all(bf16[1,4,2048,8192]{3,2,1,0} %[[DOT0:.*]]), CHECK: replica_groups={ CHECK: {0,1,2,3} CHECK: } CHECK: dimensions={1} CHECK-DAG: %[[SLICE1:.*]] = bf16[1,4,2048,8192]{3,2,1,0} slice(bf16[1,4,2048,32768]{3,2,1,0} %[[P1]]), slice={[0:1], [0:4], [0:2048], [16384:24576]} CHECK-DAG: %[[SLICE5:.*]] = bf16[1,8192,8192]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [0:8192], [16384:24576]} CHECK-DAG: %[[DOT1:.*]] = bf16[1,4,2048,8192]{3,2,1,0} dot(bf16[1,4,2048,8192]{3,2,1,0} %[[SLICE1:.*]], bf16[1,8192,8192]{2,1,0} %[[SLICE5:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={2}, backend_config={"operation_queue_id":"7","wait_on_operation_queues":[],"force_earliest_schedule":false} CHECK: %[[A2A1:.*]] = bf16[1,4,2048,8192]{3,2,1,0} all-to-all(bf16[1,4,2048,8192]{3,2,1,0} %[[DOT1:.*]]), CHECK: replica_groups={ CHECK: {0,1,2,3} CHECK: } CHECK: dimensions={1} CHECK-DAG: %[[SLICE2:.*]] = bf16[1,4,2048,8192]{3,2,1,0} slice(bf16[1,4,2048,32768]{3,2,1,0} %[[P1]]), slice={[0:1], [0:4], [0:2048], [8192:16384]} CHECK-DAG: %[[SLICE6:.*]] = bf16[1,8192,8192]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [0:8192], [8192:16384]} CHECK-DAG: %[[DOT2:.*]] = bf16[1,4,2048,8192]{3,2,1,0} dot(bf16[1,4,2048,8192]{3,2,1,0} %[[SLICE2:.*]], bf16[1,8192,8192]{2,1,0} %[[SLICE6:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, 
rhs_batch_dims={0}, rhs_contracting_dims={2}, backend_config={"operation_queue_id":"6","wait_on_operation_queues":[],"force_earliest_schedule":false} CHECK: %[[A2A2:.*]] = bf16[1,4,2048,8192]{3,2,1,0} all-to-all(bf16[1,4,2048,8192]{3,2,1,0} %[[DOT2:.*]]), CHECK: replica_groups={ CHECK: {0,1,2,3} CHECK: } CHECK: dimensions={1} CHECK-DAG: %[[SLICE3:.*]] = bf16[1,4,2048,8192]{3,2,1,0} slice(bf16[1,4,2048,32768]{3,2,1,0} %[[P1]]), slice={[0:1], [0:4], [0:2048], [0:8192]} CHECK-DAG: %[[SLICE7:.*]] = bf16[1,8192,8192]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [0:8192], [0:8192]} CHECK-DAG: %[[DOT3:.*]] = bf16[1,4,2048,8192]{3,2,1,0} dot(bf16[1,4,2048,8192]{3,2,1,0} %[[SLICE3:.*]], bf16[1,8192,8192]{2,1,0} %[[SLICE7:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={2}, backend_config={"operation_queue_id":"5","wait_on_operation_queues":[],"force_earliest_schedule":false} CHECK: %[[A2A2:.*]] = bf16[1,4,2048,8192]{3,2,1,0} all-to-all(bf16[1,4,2048,8192]{3,2,1,0} %[[DOT3:.*]]), CHECK: replica_groups={ CHECK: {0,1,2,3} CHECK: } CHECK: dimensions={1} CHECK-DAG: %[[CONSTANT:.*]] = bf16[] constant(0) CHECK-DAG: %[[BROADCAST:.*]] = bf16[1,4,2048,8192]{3,2,1,0} broadcast(bf16[] %[[CONSTANT:.*]]), dimensions={} CHECK-DAG: %[[ADD0:.*]] = bf16[1,4,2048,8192]{3,2,1,0} add(bf16[1,4,2048,8192]{3,2,1,0} %[[A2A0:.*]], bf16[1,4,2048,8192]{3,2,1,0} %[[BROADCAST:.*]]) CHECK-DAG: %[[ADD1:.*]] = bf16[1,4,2048,8192]{3,2,1,0} add(bf16[1,4,2048,8192]{3,2,1,0} %[[A2A1:.*]], bf16[1,4,2048,8192]{3,2,1,0} %[[ADD0:.*]]) CHECK-DAG: %[[ADD2:.*]] = bf16[1,4,2048,8192]{3,2,1,0} add(bf16[1,4,2048,8192]{3,2,1,0} %[[A2A2:.*]], bf16[1,4,2048,8192]{3,2,1,0} %[[ADD1:.*]]) CHECK: ROOT {{.*}} = bf16[1,4,2048,8192]{3,2,1,0} add(bf16[1,4,2048,8192]{3,2,1,0} %[[A2A3:.*]], bf16[1,4,2048,8192]{3,2,1,0} %[[ADD2:.*]]) )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHloString)); WindowedEinsumHandler gpu_handler; bool changed; TF_ASSERT_OK_AND_ASSIGN(changed, gpu_handler.Run(module.get())); TF_ASSERT_OK_AND_ASSIGN(bool filecheck_matched, RunFileCheck(module->ToString(), kExpected)); EXPECT_TRUE(filecheck_matched); } TEST_F(WindowedEinsumHandlerTest, A2aTransposeLoopsHaveStreamIds) { constexpr absl::string_view kHloString = R"( HloModule pjit__unnamed_wrapped_function_, entry_computation_layout={(bf16[1,8192,32768]{2,1,0}, bf16[1,1,8192,4,1,2048]{5,4,3,2,1,0})->bf16[1,4,2048,32768]{3,2,1,0}}, num_partitions=4 ENTRY main.9_spmd { param.9 = bf16[1,8192,32768]{2,1,0} parameter(0) param.10 = bf16[1,1,8192,4,1,2048]{5,4,3,2,1,0} parameter(1) all-to-all = bf16[1,1,8192,4,1,2048]{5,4,3,2,1,0} all-to-all(param.10), channel_id=4, replica_groups={{0,1,2,3}}, dimensions={3} transpose.15 = bf16[1,4,1,8192,1,2048]{5,4,1,3,2,0} transpose(all-to-all), dimensions={0,3,1,2,4,5} reshape.2170 = bf16[1,4,8192,1,2048]{4,3,2,1,0} reshape(transpose.15) reshape.2173 = bf16[4,8192,1,2048]{3,2,1,0} reshape(reshape.2170) transpose.16 = bf16[1,4,2048,8192]{2,0,3,1} transpose(reshape.2173), dimensions={2,0,3,1} copy.53 = bf16[1,4,2048,8192]{3,2,1,0} copy(transpose.16) ROOT dot.12 = bf16[1,4,2048,32768]{3,2,1,0} dot(copy.53, param.9), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1} } )"; const char* kExpected = R"( CHECK: ENTRY CHECK-DAG: %[[P1:.*]] = bf16[1,1,8192,4,1,2048]{5,4,3,2,1,0} parameter(1) CHECK-DAG: %[[TRANSPOSE0:.*]] = bf16[1,4,1,8192,1,2048]{5,4,1,3,2,0} transpose(bf16[1,1,8192,4,1,2048]{5,4,3,2,1,0} 
%[[P1:.*]]), dimensions={0,3,1,2,4,5} CHECK-DAG: %[[RESHAPE0:.*]] = bf16[1,4,8192,1,2048]{4,3,2,1,0} reshape(bf16[1,4,1,8192,1,2048]{5,4,1,3,2,0} %[[TRANSPOSE0:.*]]) CHECK-DAG: %[[RESHAPE1:.*]] = bf16[4,8192,1,2048]{3,2,1,0} reshape(bf16[1,4,8192,1,2048]{4,3,2,1,0} %[[RESHAPE0:.*]]) CHECK-DAG: %[[TRANSPOSE1:.*]] = bf16[1,4,2048,8192]{2,0,3,1} transpose(bf16[4,8192,1,2048]{3,2,1,0} %[[RESHAPE1:.*]]), dimensions={2,0,3,1} CHECK-DAG: %[[COPY:.*]] = bf16[1,4,2048,8192]{3,2,1,0} copy(bf16[1,4,2048,8192]{2,0,3,1} %[[TRANSPOSE1:.*]]) CHECK-DAG: %[[SLICE0:.*]] = bf16[1,4,2048,2048]{3,2,1,0} slice(bf16[1,4,2048,8192]{3,2,1,0} %[[COPY:.*]]), slice={[0:1], [0:4], [0:2048], [6144:8192]} CHECK: %[[A2A0:.*]] = bf16[1,4,2048,2048]{3,2,1,0} all-to-all(bf16[1,4,2048,2048]{3,2,1,0} %[[SLICE0]]), CHECK: replica_groups={ CHECK: {0,1,2,3} CHECK: } CHECK: dimensions={1} CHECK-DAG: %[[P0:.*]] = bf16[1,8192,32768]{2,1,0} parameter(0) CHECK-DAG: %[[SLICE4:.*]] = bf16[1,2048,32768]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [6144:8192], [0:32768]} CHECK-DAG: %[[DOT0:.*]] = bf16[1,4,2048,32768]{3,2,1,0} dot(bf16[1,4,2048,2048]{3,2,1,0} %[[A2A0:.*]], bf16[1,2048,32768]{2,1,0} %[[SLICE4:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}, backend_config={"operation_queue_id":"9","wait_on_operation_queues":[],"force_earliest_schedule":false} CHECK-DAG: %[[SLICE1:.*]] = bf16[1,4,2048,2048]{3,2,1,0} slice(bf16[1,4,2048,8192]{3,2,1,0} %[[COPY:.*]]), slice={[0:1], [0:4], [0:2048], [4096:6144]} CHECK: %[[A2A1:.*]] = bf16[1,4,2048,2048]{3,2,1,0} all-to-all(bf16[1,4,2048,2048]{3,2,1,0} %[[SLICE1]]), CHECK: replica_groups={ CHECK: {0,1,2,3} CHECK: } CHECK: dimensions={1} CHECK-DAG: %[[SLICE5:.*]] = bf16[1,2048,32768]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [4096:6144], [0:32768]} CHECK-DAG: %[[DOT1:.*]] = bf16[1,4,2048,32768]{3,2,1,0} dot(bf16[1,4,2048,2048]{3,2,1,0} %[[A2A1:.*]], bf16[1,2048,32768]{2,1,0} %[[SLICE5:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}, backend_config={"operation_queue_id":"8","wait_on_operation_queues":[],"force_earliest_schedule":false} CHECK-DAG: %[[SLICE2:.*]] = bf16[1,4,2048,2048]{3,2,1,0} slice(bf16[1,4,2048,8192]{3,2,1,0} %[[COPY:.*]]), slice={[0:1], [0:4], [0:2048], [2048:4096]} CHECK: %[[A2A2:.*]] = bf16[1,4,2048,2048]{3,2,1,0} all-to-all(bf16[1,4,2048,2048]{3,2,1,0} %[[SLICE2]]), CHECK: replica_groups={ CHECK: {0,1,2,3} CHECK: } CHECK: dimensions={1} CHECK-DAG: %[[SLICE6:.*]] = bf16[1,2048,32768]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [2048:4096], [0:32768]} CHECK-DAG: %[[DOT2:.*]] = bf16[1,4,2048,32768]{3,2,1,0} dot(bf16[1,4,2048,2048]{3,2,1,0} %[[A2A2:.*]], bf16[1,2048,32768]{2,1,0} %[[SLICE6:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}, backend_config={"operation_queue_id":"7","wait_on_operation_queues":[],"force_earliest_schedule":false} CHECK-DAG: %[[SLICE3:.*]] = bf16[1,4,2048,2048]{3,2,1,0} slice(bf16[1,4,2048,8192]{3,2,1,0} %[[COPY:.*]]), slice={[0:1], [0:4], [0:2048], [0:2048]} CHECK: %[[A2A2:.*]] = bf16[1,4,2048,2048]{3,2,1,0} all-to-all(bf16[1,4,2048,2048]{3,2,1,0} %[[SLICE3]]), CHECK: replica_groups={ CHECK: {0,1,2,3} CHECK: } CHECK: dimensions={1} CHECK-DAG: %[[SLICE7:.*]] = bf16[1,2048,32768]{2,1,0} slice(bf16[1,8192,32768]{2,1,0} %[[P0:.*]]), slice={[0:1], [0:2048], [0:32768]} CHECK-DAG: %[[DOT3:.*]] = bf16[1,4,2048,32768]{3,2,1,0} 
dot(bf16[1,4,2048,2048]{3,2,1,0} %[[A2A3:.*]], bf16[1,2048,32768]{2,1,0} %[[SLICE7:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}, backend_config={"operation_queue_id":"6","wait_on_operation_queues":[],"force_earliest_schedule":false} CHECK-DAG: %[[CONSTANT:.*]] = bf16[] constant(0) CHECK-DAG: %[[BROADCAST:.*]] = bf16[1,4,2048,32768]{3,2,1,0} broadcast(bf16[] %[[CONSTANT:.*]]), dimensions={} CHECK-DAG: %[[ADD0:.*]] = bf16[1,4,2048,32768]{3,2,1,0} add(bf16[1,4,2048,32768]{3,2,1,0} %[[DOT0:.*]], bf16[1,4,2048,32768]{3,2,1,0} %[[BROADCAST:.*]]), backend_config={"operation_queue_id":"0","wait_on_operation_queues":["6"],"force_earliest_schedule":false} CHECK-DAG: %[[ADD1:.*]] = bf16[1,4,2048,32768]{3,2,1,0} add(bf16[1,4,2048,32768]{3,2,1,0} %[[DOT1:.*]], bf16[1,4,2048,32768]{3,2,1,0} %[[ADD0:.*]]), backend_config={"operation_queue_id":"0","wait_on_operation_queues":["7"],"force_earliest_schedule":false} CHECK-DAG: %[[ADD2:.*]] = bf16[1,4,2048,32768]{3,2,1,0} add(bf16[1,4,2048,32768]{3,2,1,0} %[[DOT2:.*]], bf16[1,4,2048,32768]{3,2,1,0} %[[ADD1:.*]]), backend_config={"operation_queue_id":"0","wait_on_operation_queues":["8"],"force_earliest_schedule":false} CHECK: ROOT {{.*}} = bf16[1,4,2048,32768]{3,2,1,0} add(bf16[1,4,2048,32768]{3,2,1,0} %[[DOT3:.*]], bf16[1,4,2048,32768]{3,2,1,0} %[[ADD2:.*]]) )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHloString)); WindowedEinsumHandler gpu_handler; bool changed; TF_ASSERT_OK_AND_ASSIGN(changed, gpu_handler.Run(module.get())); EXPECT_TRUE(changed); TF_ASSERT_OK_AND_ASSIGN(bool filecheck_matched, RunFileCheck(module->ToString(), kExpected)); EXPECT_TRUE(filecheck_matched); } TEST_F(WindowedEinsumHandlerTest, GemmA2aTransposeLoopsHaveStreamIds) { constexpr absl::string_view kHloString = R"( HloModule pjit__unnamed_wrapped_function_, entry_computation_layout={(bf16[1,4,2048,32768]{3,2,1,0}, bf16[1,32768,8192]{2,1,0})->bf16[1,4,1,1,2048,8192]{5,4,3,2,1,0}}, num_partitions=4 ENTRY main.9_spmd { param.9 = bf16[1,4,2048,32768]{3,2,1,0} parameter(0) param.10 = bf16[1,32768,8192]{2,1,0} parameter(1) dot.13 = bf16[1,4,2048,8192]{3,2,1,0} dot(param.9, param.10), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1} copy.55 = bf16[1,4,2048,8192]{3,2,1,0} copy(dot.13) transpose.17 = bf16[4,1,2048,8192]{3,2,0,1} transpose(copy.55), dimensions={1,0,2,3} copy.56 = bf16[4,1,2048,8192]{3,2,1,0} copy(transpose.17) reshape.2216 = bf16[1,4,1,2048,8192]{4,3,2,1,0} reshape(copy.56) reshape.2219 = bf16[1,4,1,1,2048,8192]{5,4,3,2,1,0} reshape(reshape.2216) ROOT all-to-all.1 = bf16[1,4,1,1,2048,8192]{5,4,3,2,1,0} all-to-all(reshape.2219), channel_id=7, replica_groups={{0,1,2,3}}, dimensions={1} } )"; const char* kExpected = R"( CHECK: ENTRY CHECK-DAG: %[[P1:.*]] = bf16[1,4,2048,32768]{3,2,1,0} parameter(0) CHECK-DAG: %[[SLICE0:.*]] = bf16[1,4,2048,8192]{3,2,1,0} slice(bf16[1,4,2048,32768]{3,2,1,0} %[[P1]]), slice={[0:1], [0:4], [0:2048], [24576:32768]} CHECK-DAG: %[[P0:.*]] = bf16[1,32768,8192]{2,1,0} parameter(1) CHECK-DAG: %[[SLICE4:.*]] = bf16[1,8192,8192]{2,1,0} slice(bf16[1,32768,8192]{2,1,0} %[[P0:.*]]), slice={[0:1], [24576:32768], [0:8192]} CHECK-DAG: %[[DOT0:.*]] = bf16[1,4,2048,8192]{3,2,1,0} dot(bf16[1,4,2048,8192]{3,2,1,0} %[[SLICE0:.*]], bf16[1,8192,8192]{2,1,0} %[[SLICE4:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}, 
backend_config={"operation_queue_id":"12","wait_on_operation_queues":[],"force_earliest_schedule":false} CHECK: %[[A2A0:.*]] = bf16[1,4,2048,8192]{3,2,1,0} all-to-all(bf16[1,4,2048,8192]{3,2,1,0} %[[DOT0:.*]]), CHECK: replica_groups={ CHECK: {0,1,2,3} CHECK: } CHECK: dimensions={1} CHECK-DAG: %[[SLICE1:.*]] = bf16[1,4,2048,8192]{3,2,1,0} slice(bf16[1,4,2048,32768]{3,2,1,0} %[[P1]]), slice={[0:1], [0:4], [0:2048], [16384:24576]} CHECK-DAG: %[[SLICE5:.*]] = bf16[1,8192,8192]{2,1,0} slice(bf16[1,32768,8192]{2,1,0} %[[P0:.*]]), slice={[0:1], [16384:24576], [0:8192]} CHECK-DAG: %[[DOT1:.*]] = bf16[1,4,2048,8192]{3,2,1,0} dot(bf16[1,4,2048,8192]{3,2,1,0} %[[SLICE1:.*]], bf16[1,8192,8192]{2,1,0} %[[SLICE5:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}, backend_config={"operation_queue_id":"11","wait_on_operation_queues":[],"force_earliest_schedule":false} CHECK: %[[A2A1:.*]] = bf16[1,4,2048,8192]{3,2,1,0} all-to-all(bf16[1,4,2048,8192]{3,2,1,0} %[[DOT1:.*]]), CHECK: replica_groups={ CHECK: {0,1,2,3} CHECK: } CHECK: dimensions={1} CHECK-DAG: %[[SLICE2:.*]] = bf16[1,4,2048,8192]{3,2,1,0} slice(bf16[1,4,2048,32768]{3,2,1,0} %[[P1]]), slice={[0:1], [0:4], [0:2048], [8192:16384]} CHECK-DAG: %[[SLICE6:.*]] = bf16[1,8192,8192]{2,1,0} slice(bf16[1,32768,8192]{2,1,0} %[[P0:.*]]), slice={[0:1], [8192:16384], [0:8192]} CHECK-DAG: %[[DOT2:.*]] = bf16[1,4,2048,8192]{3,2,1,0} dot(bf16[1,4,2048,8192]{3,2,1,0} %[[SLICE2:.*]], bf16[1,8192,8192]{2,1,0} %[[SLICE6:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}, backend_config={"operation_queue_id":"10","wait_on_operation_queues":[],"force_earliest_schedule":false} CHECK: %[[A2A2:.*]] = bf16[1,4,2048,8192]{3,2,1,0} all-to-all(bf16[1,4,2048,8192]{3,2,1,0} %[[DOT2:.*]]), CHECK: replica_groups={ CHECK: {0,1,2,3} CHECK: } CHECK: dimensions={1} CHECK-DAG: %[[SLICE3:.*]] = bf16[1,4,2048,8192]{3,2,1,0} slice(bf16[1,4,2048,32768]{3,2,1,0} %[[P1]]), slice={[0:1], [0:4], [0:2048], [0:8192]} CHECK-DAG: %[[SLICE7:.*]] = bf16[1,8192,8192]{2,1,0} slice(bf16[1,32768,8192]{2,1,0} %[[P0:.*]]), slice={[0:1], [0:8192], [0:8192]} CHECK-DAG: %[[DOT3:.*]] = bf16[1,4,2048,8192]{3,2,1,0} dot(bf16[1,4,2048,8192]{3,2,1,0} %[[SLICE3:.*]], bf16[1,8192,8192]{2,1,0} %[[SLICE7:.*]]), lhs_batch_dims={0}, lhs_contracting_dims={3}, rhs_batch_dims={0}, rhs_contracting_dims={1}, backend_config={"operation_queue_id":"9","wait_on_operation_queues":[],"force_earliest_schedule":false} CHECK: %[[A2A2:.*]] = bf16[1,4,2048,8192]{3,2,1,0} all-to-all(bf16[1,4,2048,8192]{3,2,1,0} %[[DOT3:.*]]), CHECK: replica_groups={ CHECK: {0,1,2,3} CHECK: } CHECK: dimensions={1} CHECK-DAG: %[[CONSTANT:.*]] = bf16[] constant(0) CHECK-DAG: %[[BROADCAST:.*]] = bf16[1,4,2048,8192]{3,2,1,0} broadcast(bf16[] %[[CONSTANT:.*]]), dimensions={} CHECK-DAG: %[[ADD0:.*]] = bf16[1,4,2048,8192]{3,2,1,0} add(bf16[1,4,2048,8192]{3,2,1,0} %[[A2A0:.*]], bf16[1,4,2048,8192]{3,2,1,0} %[[BROADCAST:.*]]) CHECK-DAG: %[[ADD1:.*]] = bf16[1,4,2048,8192]{3,2,1,0} add(bf16[1,4,2048,8192]{3,2,1,0} %[[A2A1:.*]], bf16[1,4,2048,8192]{3,2,1,0} %[[ADD0:.*]]) CHECK-DAG: %[[ADD2:.*]] = bf16[1,4,2048,8192]{3,2,1,0} add(bf16[1,4,2048,8192]{3,2,1,0} %[[A2A2:.*]], bf16[1,4,2048,8192]{3,2,1,0} %[[ADD1:.*]]) CHECK-DAG: %[[ADD3:.*]] = bf16[1,4,2048,8192]{3,2,1,0} add(bf16[1,4,2048,8192]{3,2,1,0} %[[A2A3:.*]], bf16[1,4,2048,8192]{3,2,1,0} %[[ADD2:.*]]) CHECK-DAG: %[[COPY:.*]] = bf16[1,4,2048,8192]{3,2,1,0} copy(bf16[1,4,2048,8192]{3,2,1,0} %[[ADD3:.*]]) CHECK-DAG: 
%[[TRANSPOSE0:.*]] = bf16[4,1,2048,8192]{3,2,0,1} transpose(bf16[1,4,2048,8192]{3,2,1,0} %[[COPY:.*]]), dimensions={1,0,2,3} CHECK-DAG: %[[COPY1:.*]] = bf16[4,1,2048,8192]{3,2,1,0} copy(bf16[4,1,2048,8192]{3,2,0,1} %[[TRANSPOSE0:.*]]) CHECK-DAG: %[[RESHAPE0:.*]] = bf16[1,4,1,2048,8192]{4,3,2,1,0} reshape(bf16[4,1,2048,8192]{3,2,1,0} %[[COPY1:.*]]) CHECK: ROOT {{.*}} = bf16[1,4,1,1,2048,8192]{5,4,3,2,1,0} reshape(bf16[1,4,1,2048,8192]{4,3,2,1,0} %[[RESHAPE0:.*]]) )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHloString)); WindowedEinsumHandler gpu_handler; bool changed; TF_ASSERT_OK_AND_ASSIGN(changed, gpu_handler.Run(module.get())); EXPECT_TRUE(changed); TF_ASSERT_OK_AND_ASSIGN(bool filecheck_matched, RunFileCheck(module->ToString(), kExpected)); EXPECT_TRUE(filecheck_matched); } TEST_F(WindowedEinsumHandlerTest, AllGatherF8) { constexpr absl::string_view kHloString = R"( HloModule pjit__unnamed_wrapped_function_, entry_computation_layout={(f8e4m3fn[2,512,24576]{2,1,0}, f8e4m3fn[1536,24576]{1,0}, f32[], f32[])->f32[2,2048,24576]{2,1,0}}, num_partitions=4 windowed_dot_general_body_ag { input = (f32[2,512,24576]{2,1,0}, f32[24576,24576]{1,0}, f32[2,2048,24576]{2,1,0}, f32[2,2048,24576]{2,1,0}, u32[]) parameter(0) lhs = f32[2,512,24576]{2,1,0} get-tuple-element(input), index=0 permuted_lhs0 = f32[2,512,24576]{2,1,0} collective-permute(lhs), channel_id=4, source_target_pairs={{0,3},{1,0},{2,1},{3,2}} permuted_lhs1 = f32[2,512,24576]{2,1,0} collective-permute(permuted_lhs0), channel_id=5, source_target_pairs={{0,3},{1,0},{2,1},{3,2}} rhs = f32[24576,24576]{1,0} get-tuple-element(input), index=1 partial_dot_output = f32[2,2048,24576]{2,1,0} get-tuple-element(input), index=2 dot0 = f32[2,512,24576]{2,1,0} dot(lhs, rhs), lhs_contracting_dims={2}, rhs_contracting_dims={0} c0 = s32[] constant(0) dot_update_slice_offsets = s32[4]{0} constant({0, 512, 1024, 1536}) loop_counter = u32[] get-tuple-element(input), index=4 partition_id = u32[] partition-id() loop_counter_plus_partition_id = u32[] add(loop_counter, partition_id) c4 = u32[] constant(4) dot_update_slice_offsets_index0 = u32[] remainder(loop_counter_plus_partition_id, c4) dot_update_slice_offset0 = s32[1]{0} dynamic-slice(dot_update_slice_offsets, dot_update_slice_offsets_index0), dynamic_slice_sizes={1} dot_update_slice_offset_scalar0 = s32[] reshape(dot_update_slice_offset0) updated_dot_output0 = f32[2,2048,24576]{2,1,0} dynamic-update-slice(partial_dot_output, dot0, c0, dot_update_slice_offset_scalar0, c0) dot1 = f32[2,512,24576]{2,1,0} dot(permuted_lhs0, rhs), lhs_contracting_dims={2}, rhs_contracting_dims={0} c1 = u32[] constant(1) loop_counter_plus_one = u32[] add(loop_counter, c1) loop_counter_plus_partiion_id_plus_one = u32[] add(loop_counter_plus_one, partition_id) dot_update_slice_offsets_index1 = u32[] remainder(loop_counter_plus_partiion_id_plus_one, c4) dot_update_slice_offset1 = s32[1]{0} dynamic-slice(dot_update_slice_offsets, dot_update_slice_offsets_index1), dynamic_slice_sizes={1} dot_update_slice_offset1_scalar = s32[] reshape(dot_update_slice_offset1) updated_dot_output1 = f32[2,2048,24576]{2,1,0} dynamic-update-slice(updated_dot_output0, dot1, c0, dot_update_slice_offset1_scalar, c0) pass_through = f32[2,2048,24576]{2,1,0} get-tuple-element(input), index=3 next_loop_counter = u32[] add(loop_counter_plus_one, c1) ROOT tuple = (f32[2,512,24576]{2,1,0}, f32[24576,24576]{1,0}, f32[2,2048,24576]{2,1,0}, f32[2,2048,24576]{2,1,0}, u32[]) tuple(permuted_lhs1, rhs, updated_dot_output1, 
pass_through, next_loop_counter) } windowed_dot_general_cond_ag { input = (f32[2,512,24576]{2,1,0}, f32[24576,24576]{1,0}, f32[2,2048,24576]{2,1,0}, f32[2,2048,24576]{2,1,0}, u32[]) parameter(0) loop_counter = u32[] get-tuple-element(input), index=4 loop_limit = u32[] constant(4) ROOT compare = pred[] compare(loop_counter, loop_limit), direction=LT } ENTRY main { lhs = f8e4m3fn[2,512,24576]{2,1,0} parameter(0), sharding={devices=[1,4,1]<=[4]} rhs = f8e4m3fn[1536,24576]{1,0} parameter(1), sharding={devices=[1,4]<=[4]} c0_f32 = f32[] constant(0) c0_f32_bcast = f32[2,2048,24576]{2,1,0} broadcast(c0_f32), dimensions={} c0_u32 = u32[] constant(0) scale_lhs = f32[] parameter(2) scale_lhs_bcast = f32[2,512,24576]{2,1,0} broadcast(scale_lhs), dimensions={} lhs_f32 = f32[2,512,24576]{2,1,0} convert(lhs) lhs_scaled = f32[2,512,24576]{2,1,0} multiply(lhs_f32, scale_lhs_bcast) scale_rhs = f32[] parameter(3) scale_rhs_bcast = f32[1536,24576]{1,0} broadcast(scale_rhs), dimensions={} rhs_f32 = f32[1536,24576]{1,0} convert(rhs) rhs_scaled = f32[1536,24576]{1,0} multiply(rhs_f32, scale_rhs_bcast) rhs_bcast = f32[16,1536,24576]{2,1,0} broadcast(rhs_scaled), dimensions={1,2} rhs_reshaped = f32[24576,24576]{1,0} reshape(rhs_bcast) while_input = (f32[2,512,24576]{2,1,0}, f32[24576,24576]{1,0}, f32[2,2048,24576]{2,1,0}, f32[2,2048,24576]{2,1,0}, u32[]) tuple(lhs_scaled, rhs_reshaped, c0_f32_bcast, c0_f32_bcast, c0_u32) while = (f32[2,512,24576]{2,1,0}, f32[24576,24576]{1,0}, f32[2,2048,24576]{2,1,0}, f32[2,2048,24576]{2,1,0}, u32[]) while(while_input), condition=windowed_dot_general_cond_ag, body=windowed_dot_general_body_ag ROOT get-tuple-element.13 = f32[2,2048,24576]{2,1,0} get-tuple-element(while), index=2 } )"; RunAndFilecheckHloRewrite(kHloString, WindowedEinsumHandler(), R"( ; CHECK-LABEL: %unrolled_windowed_dot_general_body_ag ; CHECK-NEXT: [[INPUT:%[^ ]+]] = (f8e4m3fn[2,512,24576]{2,1,0}, f8e4m3fn[24576,24576]{1,0}, f32[2,2048,24576]{2,1,0}, f32[2,2048,24576]{2,1,0}, u32[], f32[], f32[]) parameter(0) ; CHECK-NEXT: [[LHS:%[^ ]+]] = f8e4m3fn[2,512,24576]{2,1,0} get-tuple-element([[INPUT]]), index=0 ; CHECK-NEXT: [[PERMUTED_LHS0:%[^ ]+]] = f8e4m3fn[2,512,24576]{2,1,0} collective-permute([[LHS]]), channel_id=6 ; CHECK-NEXT: [[PERMUTED_LHS1:%[^ ]+]] = f8e4m3fn[2,512,24576]{2,1,0} collective-permute([[PERMUTED_LHS0]]), channel_id=7 ; CHECK-NEXT: [[RHS:%[^ ]+]] = f8e4m3fn[24576,24576]{1,0} get-tuple-element([[INPUT]]), index=1 ; CHECK-NEXT: [[PARTIAL_DOT_OUTPUT:%[^ ]+]] = f32[2,2048,24576]{2,1,0} get-tuple-element([[INPUT]]), index=2 ; CHECK-NEXT: [[LHS_F32:%[^ ]+]] = f32[2,512,24576]{2,1,0} convert([[LHS]]) ; CHECK-NEXT: [[SCALE_LHS:%[^ ]+]] = f32[] get-tuple-element([[INPUT]]), index=5 ; CHECK-NEXT: [[SCALE_LHS_BCAST:%[^ ]+]] = f32[2,512,24576]{2,1,0} broadcast([[SCALE_LHS]]), dimensions={} ; CHECK-NEXT: [[LHS_SCALED:%[^ ]+]] = f32[2,512,24576]{2,1,0} multiply([[LHS_F32]], [[SCALE_LHS_BCAST]]) ; CHECK-NEXT: [[RHS_F32:%[^ ]+]] = f32[24576,24576]{1,0} convert([[RHS]]) ; CHECK-NEXT: [[SCALE_RHS:%[^ ]+]] = f32[] get-tuple-element([[INPUT]]), index=6 ; CHECK-NEXT: [[SCALE_RHS_BCAST:%[^ ]+]] = f32[24576,24576]{1,0} broadcast([[SCALE_RHS]]), dimensions={} ; CHECK-NEXT: [[RHS_SCALED:%[^ ]+]] = f32[24576,24576]{1,0} multiply([[RHS_F32]], [[SCALE_RHS_BCAST]]) ; CHECK-NEXT: [[DOT0:%[^ ]+]] = f32[2,512,24576]{2,1,0} dot([[LHS_SCALED]], [[RHS_SCALED]]), ; CHECK-DAG: lhs_contracting_dims={2}, ; CHECK-DAG: rhs_contracting_dims={0}, ; CHECK-DAG: backend_config={ ; CHECK-DAG: "operation_queue_id":"[[OPQUEUEID:[0-9]+]]", ; 
CHECK-DAG: "wait_on_operation_queues":[], ; CHECK-DAG: "force_earliest_schedule":false} ; CHECK-NEXT: [[C0_S32:%[^ ]+]] = s32[] constant(0) ; CHECK-NEXT: [[C0_U32:%[^ ]+]] = u32[] constant(0) ; CHECK-NEXT: [[C5:%[^ ]+]] = u32[] constant(0) ; CHECK-NEXT: [[PARTITION_ID:%[^ ]+]] = u32[] partition-id() ; CHECK-NEXT: [[ADD0:%[^ ]+]] = u32[] add([[C5]], [[PARTITION_ID]]) ; CHECK-NEXT: [[C3:%[^ ]+]] = u32[] constant(3) ; CHECK-NEXT: [[AND0:%[^ ]+]] = u32[] and([[ADD0]], [[C3]]) ; CHECK-NEXT: [[CLAMP0:%[^ ]+]] = u32[] clamp([[C0_U32]], [[AND0]], [[C3]]) ; CHECK-NEXT: [[CONVERT3:%[^ ]+]] = s32[] convert([[CLAMP0]]) ; CHECK-NEXT: [[C512:%[^ ]+]] = s32[] constant(512) ; CHECK-NEXT: [[MUL3:%[^ ]+]] = s32[] multiply([[CONVERT3]], [[C512]]) ; CHECK-NEXT: [[RESHAPE0:%[^ ]+]] = s32[] reshape([[MUL3]]) ; CHECK-NEXT: [[UPDATED_DOT_OUTPUT0:%[^ ]+]] = f32[2,2048,24576]{2,1,0} dynamic-update-slice([[PARTIAL_DOT_OUTPUT]], [[DOT0]], [[C0_S32]], [[RESHAPE0]], [[C0_S32]]), ; CHECK-DAG: backend_config={ ; CHECK-DAG: "operation_queue_id":"0", ; CHECK-DAG: "wait_on_operation_queues":["[[OPQUEUEID]]"], ; CHECK-DAG: "force_earliest_schedule":false} ; CHECK-NEXT: [[PERMUTED_LHS0_F32:%[^ ]+]] = f32[2,512,24576]{2,1,0} convert([[PERMUTED_LHS0]]) ; CHECK-NEXT: [[PERMUTED_LHS_SCALED:%[^ ]+]] = f32[2,512,24576]{2,1,0} multiply([[PERMUTED_LHS0_F32]], [[SCALE_LHS_BCAST]]) ; CHECK-NEXT: [[DOT1:%[^ ]+]] = f32[2,512,24576]{2,1,0} dot([[PERMUTED_LHS_SCALED]], [[RHS_SCALED]]), ; CHECK-DAG: lhs_contracting_dims={2}, ; CHECK-DAG: rhs_contracting_dims={0} ; CHECK-NEXT: [[LOOP_COUNTER:%[^ ]+]] = u32[] get-tuple-element([[INPUT]]), index=4 ; CHECK-NEXT: [[C1:%[^ ]+]] = u32[] constant(1) ; CHECK-NEXT: [[LOOP_COUNTER_PLUS_ONE:%[^ ]+]] = u32[] add([[LOOP_COUNTER]], [[C1]]) ; CHECK-NEXT: [[LOOP_COUNTER_PLUS_ONE_PLUS_PARTITION_ID:%[^ ]+]] = u32[] add([[LOOP_COUNTER_PLUS_ONE]], [[PARTITION_ID]]) ; CHECK-NEXT: [[AND1:%[^ ]+]] = u32[] and([[LOOP_COUNTER_PLUS_ONE_PLUS_PARTITION_ID]], [[C3]]) ; CHECK-NEXT: [[CLAMP1:%[^ ]+]] = u32[] clamp([[C0_U32]], [[AND1]], [[C3]]) ; CHECK-NEXT: [[CONVERT4:%[^ ]+]] = s32[] convert([[CLAMP1]]) ; CHECK-NEXT: [[MUL4:%[^ ]+]] = s32[] multiply([[CONVERT4]], [[C512]]) ; CHECK-NEXT: [[RESHAPE1:%[^ ]+]] = s32[] reshape([[MUL4]]) ; CHECK-NEXT: [[UPDATED_DOT_OUTPUT1:%[^ ]+]] = f32[2,2048,24576]{2,1,0} dynamic-update-slice([[UPDATED_DOT_OUTPUT0]], [[DOT1]], [[C0_S32]], [[RESHAPE1]], [[C0_S32]]) ; CHECK-NEXT: [[PASS_THROUGH:%[^ ]+]] = f32[2,2048,24576]{2,1,0} get-tuple-element([[INPUT]]), index=3 ; CHECK-NEXT: [[C2:%[^ ]+]] = u32[] constant(2) ; CHECK-NEXT: [[NEXT_LOOP_COUNTER:%[^ ]+]] = u32[] add([[LOOP_COUNTER]], [[C2]]) ; CHECK-NEXT: [[TUPLE:%[^ ]+]] = (f8e4m3fn[2,512,24576]{2,1,0}, f8e4m3fn[24576,24576]{1,0}, f32[2,2048,24576]{2,1,0}, f32[2,2048,24576]{2,1,0}, u32[], f32[], f32[]) tuple([[PERMUTED_LHS1]], [[RHS]], [[UPDATED_DOT_OUTPUT1]], [[PASS_THROUGH]], [[NEXT_LOOP_COUNTER]], [[SCALE_LHS]], [[SCALE_RHS]]) ; CHECK-LABEL: ENTRY %main ; CHECK: [[LHS:%[^ ]+]] = f8e4m3fn[2,512,24576]{2,1,0} parameter(0), sharding={devices=[1,4,1]<=[4]} ; CHECK-NEXT: [[RHS:%[^ ]+]] = f8e4m3fn[1536,24576]{1,0} parameter(1), sharding={devices=[1,4]<=[4]} ; CHECK-NEXT: [[RHS_BCAST:%[^ ]+]] = f8e4m3fn[16,1536,24576]{2,1,0} broadcast([[RHS]]), dimensions={1,2} ; CHECK-NEXT: [[RHS_RESHAPED:%[^ ]+]] = f8e4m3fn[24576,24576]{1,0} reshape([[RHS_BCAST]]) ; CHECK-NEXT: [[C0:%[^ ]+]] = f32[] constant(0) ; CHECK-NEXT: [[C0_BCAST:%[^ ]+]] = f32[2,2048,24576]{2,1,0} broadcast([[C0]]), dimensions={} ; CHECK-NEXT: [[C0_U32:%[^ ]+]] = u32[] constant(0) ; 
CHECK-NEXT: [[SCALE_LHS:%[^ ]+]] = f32[] parameter(2) ; CHECK-NEXT: [[SCALE_RHS:%[^ ]+]] = f32[] parameter(3) ; CHECK-NEXT: [[WHILE_INPUT:%[^ ]+]] = (f8e4m3fn[2,512,24576]{2,1,0}, f8e4m3fn[24576,24576]{1,0}, f32[2,2048,24576]{2,1,0}, f32[2,2048,24576]{2,1,0}, u32[], f32[], f32[]) tuple([[LHS]], [[RHS_RESHAPED]], [[C0_BCAST]], [[C0_BCAST]], [[C0_U32]], [[SCALE_LHS]], [[SCALE_RHS]]) ; CHECK: [[WHILE:%[^ ]+]] = (f8e4m3fn[2,512,24576]{2,1,0}, f8e4m3fn[24576,24576]{1,0}, f32[2,2048,24576]{2,1,0}, f32[2,2048,24576]{2,1,0}, u32[], f32[], f32[]) while([[WHILE_INPUT]]), ; CHECK-DAG: condition=%unrolled_windowed_dot_general_cond_ag, ; CHECK-DAG: body=%unrolled_windowed_dot_general_body_ag )"); } TEST_F(WindowedEinsumHandlerTest, ReduceScatterF8) { constexpr absl::string_view kHloString = R"( HloModule pjit__unnamed_wrapped_function_, entry_computation_layout={(f8e4m3fn[24576,24576]{1,0}, f32[2,512,24576]{2,1,0}, f8e4m3fn[2,2048,24576]{2,1,0}, f32[], f32[])->f32[2,512,24576]{2,1,0}}, num_partitions=4 windowed_dot_general_body_rs { param.3 = (f32[2,2048,24576]{2,1,0}, f32[24576,24576]{1,0}, f32[2,512,24576]{2,1,0}, f32[2,512,24576]{2,1,0}, u32[]) parameter(0) get-tuple-element.lhs = f32[2,2048,24576]{2,1,0} get-tuple-element(param.3), index=0 get-tuple-element.rhs = f32[24576,24576]{1,0} get-tuple-element(param.3), index=1 get-tuple-element.output = f32[2,512,24576]{2,1,0} get-tuple-element(param.3), index=2 collective-permute.send_shard = f32[2,512,24576]{2,1,0} collective-permute(get-tuple-element.output), channel_id=9, source_target_pairs={{0,2},{1,3},{2,0},{3,1}} constant.zero = s32[] constant(0) constant.loop_index = s32[4]{0} constant({0, 512, 1024, 1536}) get-tuple-element.loop_iter = u32[] get-tuple-element(param.3), index=4 constant.iter_increment = u32[] constant(2) add.8 = u32[] add(get-tuple-element.loop_iter, constant.iter_increment) constant.27 = u32[] constant(1) add.9 = u32[] add(add.8, constant.27) partition-id.3 = u32[] partition-id() add.shard_index = u32[] add(add.9, partition-id.3) constant.22 = u32[] constant(4) remainder.shard_index = u32[] remainder(add.shard_index, constant.22) dynamic-slice.shard_start_index = s32[1]{0} dynamic-slice(constant.loop_index, remainder.shard_index), dynamic_slice_sizes={1} reshape.3 = s32[] reshape(dynamic-slice.shard_start_index) dynamic-slice.shard_to_compute = f32[2,512,24576]{2,1,0} dynamic-slice(get-tuple-element.lhs, constant.zero, reshape.3, constant.zero), dynamic_slice_sizes={2,512,24576} dot.first_shard_dot = f32[2,512,24576]{2,1,0} dot(dynamic-slice.shard_to_compute, get-tuple-element.rhs), lhs_contracting_dims={2}, rhs_contracting_dims={0} add.shard_partial_result = f32[2,512,24576]{2,1,0} add(collective-permute.send_shard, dot.first_shard_dot) get-tuple-element.10 = f32[2,512,24576]{2,1,0} get-tuple-element(param.3), index=3 add.6 = u32[] add(get-tuple-element.loop_iter, partition-id.3) remainder.2 = u32[] remainder(add.6, constant.22) dynamic-slice.2 = s32[1]{0} dynamic-slice(constant.loop_index, remainder.2), dynamic_slice_sizes={1} reshape.2 = s32[] reshape(dynamic-slice.2) dynamic-slice.3 = f32[2,512,24576]{2,1,0} dynamic-slice(get-tuple-element.lhs, constant.zero, reshape.2, constant.zero), dynamic_slice_sizes={2,512,24576} dot.second_shard_dot = f32[2,512,24576]{2,1,0} dot(dynamic-slice.3, get-tuple-element.rhs), lhs_contracting_dims={2}, rhs_contracting_dims={0} add.7 = f32[2,512,24576]{2,1,0} add(get-tuple-element.10, dot.second_shard_dot) collective-permute.send_second_shard = f32[2,512,24576]{2,1,0} 
collective-permute(add.7), channel_id=10, source_target_pairs={{0,2},{1,3},{2,0},{3,1}} ROOT tuple.1 = (f32[2,2048,24576]{2,1,0}, f32[24576,24576]{1,0}, f32[2,512,24576]{2,1,0}, f32[2,512,24576]{2,1,0}, u32[]) tuple(get-tuple-element.lhs, get-tuple-element.rhs, add.shard_partial_result, collective-permute.send_second_shard, add.8) } windowed_dot_general_cond_rs { param.2 = (f32[2,2048,24576]{2,1,0}, f32[24576,24576]{1,0}, f32[2,512,24576]{2,1,0}, f32[2,512,24576]{2,1,0}, u32[]) parameter(0) get-tuple-element.6 = u32[] get-tuple-element(param.2), index=4 constant.21 = u32[] constant(4) ROOT compare.1 = pred[] compare(get-tuple-element.6, constant.21), direction=LT } ENTRY main.9_spmd { param.6 = f8e4m3fn[24576,24576]{1,0} parameter(0), sharding={devices=[4,1]<=[4]} param.7 = f32[2,512,24576]{2,1,0} parameter(1) param.8 = f8e4m3fn[2,2048,24576]{2,1,0} parameter(2) constant.20 = u32[] constant(0) scale_lhs = f32[] parameter(3) scale_lhs_bcast = f32[2,2048,24576]{2,1,0} broadcast(scale_lhs), dimensions={} lhs_bf16 = f32[2,2048,24576]{2,1,0} convert(param.8) lhs_scaled = f32[2,2048,24576]{2,1,0} multiply(lhs_bf16, scale_lhs_bcast) scale_rhs = f32[] parameter(4) scale_rhs_bcast = f32[24576,24576]{1,0} broadcast(scale_rhs), dimensions={} rhs_bf16 = f32[24576,24576]{1,0} convert(param.6) rhs_scaled = f32[24576,24576]{1,0} multiply(rhs_bf16, scale_rhs_bcast) tuple.3 = (f32[2,2048,24576]{2,1,0}, f32[24576,24576]{1,0}, f32[2,512,24576]{2,1,0}, f32[2,512,24576]{2,1,0}, u32[]) tuple(lhs_scaled, rhs_scaled, param.7, param.7, constant.20) while.1 = (f32[2,2048,24576]{2,1,0}, f32[24576,24576]{1,0}, f32[2,512,24576]{2,1,0}, f32[2,512,24576]{2,1,0}, u32[]) while(tuple.3), condition=windowed_dot_general_cond_rs, body=windowed_dot_general_body_rs ROOT get-tuple-element.14 = f32[2,512,24576]{2,1,0} get-tuple-element(while.1), index=2 } )"; RunAndFilecheckHloRewrite(kHloString, WindowedEinsumHandler(), R"( ; CHECK-LABEL: unrolled_windowed_dot_general_body_rs ; CHECK-NEXT: [[P0:%[^ ]+]] = (f8e4m3fn[2,2048,24576]{2,1,0}, f8e4m3fn[24576,24576]{1,0}, f32[2,512,24576]{2,1,0}, f32[2,512,24576]{2,1,0}, u32[], f32[], f32[]) parameter(0) ; CHECK-NEXT: [[GTE0:%[^ ]+]] = f8e4m3fn[2,2048,24576]{2,1,0} get-tuple-element([[P0]]), index=0 ; CHECK-NEXT: [[GTE1:%[^ ]+]] = f8e4m3fn[24576,24576]{1,0} get-tuple-element([[P0]]), index=1 ; CHECK-NEXT: [[GTE2:%[^ ]+]] = f32[2,512,24576]{2,1,0} get-tuple-element([[P0]]), index=2 ; CHECK-NEXT: [[CP0:%[^ ]+]] = f32[2,512,24576]{2,1,0} collective-permute([[GTE2]]), channel_id=11 ; CHECK-NEXT: [[CONVERT0:%[^ ]+]] = f32[2,2048,24576]{2,1,0} convert([[GTE0]]) ; CHECK-NEXT: [[GTE3:%[^ ]+]] = f32[] get-tuple-element([[P0]]), index=5 ; CHECK-NEXT: [[BCAST0:%[^ ]+]] = f32[2,2048,24576]{2,1,0} broadcast([[GTE3]]), dimensions={} ; CHECK-NEXT: [[MUL0:%[^ ]+]] = f32[2,2048,24576]{2,1,0} multiply([[CONVERT0]], [[BCAST0]]) ; CHECK-NEXT: [[C0:%[^ ]+]] = s32[] constant(0) ; CHECK-NEXT: [[C1:%[^ ]+]] = u32[] constant(0) ; CHECK-NEXT: [[GTE4:%[^ ]+]] = u32[] get-tuple-element([[P0]]), index=4 ; CHECK-NEXT: [[C2:%[^ ]+]] = u32[] constant(3) ; CHECK-NEXT: [[ADD0:%[^ ]+]] = u32[] add([[GTE4]], [[C2]]) ; CHECK-NEXT: [[PID:%[^ ]+]] = u32[] partition-id() ; CHECK-NEXT: [[ADD2:%[^ ]+]] = u32[] add([[ADD0]], [[PID]]) ; CHECK-NEXT: [[AND0:%[^ ]+]] = u32[] and([[ADD2]], [[C2]]) ; CHECK-NEXT: [[CLAMP0:%[^ ]+]] = u32[] clamp([[C1]], [[AND0]], [[C2]]) ; CHECK-NEXT: [[CONVERT10:%[^ ]+]] = s32[] convert([[CLAMP0]]) ; CHECK-NEXT: [[C10:%[^ ]+]] = s32[] constant(512) ; CHECK-NEXT: [[MUL10:%[^ ]+]] = s32[] 
multiply([[CONVERT10]], [[C10]]) ; CHECK-NEXT: [[RESHAPE0:%[^ ]+]] = s32[] reshape([[MUL10]]) ; CHECK-NEXT: [[DSLICE1:%[^ ]+]] = f32[2,512,24576]{2,1,0} dynamic-slice([[MUL0]], [[C0]], [[RESHAPE0]], [[C0]]), dynamic_slice_sizes={2,512,24576} ; CHECK-NEXT: [[CONVERT1:%[^ ]+]] = f32[24576,24576]{1,0} convert([[GTE1]]) ; CHECK-NEXT: [[GTE5:%[^ ]+]] = f32[] get-tuple-element([[P0]]), index=6 ; CHECK-NEXT: [[BCAST1:%[^ ]+]] = f32[24576,24576]{1,0} broadcast([[GTE5]]), dimensions={} ; CHECK-NEXT: [[MUL1:%[^ ]+]] = f32[24576,24576]{1,0} multiply([[CONVERT1]], [[BCAST1]]) ; CHECK-NEXT: [[DOT0:%[^ ]+]] = f32[2,512,24576]{2,1,0} dot([[DSLICE1]], [[MUL1]]), ; CHECK-DAG: lhs_contracting_dims={2}, ; CHECK-DAG: rhs_contracting_dims={0}, ; CHECK-DAG: backend_config={ ; CHECK-DAG: "operation_queue_id":"[[OPQUEUEID0:[1-9][0-9]*]]", ; CHECK-DAG: "wait_on_operation_queues":[], ; CHECK-DAG: "force_earliest_schedule":false} ; CHECK-NEXT: [[ADD3:%[^ ]+]] = f32[2,512,24576]{2,1,0} add([[CP0]], [[DOT0]]), ; CHECK-DAG: backend_config={" ; CHECK-DAG: operation_queue_id":"0", ; CHECK-DAG: "wait_on_operation_queues":["[[OPQUEUEID0]]"], ; CHECK-DAG: "force_earliest_schedule":false} ; CHECK-NEXT: [[GTE6:[^ ]+]] = f32[2,512,24576]{2,1,0} get-tuple-element([[P0]]), index=3 ; CHECK-NEXT: [[C11:%[^ ]+]] = u32[] constant(0) ; CHECK-NEXT: [[ADD6:%[^ ]+]] = u32[] add([[C11]], [[PID]]) ; CHECK-NEXT: [[AND1:%[^ ]+]] = u32[] and([[ADD6]], [[C2]]) ; CHECK-NEXT: [[CLAMP1:%[^ ]+]] = u32[] clamp([[C1]], [[AND1]], [[C2]]) ; CHECK-NEXT: [[CONVERT11:%[^ ]+]] = s32[] convert([[CLAMP1]]) ; CHECK-NEXT: [[MUL11:%[^ ]+]] = s32[] multiply([[CONVERT11]], [[C10]]) ; CHECK-NEXT: [[RESHAPE2:%[^ ]+]] = s32[] reshape([[MUL11]]) ; CHECK-NEXT: [[DSLICE3:%[^ ]+]] = f32[2,512,24576]{2,1,0} dynamic-slice([[MUL0]], [[C0]], [[RESHAPE2]], [[C0]]), dynamic_slice_sizes={2,512,24576} ; CHECK-NEXT: [[DOT1:%[^ ]+]] = f32[2,512,24576]{2,1,0} dot([[DSLICE3]], [[MUL1]]), ; CHECK-DAG: lhs_contracting_dims={2}, ; CHECK-DAG: rhs_contracting_dims={0} ; CHECK-DAG: backend_config={ ; CHECK-DAG: "operation_queue_id":"[[OPQUEUEID:[0-9]+]]", ; CHECK-DAG: "wait_on_operation_queues":[], ; CHECK-DAG: "force_earliest_schedule":false} ; CHECK-NEXT: [[ADD5:%[^ ]+]] = f32[2,512,24576]{2,1,0} add([[GTE6]], [[DOT1]]) ; CHECK-NEXT: [[CP1:[^ ]+]] = f32[2,512,24576]{2,1,0} collective-permute([[ADD5]]), channel_id=12 ; CHECK-NEXT: [[C3:%[^ ]+]] = u32[] constant(2) ; CHECK-NEXT: [[ADD7:%[^ ]+]] = u32[] add([[GTE4]], [[C3]]) ; CHECK-NEXT: [[TUPLE0:[^ ]+]] = (f8e4m3fn[2,2048,24576]{2,1,0}, f8e4m3fn[24576,24576]{1,0}, f32[2,512,24576]{2,1,0}, f32[2,512,24576]{2,1,0}, u32[], f32[], f32[]) tuple([[GTE0]], [[GTE1]], [[ADD3]], [[CP1]], [[ADD7]], [[GTE3]], [[GTE5]]) ; CHECK-NEXT: [[GTE0:%[^ ]+]] = f8e4m3fn[2,2048,24576]{2,1,0} get-tuple-element([[TUPLE0]]), index=0 ; CHECK-NEXT: [[GTE1:%[^ ]+]] = f8e4m3fn[24576,24576]{1,0} get-tuple-element([[TUPLE0]]), index=1 ; CHECK-NEXT: [[GTE2:%[^ ]+]] = f32[2,512,24576]{2,1,0} get-tuple-element([[TUPLE0]]), index=2 ; CHECK-NEXT: [[CP0:%[^ ]+]] = f32[2,512,24576]{2,1,0} collective-permute([[GTE2]]), channel_id=13 ; CHECK-NEXT: [[CONVERT0:%[^ ]+]] = f32[2,2048,24576]{2,1,0} convert([[GTE0]]) ; CHECK-NEXT: [[GTE3:%[^ ]+]] = f32[] get-tuple-element([[TUPLE0]]), index=5 ; CHECK-NEXT: [[BCAST0:%[^ ]+]] = f32[2,2048,24576]{2,1,0} broadcast([[GTE3]]), dimensions={} ; CHECK-NEXT: [[MUL0:%[^ ]+]] = f32[2,2048,24576]{2,1,0} multiply([[CONVERT0]], [[BCAST0]]) ; CHECK-NEXT: [[C0:%[^ ]+]] = s32[] constant(0) ; CHECK-NEXT: [[C1:%[^ ]+]] = u32[] constant(0) ; 
CHECK-NEXT: [[GTE4:%[^ ]+]] = u32[] get-tuple-element([[TUPLE0]]), index=4 ; CHECK-NEXT: [[C2:%[^ ]+]] = u32[] constant(3) ; CHECK-NEXT: [[ADD0:%[^ ]+]] = u32[] add([[GTE4]], [[C2]]) ; CHECK-NEXT: [[PID:%[^ ]+]] = u32[] partition-id() ; CHECK-NEXT: [[ADD2:%[^ ]+]] = u32[] add([[ADD0]], [[PID]]) ; CHECK-NEXT: [[AND0:%[^ ]+]] = u32[] and([[ADD2]], [[C2]]) ; CHECK-NEXT: [[CLAMP0:%[^ ]+]] = u32[] clamp([[C1]], [[AND0]], [[C2]]) ; CHECK-NEXT: [[CONVERT10:%[^ ]+]] = s32[] convert([[CLAMP0]]) ; CHECK-NEXT: [[C10:%[^ ]+]] = s32[] constant(512) ; CHECK-NEXT: [[MUL10:%[^ ]+]] = s32[] multiply([[CONVERT10]], [[C10]]) ; CHECK-NEXT: [[RESHAPE0:%[^ ]+]] = s32[] reshape([[MUL10]]) ; CHECK-NEXT: [[DSLICE1:%[^ ]+]] = f32[2,512,24576]{2,1,0} dynamic-slice([[MUL0]], [[C0]], [[RESHAPE0]], [[C0]]), dynamic_slice_sizes={2,512,24576} ; CHECK-NEXT: [[CONVERT1:%[^ ]+]] = f32[24576,24576]{1,0} convert([[GTE1]]) ; CHECK-NEXT: [[GTE5:%[^ ]+]] = f32[] get-tuple-element([[TUPLE0]]), index=6 ; CHECK-NEXT: [[BCAST1:%[^ ]+]] = f32[24576,24576]{1,0} broadcast([[GTE5]]), dimensions={} ; CHECK-NEXT: [[MUL1:%[^ ]+]] = f32[24576,24576]{1,0} multiply([[CONVERT1]], [[BCAST1]]) ; CHECK-NEXT: [[DOT0:%[^ ]+]] = f32[2,512,24576]{2,1,0} dot([[DSLICE1]], [[MUL1]]), ; CHECK-DAG: lhs_contracting_dims={2}, ; CHECK-DAG: rhs_contracting_dims={0}, ; CHECK-DAG: backend_config={ ; CHECK-DAG: "operation_queue_id":"[[OPQUEUEID:[0-9]+]]", ; CHECK-DAG: "wait_on_operation_queues":[], ; CHECK-DAG: "force_earliest_schedule":false} ; CHECK-NEXT: [[ADD3:%[^ ]+]] = f32[2,512,24576]{2,1,0} add([[CP0]], [[DOT0]]), ; CHECK-DAG: backend_config={" ; CHECK-DAG: operation_queue_id":"0", ; CHECK-DAG: "wait_on_operation_queues":["[[OPQUEUEID]]"], ; CHECK-DAG: "force_earliest_schedule":false} ; CHECK-NEXT: [[GTE6:[^ ]+]] = f32[2,512,24576]{2,1,0} get-tuple-element([[TUPLE0]]), index=3 ; CHECK-NEXT: [[C11:%[^ ]+]] = u32[] constant(1) ; CHECK-NEXT: [[ADD6:%[^ ]+]] = u32[] add([[C11]], [[PID]]) ; CHECK-NEXT: [[AND1:%[^ ]+]] = u32[] and([[ADD6]], [[C2]]) ; CHECK-NEXT: [[CLAMP1:%[^ ]+]] = u32[] clamp([[C1]], [[AND1]], [[C2]]) ; CHECK-NEXT: [[CONVERT11:%[^ ]+]] = s32[] convert([[CLAMP1]]) ; CHECK-NEXT: [[MUL11:%[^ ]+]] = s32[] multiply([[CONVERT11]], [[C10]]) ; CHECK-NEXT: [[RESHAPE2:%[^ ]+]] = s32[] reshape([[MUL11]]) ; CHECK-NEXT: [[DSLICE3:%[^ ]+]] = f32[2,512,24576]{2,1,0} dynamic-slice([[MUL0]], [[C0]], [[RESHAPE2]], [[C0]]), dynamic_slice_sizes={2,512,24576} ; CHECK-NEXT: [[DOT1:%[^ ]+]] = f32[2,512,24576]{2,1,0} dot([[DSLICE3]], [[MUL1]]), ; CHECK-DAG: lhs_contracting_dims={2}, ; CHECK-DAG: rhs_contracting_dims={0} ; CHECK-DAG: backend_config={ ; CHECK-DAG: "operation_queue_id":"[[OPQUEUEID:[0-9]+]]", ; CHECK-DAG: "wait_on_operation_queues":[], ; CHECK-DAG: "force_earliest_schedule":false} ; CHECK-NEXT: [[ADD5:%[^ ]+]] = f32[2,512,24576]{2,1,0} add([[GTE6]], [[DOT1]]) ; CHECK-NEXT: [[CP1:[^ ]+]] = f32[2,512,24576]{2,1,0} collective-permute([[ADD5]]), channel_id=14 ; CHECK-NEXT: [[C3:%[^ ]+]] = u32[] constant(2) ; CHECK-NEXT: [[ADD7:%[^ ]+]] = u32[] add([[GTE4]], [[C3]]) )"); } TEST_F(WindowedEinsumHandlerTest, AllGatherMultipleConsumersF8) { constexpr absl::string_view kHloString = R"( HloModule all_gather_multiple_consumers_f8, entry_computation_layout={(f8e4m3fn[2,512,24576]{2,1,0}, f8e4m3fn[24576,24576]{1,0}, f8e4m3fn[24576,24576]{1,0}, f8e4m3fn[24576,24576]{1,0}, f32[], f32[], f32[], f32[])->f32[2,2048,24576]{2,1,0}}, num_partitions=4 windowed_dot_general_body_ag { input = (f32[2,512,24576]{2,1,0}, f32[24576,24576]{1,0}, f32[2,2048,24576]{2,1,0}, 
f32[2,2048,24576]{2,1,0}, u32[]) parameter(0) lhs = f32[2,512,24576]{2,1,0} get-tuple-element(input), index=0 permuted_lhs0 = f32[2,512,24576]{2,1,0} collective-permute(lhs), channel_id=2, source_target_pairs={{0,3},{1,0},{2,1},{3,2}} permuted_lhs1 = f32[2,512,24576]{2,1,0} collective-permute(permuted_lhs0), channel_id=3, source_target_pairs={{0,3},{1,0},{2,1},{3,2}} rhs = f32[24576,24576]{1,0} get-tuple-element(input), index=1 partial_dot_output = f32[2,2048,24576]{2,1,0} get-tuple-element(input), index=2 dot0 = f32[2,512,24576]{2,1,0} dot(lhs, rhs), lhs_contracting_dims={2}, rhs_contracting_dims={0} c0 = s32[] constant(0) dot_update_slice_offsets = s32[4]{0} constant({0, 512, 1024, 1536}) loop_counter = u32[] get-tuple-element(input), index=4 partition_id = u32[] partition-id() loop_counter_plus_partition_id = u32[] add(loop_counter, partition_id) c4 = u32[] constant(4) dot_update_slice_offsets_index0 = u32[] remainder(loop_counter_plus_partition_id, c4) dot_update_slice_offset0 = s32[1]{0} dynamic-slice(dot_update_slice_offsets, dot_update_slice_offsets_index0), dynamic_slice_sizes={1} dot_update_slice_offset_scalar0 = s32[] reshape(dot_update_slice_offset0) updated_dot_output0 = f32[2,2048,24576]{2,1,0} dynamic-update-slice(partial_dot_output, dot0, c0, dot_update_slice_offset_scalar0, c0) dot1 = f32[2,512,24576]{2,1,0} dot(permuted_lhs0, rhs), lhs_contracting_dims={2}, rhs_contracting_dims={0} c1 = u32[] constant(1) loop_counter_plus_one = u32[] add(loop_counter, c1) loop_counter_plus_partition_id_plus_one = u32[] add(loop_counter_plus_one, partition_id) dot_update_slice_offsets_index1 = u32[] remainder(loop_counter_plus_partition_id_plus_one, c4) dot_update_slice_offset1 = s32[1]{0} dynamic-slice(dot_update_slice_offsets, dot_update_slice_offsets_index1), dynamic_slice_sizes={1} dot_update_slice_offset1_scalar = s32[] reshape(dot_update_slice_offset1) updated_dot_output1 = f32[2,2048,24576]{2,1,0} dynamic-update-slice(updated_dot_output0, dot1, c0, dot_update_slice_offset1_scalar, c0) pass_through = f32[2,2048,24576]{2,1,0} get-tuple-element(input), index=3 next_loop_counter = u32[] add(loop_counter_plus_one, c1) ROOT tuple = (f32[2,512,24576]{2,1,0}, f32[24576,24576]{1,0}, f32[2,2048,24576]{2,1,0}, f32[2,2048,24576]{2,1,0}, u32[]) tuple(permuted_lhs1, rhs, updated_dot_output1, pass_through, next_loop_counter) } windowed_dot_general_cond_ag { input = (f32[2,512,24576]{2,1,0}, f32[24576,24576]{1,0}, f32[2,2048,24576]{2,1,0}, f32[2,2048,24576]{2,1,0}, u32[]) parameter(0) loop_counter = u32[] get-tuple-element(input), index=4 loop_limit = u32[] constant(4) ROOT compare = pred[] compare(loop_counter, loop_limit), direction=LT } ENTRY main { lhs = f8e4m3fn[2,512,24576]{2,1,0} parameter(0), sharding={devices=[1,4,1]<=[4]} rhs0 = f8e4m3fn[24576,24576]{1,0} parameter(1), sharding={devices=[1,4]<=[4]} c0_f32 = f32[] constant(0) c0_f32_bcast = f32[2,2048,24576]{2,1,0} broadcast(c0_f32), dimensions={} c0_u32 = u32[] constant(0) scale_lhs = f32[] parameter(4) scale_lhs_bcast = f32[2,512,24576]{2,1,0} broadcast(scale_lhs), dimensions={} lhs_f32 = f32[2,512,24576]{2,1,0} convert(lhs) lhs_scaled = f32[2,512,24576]{2,1,0} multiply(lhs_f32, scale_lhs_bcast) scale_rhs0 = f32[] parameter(5) scale_rhs0_bcast = f32[24576,24576]{1,0} broadcast(scale_rhs0), dimensions={} rhs0_f32 = f32[24576,24576]{1,0} convert(rhs0) rhs0_scaled = f32[24576,24576]{1,0} multiply(rhs0_f32, scale_rhs0_bcast) while_input = (f32[2,512,24576]{2,1,0}, f32[24576,24576]{1,0}, f32[2,2048,24576]{2,1,0}, f32[2,2048,24576]{2,1,0}, 
u32[]) tuple(lhs_scaled, rhs0_scaled, c0_f32_bcast, c0_f32_bcast, c0_u32) while = (f32[2,512,24576]{2,1,0}, f32[24576,24576]{1,0}, f32[2,2048,24576]{2,1,0}, f32[2,2048,24576]{2,1,0}, u32[]) while(while_input), condition=windowed_dot_general_cond_ag, body=windowed_dot_general_body_ag all-gather1 = f32[2,2048,24576]{2,1,0} all-gather(lhs_scaled), channel_id=1, replica_groups={{0,1,2,3}}, dimensions={1}, use_global_device_ids=true rhs1 = f8e4m3fn[24576,24576]{1,0} parameter(2), sharding={devices=[1,4]<=[4]} scale_rhs1 = f32[] parameter(6) scale_rhs1_bcast = f32[24576,24576]{1,0} broadcast(scale_rhs1), dimensions={} rhs1_f32 = f32[24576,24576]{1,0} convert(rhs1) rhs1_scaled = f32[24576,24576]{1,0} multiply(rhs1_f32, scale_rhs1_bcast) dot1 = f32[2,2048,24576]{2,1,0} dot(all-gather1, rhs1_scaled), lhs_contracting_dims={2}, rhs_contracting_dims={0} all-gather2 = f32[2,2048,24576]{2,1,0} all-gather(lhs_scaled), channel_id=1, replica_groups={{0,1,2,3}}, dimensions={1}, use_global_device_ids=true rhs2 = f8e4m3fn[24576,24576]{1,0} parameter(3), sharding={devices=[1,4]<=[4]} scale_rhs2 = f32[] parameter(7) scale_rhs2_bcast = f32[24576,24576]{1,0} broadcast(scale_rhs2), dimensions={} rhs2_f32 = f32[24576,24576]{1,0} convert(rhs2) rhs2_scaled = f32[24576,24576]{1,0} multiply(rhs2_f32, scale_rhs2_bcast) dot2 = f32[2,2048,24576]{2,1,0} dot(all-gather2, rhs2_scaled), lhs_contracting_dims={2}, rhs_contracting_dims={0} ROOT product = f32[2,2048,24576]{2,1,0} multiply(dot1, dot2) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHloString)); RunAndFilecheckHloRewrite(kHloString, WindowedEinsumHandler(), R"( ; CHECK-LABEL: %main ; CHECK: [[WHILE0:%[^ ]+]] = (f8e4m3fn[2,512,24576]{2,1,0}, f8e4m3fn[24576,24576]{1,0}, f32[2,2048,24576]{2,1,0}, f32[2,2048,24576]{2,1,0}, u32[], f32[], f32[], f8e4m3fn[2,2048,24576]{2,1,0}) while([[TUPLE0:%[^ ]+]]), ; CHECK-DAG: condition=%unrolled_windowed_dot_general_cond_ag, ; CHECK-DAG: body=%unrolled_windowed_dot_general_body_ag ; CHECK: [[LHS1:%[^ ]+]] = f8e4m3fn[2,2048,24576]{2,1,0} get-tuple-element([[WHILE0]]), index=7 ; CHECK-NEXT: [[LHS1_F32:%[^ ]+]] = f32[2,2048,24576]{2,1,0} convert([[LHS1]]) ; CHECK-NEXT: [[SCALE_LHS1_BCAST:%[^ ]+]] = f32[2,2048,24576]{2,1,0} broadcast([[SCALE_LHS1:%[^ ]+]]), dimensions={} ; CHECK-NEXT: [[LHS1_SCALED:%[^ ]+]] = f32[2,2048,24576]{2,1,0} multiply([[LHS1_F32]], [[SCALE_LHS1_BCAST]]) ; CHECK-NEXT: [[RHS1:%[^ ]+]] = f8e4m3fn[24576,24576]{1,0} parameter(2), sharding={devices=[1,4]<=[4]} ; CHECK-NEXT: [[RHS1_F32:%[^ ]+]] = f32[24576,24576]{1,0} convert([[RHS1]]) ; CHECK: [[SCALE_RHS1_BCAST:%[^ ]+]] = f32[24576,24576]{1,0} broadcast([[SCALE_RHS1:%[^ ]+]]), dimensions={} ; CHECK-NEXT: [[RHS1_SCALED:%[^ ]+]] = f32[24576,24576]{1,0} multiply([[RHS1_F32]], [[SCALE_RHS1_BCAST]]) ; CHECK-NEXT: [[DOT1:%[^ ]+]] = f32[2,2048,24576]{2,1,0} dot([[LHS1_SCALED]], [[RHS1_SCALED]]), ; CHECK-DAG: lhs_contracting_dims={2}, ; CHECK-DAG: rhs_contracting_dims={0} ; CHECK: [[LHS2:%[^ ]+]] = f8e4m3fn[2,2048,24576]{2,1,0} get-tuple-element([[WHILE0]]), index=7 ; CHECK-NEXT: [[LHS2_F32:%[^ ]+]] = f32[2,2048,24576]{2,1,0} convert([[LHS2]]) ; CHECK-NEXT: [[SCALE_LHS2_BCAST:%[^ ]+]] = f32[2,2048,24576]{2,1,0} broadcast([[SCALE_LHS2:%[^ ]+]]), dimensions={} ; CHECK-NEXT: [[LHS2_SCALED:%[^ ]+]] = f32[2,2048,24576]{2,1,0} multiply([[LHS2_F32]], [[SCALE_LHS2_BCAST]]) ; CHECK-NEXT: [[RHS2:%[^ ]+]] = f8e4m3fn[24576,24576]{1,0} parameter(3), sharding={devices=[1,4]<=[4]} ; CHECK-NEXT: [[RHS2_F32:%[^ ]+]] = f32[24576,24576]{1,0} 
convert([[RHS2]]) ; CHECK-NEXT: [[SCALE_RHS2:%[^ ]+]] = f32[] parameter(7) ; CHECK-NEXT: [[SCALE_RHS2_BCAST:%[^ ]+]] = f32[24576,24576]{1,0} broadcast([[SCALE_RHS2]]), dimensions={} ; CHECK-NEXT: [[RHS2_SCALED:%[^ ]+]] = f32[24576,24576]{1,0} multiply([[RHS2_F32]], [[SCALE_RHS2_BCAST]]) ; CHECK-NEXT: [[DOT2:%[^ ]+]] = f32[2,2048,24576]{2,1,0} dot([[LHS2_SCALED]], [[RHS2_SCALED]]), ; CHECK-DAG: lhs_contracting_dims={2}, ; CHECK-DAG: rhs_contracting_dims={0} ; CHECK-NEXT: ROOT [[OUT:[^ ]+]] = f32[2,2048,24576]{2,1,0} multiply([[DOT1]], [[DOT2]]) )"); } TEST_F(WindowedEinsumHandlerTest, AgLoopsMultipleConsumersAreChainedWithShardedContratingDim) { constexpr absl::string_view kHloString = R"( HloModule pjit__unnamed_wrapped_function_, entry_computation_layout={(bf16[16,2048,512]{2,1,0}, bf16[4096,6288]{1,0}, bf16[16,2048,6288]{2,1,0})->bf16[4096,6288]{1,0}}, num_partitions=8 windowed_dot_general_body_ag { param.195 = (bf16[16,2048,512]{2,1,0}, bf16[4096,6288]{1,0}, bf16[16,2048,6288]{2,1,0}, bf16[16,2048,6288]{2,1,0}, u32[]) parameter(0) get-tuple-element.588 = bf16[16,2048,512]{2,1,0} get-tuple-element(param.195), index=0 collective-permute.194 = bf16[16,2048,512]{2,1,0} collective-permute(get-tuple-element.588), channel_id=446, source_target_pairs={{0,7},{1,0},{2,1},{3,2},{4,3},{5,4},{6,5},{7,6}} collective-permute.195 = bf16[16,2048,512]{2,1,0} collective-permute(collective-permute.194), channel_id=447, source_target_pairs={{0,7},{1,0},{2,1},{3,2},{4,3},{5,4},{6,5},{7,6}} get-tuple-element.589 = bf16[4096,6288]{1,0} get-tuple-element(param.195), index=1 get-tuple-element.590 = bf16[16,2048,6288]{2,1,0} get-tuple-element(param.195), index=2 constant.11432 = s32[8]{0} constant({0, 512, 1024, 1536, 2048, 2560, 3072, 3584}) get-tuple-element.592 = u32[] get-tuple-element(param.195), index=4 partition-id.194 = u32[] partition-id() add.4309 = u32[] add(get-tuple-element.592, partition-id.194) constant.11431 = u32[] constant(8) remainder.194 = u32[] remainder(add.4309, constant.11431) dynamic-slice.388 = s32[1]{0} dynamic-slice(constant.11432, remainder.194), dynamic_slice_sizes={1} reshape.12959 = s32[] reshape(dynamic-slice.388) constant.11433 = s32[] constant(0) dynamic-slice.389 = bf16[512,6288]{1,0} dynamic-slice(get-tuple-element.589, reshape.12959, constant.11433), dynamic_slice_sizes={512,6288} dot.244 = bf16[16,2048,6288]{2,1,0} dot(get-tuple-element.588, dynamic-slice.389), lhs_contracting_dims={2}, rhs_contracting_dims={0} add.4310 = bf16[16,2048,6288]{2,1,0} add(get-tuple-element.590, dot.244) constant.11434 = u32[] constant(1) add.4312 = u32[] add(get-tuple-element.592, constant.11434) add.4313 = u32[] add(add.4312, partition-id.194) remainder.195 = u32[] remainder(add.4313, constant.11431) dynamic-slice.390 = s32[1]{0} dynamic-slice(constant.11432, remainder.195), dynamic_slice_sizes={1} reshape.12960 = s32[] reshape(dynamic-slice.390) dynamic-slice.391 = bf16[512,6288]{1,0} dynamic-slice(get-tuple-element.589, reshape.12960, constant.11433), dynamic_slice_sizes={512,6288} dot.245 = bf16[16,2048,6288]{2,1,0} dot(collective-permute.194, dynamic-slice.391), lhs_contracting_dims={2}, rhs_contracting_dims={0} add.4314 = bf16[16,2048,6288]{2,1,0} add(add.4310, dot.245) get-tuple-element.591 = bf16[16,2048,6288]{2,1,0} get-tuple-element(param.195), index=3 add.4315 = u32[] add(add.4312, constant.11434) ROOT tuple.98 = (bf16[16,2048,512]{2,1,0}, bf16[4096,6288]{1,0}, bf16[16,2048,6288]{2,1,0}, bf16[16,2048,6288]{2,1,0}, u32[]) tuple(collective-permute.195, get-tuple-element.589, add.4314, 
get-tuple-element.591, add.4315) } windowed_dot_general_cond_ag { param = (bf16[16,2048,512]{2,1,0}, bf16[4096,6288]{1,0}, bf16[16,2048,6288]{2,1,0}, bf16[16,2048,6288]{2,1,0}, u32[]) parameter(0) get-tuple-element = u32[] get-tuple-element(param), index=4 constant = u32[] constant(4) ROOT compare = pred[] compare(get-tuple-element, constant), direction=LT } ENTRY main.12_spmd { param.4 = bf16[16,2048,512]{2,1,0} parameter(0) param.5 = bf16[4096,6288]{1,0} parameter(1) constant.22 = bf16[] constant(0) broadcast = bf16[16,2048,6288]{2,1,0} broadcast(constant.22), dimensions={} constant.24 = u32[] constant(0) tuple.2 = (bf16[16,2048,512]{2,1,0}, bf16[4096,6288]{1,0}, bf16[16,2048,6288]{2,1,0}, bf16[16,2048,6288]{2,1,0}, u32[]) tuple(param.4, param.5, broadcast, broadcast, constant.24) while = (bf16[16,2048,512]{2,1,0}, bf16[4096,6288]{1,0}, bf16[16,2048,6288]{2,1,0}, bf16[16,2048,6288]{2,1,0}, u32[]) while(tuple.2), condition=windowed_dot_general_cond_ag, body=windowed_dot_general_body_ag get-tuple-element.13 = bf16[16,2048,6288]{2,1,0} get-tuple-element(while), index=2 all-gather = bf16[16,2048,4096]{2,1,0} all-gather(param.4), channel_id=1, replica_groups={{0,1,2,3,4,5,6,7}}, dimensions={2}, use_global_device_ids=true param.6 = bf16[16,2048,6288]{2,1,0} parameter(2) ROOT dot.7 = bf16[4096,6288]{1,0} dot(all-gather, param.6), lhs_contracting_dims={0,1}, rhs_contracting_dims={0,1} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHloString)); WindowedEinsumHandler gpu_handler; bool changed; TF_ASSERT_OK_AND_ASSIGN(changed, gpu_handler.Run(module.get())); EXPECT_TRUE(changed); HloInstruction* ag_loop = FindInstructionByName(module->entry_computation(), "while"); HloInstruction* inst = FindInstructionByName(module->entry_computation(), "dot.7"); EXPECT_EQ(inst->operand(0)->opcode(), HloOpcode::kGetTupleElement); EXPECT_EQ(inst->operand(0)->tuple_index(), 5); EXPECT_EQ(inst->operand(0)->operand(0), ag_loop); } } }
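Note on the checks above: the FileCheck patterns pin down two backend_config fields, "operation_queue_id" and "wait_on_operation_queues", to verify that each decomposed dot of the windowed einsum loop is placed on its own compute stream and that consumers wait on the producing stream. As a complement to text matching, here is a minimal sketch of collecting those ids programmatically from a computation; the helper name and the treatment of queue id 0 as the default stream are illustrative assumptions, not part of the files in this entry.

// Illustrative sketch only: gather the distinct non-default operation queue
// ids assigned in a computation. Assumes id 0 denotes the default stream.
#include <cstdint>

#include "absl/container/flat_hash_set.h"
#include "xla/hlo/ir/hlo_computation.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/backend_configs.pb.h"

namespace xla::gpu {

inline absl::flat_hash_set<int64_t> CollectNonDefaultQueueIds(
    const HloComputation& computation) {
  absl::flat_hash_set<int64_t> queue_ids;
  for (const HloInstruction* instr : computation.instructions()) {
    auto gpu_config = instr->backend_config<GpuBackendConfig>();
    if (gpu_config.ok() && gpu_config->operation_queue_id() != 0) {
      queue_ids.insert(gpu_config->operation_queue_id());
    }
  }
  return queue_ids;
}

}  // namespace xla::gpu

A test could then assert, for example, that the set contains one id per sliced dot without committing to the exact serialized order that the CHECK-DAG directives tolerate.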
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/windowed_einsum_handler.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/windowed_einsum_handler_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
97498087-bc6d-4463-bd2c-846b5f2c5f4d
cpp
tensorflow/tensorflow
stream_attribute_annotator
third_party/xla/xla/service/gpu/transforms/stream_attribute_annotator.cc
third_party/xla/xla/service/gpu/transforms/stream_attribute_annotator_test.cc
#include "xla/service/gpu/transforms/stream_attribute_annotator.h" #include <cstdint> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_set.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_query.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/gpu_fusible.h" #include "xla/service/gpu/runtime/thunk.h" #include "xla/util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" #include "tsl/platform/statusor.h" namespace xla::gpu { namespace { bool IsOnlyRootNonDefaultStream(HloComputation* computation) { HloInstruction* root = computation->root_instruction(); auto root_gpu_config = root->backend_config<GpuBackendConfig>(); if (!root_gpu_config.ok() || root->opcode() == HloOpcode::kTuple) { return false; } int64_t root_stream_id = root_gpu_config->operation_queue_id(); VLOG(2) << "Found fusion computation's root stream id to be " << root_stream_id; if (root_stream_id == Thunk::kDefaultExecutionStreamId.value()) { return false; } for (HloInstruction* instr : computation->MakeInstructionPostOrder()) { if (instr == root) { continue; } int64_t instr_stream_id = instr->backend_config<GpuBackendConfig>()->operation_queue_id(); if (instr_stream_id != Thunk::kDefaultExecutionStreamId.value() && instr_stream_id != root_stream_id) { return false; } } return true; } absl::StatusOr<bool> AnnotateStreamAttributesForInstruction( HloInstruction* instr, GpuBackendConfig& instr_gpu_config) { if (instr->called_computations().size() != 1) { return false; } HloComputation* called_comp = instr->called_computations()[0]; int64_t stream_id = instr_gpu_config.operation_queue_id(); if (!IsOnlyRootNonDefaultStream(called_comp) || stream_id != Thunk::kDefaultExecutionStreamId.value()) { return false; } auto comp_root_gpu_config = called_comp->root_instruction()->backend_config<GpuBackendConfig>(); instr_gpu_config.set_operation_queue_id( comp_root_gpu_config->operation_queue_id()); *instr_gpu_config.mutable_wait_on_operation_queues() = comp_root_gpu_config->wait_on_operation_queues(); TF_RETURN_IF_ERROR(instr->set_backend_config(instr_gpu_config)); return true; } absl::StatusOr<bool> AnnotateStreamAttributesForCopyStart( HloInstruction* instr, int64_t channel_id, GpuBackendConfig& instr_gpu_config) { if (instr_gpu_config.operation_queue_id() != Thunk::kDefaultExecutionStreamId.value()) { return false; } instr_gpu_config.set_operation_queue_id(channel_id); TF_RETURN_IF_ERROR(instr->set_backend_config(instr_gpu_config)); VLOG(3) << "Add copy-start's backend config: " << channel_id; return true; } absl::StatusOr<bool> WrapIntoFusionAndAnnotateStreamAttributes( HloInstruction* instruction, int64_t channel_id, GpuBackendConfig& instr_gpu_config) { auto* computation = instruction->parent(); auto* module = computation->parent(); auto* fusion_instruction = computation->AddInstruction(HloInstruction::CreateFusion( instruction->shape(), ChooseFusionKind(*instruction, *instruction), instruction)); const absl::string_view wrapped_opcode = HloOpcodeString(instruction->opcode()); module->SetAndUniquifyInstrName(fusion_instruction, absl::StrCat("wrapped_", wrapped_opcode)); module->SetAndUniquifyComputationName( fusion_instruction->fused_instructions_computation(), absl::StrCat("wrapped_", wrapped_opcode, "_computation")); 
if (module->has_schedule()) { fusion_instruction->set_metadata_scheduling_name( fusion_instruction->name()); HloInstruction* root = fusion_instruction->fused_expression_root(); root->set_metadata_scheduling_name(root->name()); module->schedule().replace_instruction(computation, instruction, fusion_instruction); } TF_RETURN_IF_ERROR(fusion_instruction->CopyAllControlDepsFrom(instruction)); TF_RETURN_IF_ERROR(instruction->DropAllControlDeps()); TF_RETURN_IF_ERROR(instruction->ReplaceAllUsesWith(fusion_instruction)); TF_RETURN_IF_ERROR(computation->RemoveInstruction(instruction)); instr_gpu_config.set_operation_queue_id(channel_id); TF_RETURN_IF_ERROR(fusion_instruction->set_backend_config(instr_gpu_config)); VLOG(3) << "Add async stream " << channel_id << " and wrapped instruction " << instruction->ToString(); VLOG(3) << " Fusion wrapper: " << fusion_instruction->ToString(); return true; } absl::StatusOr<bool> AnnotateStreamAttributesForUsers( HloInstruction* instr, GpuBackendConfig& instr_gpu_config) { bool changed = false; int64_t stream_id = instr_gpu_config.operation_queue_id(); if (stream_id == Thunk::kDefaultExecutionStreamId.value()) { return changed; } std::vector<HloInstruction*> all_consumers; for (auto user : instr->users()) { if (user->opcode() == HloOpcode::kGetTupleElement) { user = user->users()[0]; } all_consumers.push_back(user); } for (auto user : all_consumers) { TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config, user->backend_config<GpuBackendConfig>()); auto it = absl::c_find(gpu_config.wait_on_operation_queues(), stream_id); if (it == gpu_config.wait_on_operation_queues().end() && gpu_config.operation_queue_id() != stream_id) { gpu_config.mutable_wait_on_operation_queues()->Add(stream_id); TF_RETURN_IF_ERROR(user->set_backend_config(gpu_config)); changed = true; } } return changed; } } absl::StatusOr<bool> StreamAttributeAnnotator::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { XLA_VLOG_LINES( 5, "StreamAttributeAnnotator::Run(), before:\n" + module->ToString()); bool changed = false; int64_t channel_id = hlo_query::NextChannelId(*module); for (const HloComputation* comp : module->MakeComputationPostOrder(execution_threads)) { for (HloInstruction* instr : comp->MakeInstructionPostOrder()) { auto instr_gpu_config = instr->backend_config<GpuBackendConfig>(); if (!instr_gpu_config.ok()) { continue; } if (instr->opcode() == HloOpcode::kFusion) { TF_ASSIGN_OR_RETURN(bool comp_result, AnnotateStreamAttributesForInstruction( instr, instr_gpu_config.value())); changed |= comp_result; } else if (instr->opcode() == HloOpcode::kCopyStart) { TF_ASSIGN_OR_RETURN(bool comp_result, AnnotateStreamAttributesForCopyStart( instr, channel_id, instr_gpu_config.value())); changed |= comp_result; continue; } else if (comp->IsAsyncComputation() && (instr->opcode() == HloOpcode::kDynamicSlice || instr->opcode() == HloOpcode::kDynamicUpdateSlice)) { TF_ASSIGN_OR_RETURN(bool comp_result, WrapIntoFusionAndAnnotateStreamAttributes( instr, channel_id, instr_gpu_config.value())); changed |= comp_result; continue; } TF_ASSIGN_OR_RETURN( bool user_result, AnnotateStreamAttributesForUsers(instr, instr_gpu_config.value())); changed |= user_result; } } XLA_VLOG_LINES( 5, "StreamAttributeAnnotator::Run(), after:\n" + module->ToString()); return changed; } }
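For readers skimming the pass above, the essential consumer-side mutation performed by AnnotateStreamAttributesForUsers is small enough to restate in isolation. The sketch below mirrors that logic as a free-standing helper; the function name is an assumption chosen for illustration, and the helper is not part of the pass itself.

// Sketch of the per-consumer update: record that `consumer` must wait on the
// producer's stream unless it already does, or it runs on that stream itself.
#include <cstdint>

#include "absl/algorithm/container.h"
#include "absl/status/status.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/service/gpu/backend_configs.pb.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"

namespace xla::gpu {

inline absl::Status AddWaitOnQueue(HloInstruction* consumer,
                                   int64_t producer_queue_id) {
  TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config,
                      consumer->backend_config<GpuBackendConfig>());
  auto it = absl::c_find(gpu_config.wait_on_operation_queues(),
                         producer_queue_id);
  if (it == gpu_config.wait_on_operation_queues().end() &&
      gpu_config.operation_queue_id() != producer_queue_id) {
    gpu_config.mutable_wait_on_operation_queues()->Add(producer_queue_id);
    TF_RETURN_IF_ERROR(consumer->set_backend_config(gpu_config));
  }
  return absl::OkStatus();
}

}  // namespace xla::gpu

The guard before the Add() call keeps the wait list deduplicated, which avoids redundant stream-wait entries when the same producer reaches a consumer through several paths.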
#include "xla/service/gpu/transforms/stream_attribute_annotator.h" #include <cstdint> #include <memory> #include <string> #include <vector> #include <gtest/gtest.h> #include "absl/algorithm/container.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/tests/filecheck.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/statusor.h" namespace xla::gpu { namespace { using StreamAttributeAnnotatorTest = HloTestBase; TEST_F(StreamAttributeAnnotatorTest, AllUsersAreAnnotated) { constexpr absl::string_view kHloString = R"( HloModule ModuleWithAsync ENTRY entry { p1_32 = f32[1] parameter(0) p2_32 = f32[1] parameter(1) add_32 = f32[1] add(p1_32, p2_32), backend_config={"operation_queue_id":"1", "wait_on_operation_queues":[]} exp_32 = f32[1] exponential(add_32) neg32 = f32[1] negate(add_32) ROOT add_out_32 = f32[1] add(neg32, exp_32) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHloString)); StreamAttributeAnnotator attr_annotator; bool changed; TF_ASSERT_OK_AND_ASSIGN(changed, attr_annotator.Run(module.get())); EXPECT_TRUE(changed); const HloInstruction* add = FindInstruction(module.get(), "add_32"); for (auto user : add->users()) { EXPECT_TRUE(user->has_backend_config()); TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig gpu_config, user->backend_config<GpuBackendConfig>()); EXPECT_EQ(gpu_config.wait_on_operation_queues()[0], 1); } } TEST_F(StreamAttributeAnnotatorTest, MultipleStreamsAreCombined) { constexpr absl::string_view kHloString = R"( HloModule ModuleWithAsync ENTRY entry { p1_32 = f32[1] parameter(0) p2_32 = f32[1] parameter(1) add_32 = f32[1] add(p1_32, p2_32), backend_config={"operation_queue_id":"1", "wait_on_operation_queues":[]} exp_32 = f32[1] exponential(p2_32), backend_config={"operation_queue_id":"2", "wait_on_operation_queues":[]} ROOT add_out_32 = f32[1] add(add_32, exp_32) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHloString)); StreamAttributeAnnotator attr_annotator; bool changed; TF_ASSERT_OK_AND_ASSIGN(changed, attr_annotator.Run(module.get())); EXPECT_TRUE(changed); const HloInstruction* root = module->entry_computation()->root_instruction(); EXPECT_TRUE(root->has_backend_config()); TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig gpu_config, root->backend_config<GpuBackendConfig>()); std::vector<int64_t> expected_stream_ids = {1, 2}; for (auto id : expected_stream_ids) { auto it = absl::c_find(gpu_config.wait_on_operation_queues(), id); EXPECT_NE(it, gpu_config.wait_on_operation_queues().end()); } } TEST_F(StreamAttributeAnnotatorTest, GTEUserIsAnnotated) { constexpr absl::string_view kHloString = R"( HloModule ModuleWithAsync ENTRY entry { p1_32 = f32[16,32] parameter(0) p2_32 = f32[32,16] parameter(1) custom-call.3 = (f32[16,16], s8[1028]{0}) custom-call(p1_32, p2_32), custom_call_target="__cublas$gemm", backend_config={"operation_queue_id":"1","wait_on_operation_queues":[],"gemm_backend_config":{"alpha_real":1,"alpha_imag":0,"beta":0,"dot_dimension_numbers":{"lhs_contracting_dimensions":["1"],"rhs_contracting_dimensions":["0"],"lhs_batch_dimensions":[],"rhs_batch_dimensions":[]},"precision_config":{"operand_precision":["DEFAULT","DEFAULT"]},"epilogue":"DEFAULT","grad_x":false,"grad_y":false}} get-tuple-element.24 = f32[16,16] get-tuple-element(custom-call.3), index=0 exp_32 = f32[16,16] exponential(get-tuple-element.24) ROOT neg32 = f32[16,16] 
negate(exp_32) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHloString)); StreamAttributeAnnotator attr_annotator; bool changed; TF_ASSERT_OK_AND_ASSIGN(changed, attr_annotator.Run(module.get())); EXPECT_TRUE(changed); const HloInstruction* exp = FindInstruction(module.get(), "exp_32"); EXPECT_TRUE(exp->has_backend_config()); TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig gpu_config, exp->backend_config<GpuBackendConfig>()); EXPECT_EQ(gpu_config.wait_on_operation_queues()[0], 1); } TEST_F(StreamAttributeAnnotatorTest, FusionIsAnnotated) { constexpr absl::string_view kHloString = R"( HloModule ModuleWithFusion fused_computation.1 { fusion_p0_32 = f32[16,16] parameter(0) fusion_p2_32 = f32[16,16] parameter(1) ROOT add = f32[16,16] add(fusion_p0_32, fusion_p2_32), backend_config={"operation_queue_id":"1","wait_on_operation_queues":[]} } ENTRY entry { p1_32 = f32[16,16] parameter(0) p2_32 = f32[16,16] parameter(1) ROOT fusion.1 = f32[16,16] fusion(p1_32, p2_32), kind=kLoop, calls=fused_computation.1 } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHloString)); StreamAttributeAnnotator attr_annotator; bool changed; TF_ASSERT_OK_AND_ASSIGN(changed, attr_annotator.Run(module.get())); EXPECT_TRUE(changed); const HloInstruction* fusion = FindInstruction(module.get(), "fusion.1"); EXPECT_TRUE(fusion->has_backend_config()); TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig gpu_config, fusion->backend_config<GpuBackendConfig>()); EXPECT_EQ(gpu_config.operation_queue_id(), 1); } TEST_F(StreamAttributeAnnotatorTest, CopyStartIsAnnotated) { constexpr absl::string_view kHloString = R"( HloModule offloading ENTRY %main (param_0: f32[1024], param_1: f32[1024]) -> f32[1024] { %param_1 = f32[1024]{0} parameter(1) %param_0 = f32[1024]{0} parameter(0) %res_3 = f32[1024]{0} add(f32[1024]{0} %param_0, f32[1024]{0} %param_1) %copy-start = (f32[1024]{0:S(5)}, f32[1024]{0}, u32[]) copy-start(f32[1024]{0} %res_3) %res_4 = f32[1024]{0} tanh(f32[1024]{0} %res_3) %copy-start.2 = (f32[1024]{0:S(5)}, f32[1024]{0}, u32[]) copy-start(f32[1024]{0} %res_4) %res_5 = f32[1024]{0} tanh(f32[1024]{0} %res_4) %copy-done = f32[1024]{0:S(5)} copy-done((f32[1024]{0:S(5)}, f32[1024]{0}, u32[]) %copy-start) %res_6 = f32[1024]{0} tanh(f32[1024]{0} %res_5) %copy-done.2 = f32[1024]{0:S(5)} copy-done((f32[1024]{0:S(5)}, f32[1024]{0}, u32[]) %copy-start.2) %copy-start.3 = (f32[1024]{0}, f32[1024]{0:S(5)}, u32[]) copy-start(f32[1024]{0:S(5)} %copy-done.2) %res_7 = f32[1024]{0} add(f32[1024]{0} %res_6, f32[1024]{0} %res_6) %copy-start.1 = (f32[1024]{0}, f32[1024]{0:S(5)}, u32[]) copy-start(f32[1024]{0:S(5)} %copy-done) %res_8 = f32[1024]{0} add(f32[1024]{0} %res_7, f32[1024]{0} %res_5) %copy-done.3 = f32[1024]{0} copy-done((f32[1024]{0}, f32[1024]{0:S(5)}, u32[]) %copy-start.3) %res_9 = f32[1024]{0} add(f32[1024]{0} %res_8, f32[1024]{0} %copy-done.3) %copy-done.1 = f32[1024]{0} copy-done((f32[1024]{0}, f32[1024]{0:S(5)}, u32[]) %copy-start.1) %res_10 = f32[1024]{0} add(f32[1024]{0} %res_9, f32[1024]{0} %copy-done.1) ROOT %res_11 = f32[1024]{0} tanh(f32[1024]{0} %res_10) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHloString)); StreamAttributeAnnotator attr_annotator; bool changed; TF_ASSERT_OK_AND_ASSIGN(changed, attr_annotator.Run(module.get())); EXPECT_TRUE(changed); for (std::string i : {"", ".1", ".2", ".3"}) { const HloInstruction* cp_start = FindInstruction(module.get(), "copy-start" + i); 
EXPECT_TRUE(cp_start->has_backend_config()); TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig gpu_config, cp_start->backend_config<GpuBackendConfig>()); EXPECT_EQ(gpu_config.operation_queue_id(), 1); } } TEST_F(StreamAttributeAnnotatorTest, DynamicUpdateSliceWrappedAndAnnotated) { constexpr absl::string_view kHloString = R"( HloModule ModuleWithAsyncDynamicUpdateSlice, is_scheduled=true ENTRY entry (param_0: f32[256,128,128], param_1: f32[1,128,128]) -> f32[256,128,128] { param_0 = f32[256,128,128]{2,1,0:S(5)} parameter(0), metadata={scheduling_name="param_0"} param_1 = f32[1,128,128]{2,1,0} parameter(1), metadata={scheduling_name="param_1"} izero = s32[] constant(0), metadata={scheduling_name="izero"} dynamic-update-slice-start.2 = ((f32[256,128,128]{2,1,0:S(5)}, f32[1,128,128]{2,1,0}, s32[], s32[], s32[]), f32[256,128,128]{2,1,0:S(5)}, u32[]) dynamic-update-slice-start(param_0, param_1, izero, izero, izero), metadata={scheduling_name="dynamic-update-slice-start.2"} ROOT dynamic-update-slice-done.2 = f32[256,128,128]{2,1,0:S(5)} dynamic-update-slice-done(dynamic-update-slice-start.2), metadata={scheduling_name="dynamic-update-slice-done.2"} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHloString)); EXPECT_TRUE(module->has_schedule()); TF_ASSERT_OK_AND_ASSIGN(bool changed, StreamAttributeAnnotator().Run(module.get())); EXPECT_TRUE(changed); const HloInstruction* dus = FindInstruction(module.get(), HloOpcode::kDynamicUpdateSlice); const HloComputation* computation = dus->parent(); EXPECT_TRUE(computation->IsFusionComputation()); const HloInstruction* fusion = computation->FusionInstruction(); EXPECT_EQ(fusion->opcode(), HloOpcode::kFusion); EXPECT_TRUE(fusion->parent()->IsAsyncComputation()); EXPECT_TRUE(fusion->has_backend_config()); TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig gpu_config, fusion->backend_config<GpuBackendConfig>()); EXPECT_EQ(gpu_config.operation_queue_id(), 1); for (const auto* comp : module->computations()) { for (const auto* instruction : comp->instructions()) { if (!instruction->metadata().scheduling_name().empty()) { EXPECT_EQ(instruction->name(), instruction->metadata().scheduling_name()); } } } constexpr absl::string_view kExpectedSchedulingName = R"( )"; TF_ASSERT_OK_AND_ASSIGN( bool filecheck_matches, RunFileCheck( module->ToString(HloPrintOptions().set_print_operand_shape(false)), kExpectedSchedulingName)); EXPECT_TRUE(filecheck_matches); } TEST_F(StreamAttributeAnnotatorTest, DynamicSliceWrappedAndAnnotated) { constexpr absl::string_view kHloString = R"( HloModule ModuleWithAsyncDynamicSlice, is_scheduled=true ENTRY entry (param_0: f32[256,128,128]) -> f32[1,128,128] { param_0 = f32[256,128,128]{2,1,0:S(5)} parameter(0), metadata={scheduling_name="param_0"} izero = s32[] constant(0), metadata={scheduling_name="izero"} dynamic-slice-start.2 = ((f32[256,128,128]{2,1,0:S(5)}, s32[], s32[], s32[]), f32[1,128,128]{2,1,0}, u32[]) dynamic-slice-start(param_0, izero, izero, izero), dynamic_slice_sizes={1,128,128}, metadata={scheduling_name="dynamic-slice-start.2"} ROOT dynamic-slice-done.2 = f32[1,128,128]{2,1,0} dynamic-slice-done(dynamic-slice-start.2), metadata={scheduling_name="dynamic-slice-done.2"} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHloString)); EXPECT_TRUE(module->has_schedule()); TF_ASSERT_OK_AND_ASSIGN(bool changed, StreamAttributeAnnotator().Run(module.get())); EXPECT_TRUE(changed); const HloInstruction* ds = FindInstruction(module.get(), 
HloOpcode::kDynamicSlice); const HloComputation* computation = ds->parent(); EXPECT_TRUE(computation->IsFusionComputation()); const HloInstruction* fusion = computation->FusionInstruction(); EXPECT_EQ(fusion->opcode(), HloOpcode::kFusion); EXPECT_TRUE(fusion->parent()->IsAsyncComputation()); EXPECT_TRUE(fusion->has_backend_config()); TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig gpu_config, fusion->backend_config<GpuBackendConfig>()); EXPECT_EQ(gpu_config.operation_queue_id(), 1); for (const auto* comp : module->computations()) { for (const auto* instruction : comp->instructions()) { if (!instruction->metadata().scheduling_name().empty()) { EXPECT_EQ(instruction->name(), instruction->metadata().scheduling_name()); } } } constexpr absl::string_view kExpectedSchedulingName = R"( )"; TF_ASSERT_OK_AND_ASSIGN( bool filecheck_matches, RunFileCheck( module->ToString(HloPrintOptions().set_print_operand_shape(false)), kExpectedSchedulingName)); EXPECT_TRUE(filecheck_matches); } } }
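The tests above exercise StreamAttributeAnnotator by calling Run() directly on a parsed module. As a usage note, here is a hedged sketch of wiring the same pass into an HloPassPipeline; the pipeline name is arbitrary and the hlo_pass_pipeline.h include path is an assumption about the surrounding XLA layout rather than something taken from these files.

// Sketch: registering the annotator on a pass pipeline instead of invoking
// Run() by hand. Construction with no arguments matches the tests above.
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"  // assumed include path
#include "xla/service/gpu/transforms/stream_attribute_annotator.h"

namespace xla::gpu {

inline absl::StatusOr<bool> AnnotateStreamAttributes(HloModule* module) {
  HloPassPipeline pipeline("stream-attribute-annotation");
  pipeline.AddPass<StreamAttributeAnnotator>();
  return pipeline.Run(module);
}

}  // namespace xla::gpu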
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/stream_attribute_annotator.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/stream_attribute_annotator_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
aa7ea541-2e72-40b9-9fb8-7bf6fee453ec
cpp
tensorflow/tensorflow
horizontal_input_fusion
third_party/xla/xla/service/gpu/transforms/horizontal_input_fusion.cc
third_party/xla/xla/service/gpu/transforms/horizontal_input_fusion_test.cc
#include "xla/service/gpu/transforms/horizontal_input_fusion.h" #include <algorithm> #include <cstddef> #include <vector> #include "absl/container/flat_hash_set.h" #include "absl/log/log.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/gpu/gpu_fusible.h" #include "xla/service/hlo_creation_utils.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/stream_executor/device_description.h" #include "xla/util.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { Shape GetInputShapeForMultiOutputFusion(const HloInstruction& instr) { const HloInstruction* real_hero = GetRealHeroForMultiOutputFusion(instr); if (real_hero->operands().empty()) { return Shape(); } else { return real_hero->operand(0)->shape(); } } class HorizontalInputFusionImpl { public: explicit HorizontalInputFusionImpl(HloComputation* computation, const se::DeviceDescription& d) : computation_(computation), device_info_(d) {} ~HorizontalInputFusionImpl() = default; absl::StatusOr<bool> Run(); private: HloComputation* computation_; const se::DeviceDescription& device_info_; }; bool CompareShapeDimsFromLeftToRight(const Shape& shape_a, const Shape& shape_b) { if (shape_a.rank() != shape_b.rank()) { return shape_a.rank() < shape_b.rank(); } auto dims_a = shape_a.dimensions(); auto dims_b = shape_b.dimensions(); for (size_t i = 0; i < dims_a.size(); ++i) { if (dims_a[i] != dims_b[i]) { return dims_a[i] < dims_b[i]; } } return true; } std::vector<HloInstruction*> FindAndSortFusionCandidates( HloInstruction* consumer) { absl::flat_hash_set<HloInstruction*> fusion_instr_set; std::vector<HloInstruction*> fusion_instrs; for (HloInstruction* opnd : consumer->operands()) { HloInstruction* predecessor = opnd->LatestNonGteAncestor(); if (!predecessor->IsCustomFusion() && IsInputFusibleReduction(*predecessor) && IsConsumerTheOnlyNonRootUser(*predecessor, *consumer)) { if (fusion_instr_set.insert(predecessor).second) { fusion_instrs.push_back(predecessor); } } } std::sort(fusion_instrs.begin(), fusion_instrs.end(), [&](const HloInstruction* a, const HloInstruction* b) { Shape shape_a = GetInputShapeForMultiOutputFusion(*a); Shape shape_b = GetInputShapeForMultiOutputFusion(*b); if (!ShapeUtil::EqualIgnoringElementType(shape_a, shape_b)) { return CompareShapeDimsFromLeftToRight(shape_a, shape_b); } return GetInstrCountOfFusible(*a) < GetInstrCountOfFusible(*b); }); return fusion_instrs; } absl::StatusOr<bool> HorizontalInputFusionImpl::Run() { bool changed = false; XLA_VLOG_LINES(3, computation_->ToString()); std::vector<HloInstruction*> def_to_use_order = computation_->MakeInstructionPostOrder(); for (HloInstruction* consumer : def_to_use_order) { auto candidates = FindAndSortFusionCandidates(consumer); if (candidates.size() <= 1) { continue; } for (size_t j = 0; j < candidates.size(); ++j) { if (candidates[j]->opcode() != HloOpcode::kFusion) { TF_ASSIGN_OR_RETURN( HloInstruction * fusion_instr, MakeFusionInstruction(candidates[j], HloInstruction::FusionKind::kInput)); candidates[j] = fusion_instr; changed = true; } } size_t fusion_anchor_id = 0; for (size_t j = 1; j < candidates.size(); ++j) { HloInstruction* fusion_anchor = candidates[fusion_anchor_id]; HloInstruction* fused = candidates[j]; if (ShapesCompatibleForMultiOutputFusion(*fusion_anchor, *fused) && 
FusionFitsInBudget(*fusion_anchor, *fused, device_info_)) { VLOG(3) << "Fuse " << fused->ToString() << " into " << fusion_anchor->ToString(); fusion_anchor->MergeFusionInstructionIntoMultiOutput(fused); changed = true; } else { VLOG(3) << j - fusion_anchor_id - 1 << " instructions are fused."; fusion_anchor_id = j; } } } return changed; } } absl::StatusOr<bool> HorizontalInputFusion::RunOnComputation( HloComputation* computation) { HorizontalInputFusionImpl horizontal_fusion_impl(computation, device_info_); return horizontal_fusion_impl.Run(); } absl::StatusOr<bool> HorizontalInputFusion::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { bool changed = false; VLOG(2) << "Run horizontal input fusion."; for (HloComputation* comp : module->MakeNonfusionComputations(execution_threads)) { TF_ASSIGN_OR_RETURN(changed, RunOnComputation(comp)); } return changed; } } }
#include "xla/service/gpu/transforms/horizontal_input_fusion.h" #include <cstdint> #include <utility> #include <vector> #include "xla/error_spec.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/literal_util.h" #include "xla/service/gpu/gpu_device_info_for_tests.h" #include "xla/service/gpu/tests/gpu_codegen_test.h" #include "xla/service/pattern_matcher.h" #include "xla/service/pattern_matcher_gmock.h" #include "xla/shape_util.h" #include "xla/stream_executor/device_description.h" #include "xla/test.h" namespace xla { namespace gpu { namespace { namespace m = ::xla::match; class HorizontalInputFusionTest : public GpuCodegenTest { public: se::DeviceDescription device_description_{ TestGpuDeviceInfo::RTXA6000DeviceInfo()}; HorizontalInputFusion horizontal_input_fusion_{device_description_}; }; TEST_F(HorizontalInputFusionTest, BasicTest) { auto module = ParseAndReturnVerifiedModule(R"( HloModule BasicTest %add_f16 { %x = f16[] parameter(0) %y = f16[] parameter(1) ROOT %add = f16[] add(%x, %y) } fused_computation.1 { arg.1 = f16[1024]{0} parameter(0) constant0 = f16[] constant(0) ROOT reduce1 = f16[] reduce(arg.1, constant0), dimensions={0}, to_apply=%add_f16 } fused_computation.2 { arg.1 = f16[1024]{0} parameter(0) constant0 = f16[] constant(0) ROOT reduce1 = f16[] reduce(arg.1, constant0), dimensions={0}, to_apply=%add_f16 } ENTRY entry_computation { arg.1 = f16[1024]{0} parameter(0) arg.2 = f16[1024]{0} parameter(1) fusion.1 = f16[] fusion(arg.1), kind=kInput, calls=fused_computation.1 fusion.2 = f16[] fusion(arg.2), kind=kInput, calls=fused_computation.2 ROOT tuple.1 = (f16[], f16[]) tuple(fusion.1, fusion.2) } )") .value(); EXPECT_TRUE(horizontal_input_fusion_.Run(module.get()).value()); const HloInstruction* entry_root = module->entry_computation()->root_instruction(); const HloInstruction* fusion = nullptr; ASSERT_THAT(entry_root, GmockMatch(m::Tuple((m::GetTupleElement(m::Fusion(&fusion))), (m::GetTupleElement(m::Fusion()))))); ASSERT_TRUE(fusion->IsMultiOutputFusion()); EXPECT_THAT(fusion->fused_expression_root(), GmockMatch(m::Tuple(m::Reduce(), m::Reduce()))); } TEST_F(HorizontalInputFusionTest, ManyInputFusions) { auto module = CreateNewVerifiedModule(); HloComputation* reduce_computation; { auto embedded_builder = HloComputation::Builder("add"); auto lhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter( 0, ShapeUtil::MakeShape(F32, {}), "lhs")); auto rhs = embedded_builder.AddInstruction(HloInstruction::CreateParameter( 1, ShapeUtil::MakeShape(F32, {}), "rhs")); embedded_builder.AddInstruction( HloInstruction::CreateBinary(lhs->shape(), HloOpcode::kAdd, lhs, rhs)); reduce_computation = module->AddEmbeddedComputation(embedded_builder.Build()); } HloComputation::Builder builder(TestName()); std::vector<HloInstruction*> var_outs; auto input_shape = ShapeUtil::MakeShape(F32, {1024, 1024}); auto output_shape = ShapeUtil::MakeShape(F32, {1024}); for (int64_t i = 0; i < 130; ++i) { HloInstruction* param_var_in = builder.AddInstruction( HloInstruction::CreateParameter(i * 2 + 0, input_shape, "var.in")); HloInstruction* param_alpha = builder.AddInstruction(HloInstruction::CreateParameter( i * 2 + 1, ShapeUtil::MakeShape(F32, {}), "alpha")); auto alpha_broadcasted = builder.AddInstruction( HloInstruction::CreateBroadcast(input_shape, param_alpha, {})); auto mul = builder.AddInstruction(HloInstruction::CreateBinary( input_shape, HloOpcode::kMultiply, param_var_in, alpha_broadcasted)); 
HloInstruction* const0 = builder.AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0<float>(0))); auto reduce = builder.AddInstruction(HloInstruction::CreateReduce( output_shape, mul, const0, {1}, reduce_computation)); var_outs.push_back(reduce); } builder.AddInstruction(HloInstruction::CreateTuple(var_outs)); module->AddEntryComputation(builder.Build()); if (GetDebugOptionsForTest().xla_gpu_mlir_emitter_level() < 4) { CompileAndVerifyIr(module->Clone(), R"(CHECK: reduce-group-6)", false); } else { CompileAndVerifyIr(module->Clone(), R"(CHECK: switch {{.*}} label {{.*}} [ CHECK-NEXT: label)", false); } EXPECT_TRUE(RunAndCompare(std::move(module), ErrorSpec{1e-5, 1e-5})); } TEST_F(HorizontalInputFusionTest, MultiOutputFusionTest) { auto module = ParseAndReturnVerifiedModule(R"( HloModule MultiOutputFusionTest %add_f16 { %x = f16[] parameter(0) %y = f16[] parameter(1) ROOT %add = f16[] add(%x, %y) } fused_computation.1 { arg.1 = f16[1024]{0} parameter(0) constant0 = f16[] constant(0) reduce.1 = f16[] reduce(arg.1, constant0), dimensions={0}, to_apply=%add_f16 add.0 = f16[1024] add(arg.1, arg.1) ROOT tuple.1 = (f16[], f16[1024]) tuple(reduce.1, add.0) } fused_computation.2 { arg.1 = f16[1024]{0} parameter(0) constant0 = f16[] constant(0) reduce.1 = f16[] reduce(arg.1, constant0), dimensions={0}, to_apply=%add_f16 add.0 = f16[1024] add(arg.1, arg.1) ROOT tuple.1 = (f16[], f16[1024]) tuple(reduce.1, add.0) } fused_computation.3 { arg.0 = f16[1024]{0} parameter(0) arg.1 = f16[1024]{0} parameter(1) add.0 = f16[1024] add(arg.0, arg.1) mul.0 = f16[1024] multiply(arg.0, arg.1) ROOT tuple.1 = (f16[1024], f16[1024]) tuple(add.0, mul.0) } ENTRY entry_computation { arg.1 = f16[1024]{0} parameter(0) arg.2 = f16[1024]{0} parameter(1) fusion.1 = (f16[],f16[1024]) fusion(arg.1), kind=kInput, calls=fused_computation.1 fusion.2 = (f16[],f16[1024]) fusion(arg.2), kind=kInput, calls=fused_computation.2 gte.3 = f16[] get-tuple-element(fusion.1), index=0 gte.1 = f16[1024]{0} get-tuple-element(fusion.1), index=1 gte.2 = f16[1024]{0} get-tuple-element(fusion.2), index=1 gte.6 = f16[] get-tuple-element(fusion.2), index=0 fusion.3 = (f16[1024],f16[1024]) fusion(gte.1, gte.2), kind=kLoop, calls=fused_computation.3 gte.4 = f16[1024] get-tuple-element(fusion.3), index=0 gte.5 = f16[1024]{0} get-tuple-element(fusion.3), index=1 ROOT tuple.1 = (f16[], f16[1024], f16[1024]{0}, f16[]) tuple(gte.3, gte.4, gte.5, gte.6) } )") .value(); EXPECT_TRUE(horizontal_input_fusion_.Run(module.get()).value()); } TEST_F(HorizontalInputFusionTest, NonfusionInstrs) { auto module = ParseAndReturnVerifiedModule(R"( HloModule NonfusionInstrs %add_f16 { %x = f16[] parameter(0) %y = f16[] parameter(1) ROOT %add = f16[] add(%x, %y) } ENTRY entry_computation { arg.0 = f16[1024]{0} parameter(0) arg.1 = f16[1024]{0} parameter(1) constant0 = f16[] constant(0) reduce.0 = f16[] reduce(arg.0, constant0), dimensions={0}, to_apply=%add_f16 reduce.1 = f16[] reduce(arg.1, constant0), dimensions={0}, to_apply=%add_f16 ROOT tuple.0 = (f16[], f16[]) tuple(reduce.0, reduce.1) } )") .value(); EXPECT_TRUE(horizontal_input_fusion_.Run(module.get()).value()); const HloInstruction* entry_root = module->entry_computation()->root_instruction(); const HloInstruction* fusion = nullptr; ASSERT_THAT(entry_root, GmockMatch(m::Tuple((m::GetTupleElement(m::Fusion(&fusion))), (m::GetTupleElement(m::Fusion()))))); ASSERT_TRUE(fusion->IsMultiOutputFusion()); EXPECT_THAT(fusion->fused_expression_root(), GmockMatch(m::Tuple(m::Reduce(), m::Reduce()))); } 
TEST_F(HorizontalInputFusionTest, DoesNotFuseCustomFusions) { auto module = ParseAndReturnVerifiedModule(R"( max { p0 = f16[] parameter(0) p1 = f16[] parameter(1) ROOT max = f16[] maximum(p0, p1) } triton_a { p = f16[128,256] parameter(0) c = f16[] constant(0) ROOT n = f16[128] reduce(p, c), dimensions={1}, to_apply=max } triton_b { p = f16[128,256] parameter(0) c = f16[] constant(0) ROOT n = f16[128] reduce(p, c), dimensions={1}, to_apply=max } ENTRY entry_computation { p = f16[128,256] parameter(0) fa = f16[128] fusion(p), kind=kCustom, calls=triton_a fb = f16[128] fusion(p), kind=kCustom, calls=triton_b ROOT tuple = (f16[128], f16[128]) tuple(fa, fb) } )") .value(); EXPECT_FALSE(horizontal_input_fusion_.Run(module.get()).value()); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/horizontal_input_fusion.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/horizontal_input_fusion_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
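Editor's note: for readers skimming the HorizontalInputFusion record above, here is a minimal standalone sketch of the candidate ordering that FindAndSortFusionCandidates applies before fusing. It is an illustration only, not XLA code: xla::Shape is replaced by a plain std::vector<int64_t>, and the helper name DimsLess is invented for this sketch.

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

// Rank-first, then left-to-right dimension comparison, mirroring
// CompareShapeDimsFromLeftToRight. Equal shapes compare as equivalent here,
// so the comparator is a strict weak ordering suitable for std::sort.
bool DimsLess(const std::vector<int64_t>& a, const std::vector<int64_t>& b) {
  if (a.size() != b.size()) return a.size() < b.size();
  for (size_t i = 0; i < a.size(); ++i) {
    if (a[i] != b[i]) return a[i] < b[i];
  }
  return false;
}

int main() {
  std::vector<std::vector<int64_t>> shapes = {
      {1024, 1024}, {1024}, {128, 256}, {1024, 1024}};
  std::sort(shapes.begin(), shapes.end(), DimsLess);
  // Candidates with identical input shapes end up adjacent, which is what
  // lets the fusion loop merge runs of compatible reductions into one
  // multi-output fusion.
  for (const auto& s : shapes) {
    for (int64_t d : s) std::cout << d << ' ';
    std::cout << '\n';
  }
  return 0;
}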
81c927b2-02b1-48bb-865e-253ba363c819
cpp
tensorflow/tensorflow
async_collective_annotator
third_party/xla/xla/service/gpu/transforms/async_collective_annotator.cc
third_party/xla/xla/service/gpu/transforms/async_collective_annotator_test.cc
#include "xla/service/gpu/transforms/async_collective_annotator.h" #include "absl/container/flat_hash_set.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/utils/hlo_query.h" #include "xla/service/gpu/backend_configs.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { absl::StatusOr<bool> AsyncCollectiveAnnotator::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { bool changed = false; for (HloComputation* computation : module->MakeNonfusionComputations(execution_threads)) { for (HloInstruction* instruction : computation->instructions()) { if (!hlo_query::IsAsyncCollectiveStartOp(instruction)) { continue; } CollectiveBackendConfig config; config.set_is_sync(!is_collective_async_(instruction)); TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config, instruction->backend_config<GpuBackendConfig>()); *gpu_config.mutable_collective_backend_config() = config; TF_RETURN_IF_ERROR(instruction->set_backend_config(gpu_config)); changed = true; } } return changed; } } }
#include "xla/service/gpu/transforms/async_collective_annotator.h" #include <memory> #include <string> #include <vector> #include <gtest/gtest.h> #include "absl/container/flat_hash_set.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_query.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/tests/hlo_test_base.h" #include "xla/tests/test_macros.h" #include "xla/util.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { constexpr absl::string_view kHloString = R"( HloModule ModuleWithAsync addf32 { p0 = f32[] parameter(0) p1 = f32[] parameter(1) ROOT add = f32[] add(p0, p1) } addf16 { p0 = f16[] parameter(0) p1 = f16[] parameter(1) ROOT add = f16[] add(p0, p1) } reduce_scatterf32 { p0 = f32[2] parameter(0) ROOT result = f32[1] reduce-scatter(p0), replica_groups={}, dimensions={0}, to_apply=addf32 } ENTRY entry { pf32 = f32[1] parameter(0) pf16 = f16[1] parameter(1) arf32-start = f32[1] all-reduce-start(pf32), to_apply=addf32 arf32-done = f32[1] all-reduce-done(arf32-start) arf16-start = f16[1] all-reduce-start(pf16), to_apply=addf16 arf16-done = f16[1] all-reduce-done(arf16-start) agf32-start = (f32[1], f32[2]) all-gather-start(pf32), dimensions={0} agf32-done = f32[2] all-gather-done(agf32-start) agf16-start = (f16[1], f16[2]) all-gather-start(pf16), dimensions={0} agf16-done = f16[2] all-gather-done(agf16-start) cpf32-start = (f32[1], f32[1], u32[], u32[]) collective-permute-start(pf32), source_target_pairs={{0,1}, {1,0}} cpf32-done = f32[1] collective-permute-done(cpf32-start) cpf16-start = (f16[1], f16[1], u32[], u32[]) collective-permute-start(pf16), source_target_pairs={{0,1}, {1,0}} cpf16-done = f16[1] collective-permute-done(cpf16-start) rsf32-start = ((f32[2]), f32[1]) async-start(agf32-done), calls=reduce_scatterf32 rsf32-done = f32[1] async-done(rsf32-start), calls=reduce_scatterf32 ROOT tuple = (f32[1], f16[1], f32[2], f16[2], f32[1], f16[1], f32[1]) tuple(arf32-done, arf16-done, agf32-done, agf16-done, cpf32-done, cpf16-done, rsf32-done) } )"; struct TestCase { std::string test_name; HloPredicate is_async_predicate; absl::flat_hash_set<absl::string_view> expected_async; absl::flat_hash_set<absl::string_view> expected_sync; }; class AsyncCollectiveAnnotatorTest : public HloTestBase, public ::testing::WithParamInterface<TestCase> {}; XLA_TEST_P(AsyncCollectiveAnnotatorTest, Test) { const TestCase& test_case = GetParam(); TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHloString, 2)); TF_ASSERT_OK_AND_ASSIGN( bool changed, AsyncCollectiveAnnotator(test_case.is_async_predicate).Run(module.get())); EXPECT_TRUE(changed); for (const HloInstruction* hlo : module->entry_computation()->instructions()) { if (!hlo_query::IsAsyncCollectiveStartOp(hlo)) { continue; } auto gpu_config = hlo->backend_config<GpuBackendConfig>(); ASSERT_TRUE(gpu_config.ok()); const CollectiveBackendConfig& backend_config = gpu_config.value().collective_backend_config(); if (test_case.expected_async.contains(hlo->name())) { EXPECT_FALSE(backend_config.is_sync()); } if (test_case.expected_sync.contains(hlo->name())) { EXPECT_TRUE(backend_config.is_sync()); } } } std::vector<TestCase> TestCases() { HloPredicate is_f16 = [](const HloInstruction* hlo) { return hlo->operand(0)->shape().element_type() == PrimitiveType::F16; }; return { {"all_async", HloPredicateTrue, {"arf32-start", "arf16-start", 
"agf32-start", "agf16-start", "cpf32-start", "cpf16-start", "rsf32-start"}, {}}, {"all_sync", HloPredicateFalse, {}, {"arf32-start", "arf16-start", "agf32-start", "agf16-start", "cpf32-start", "cpf16-start", "rsf32-start"}}, {"ar_async", HloPredicateIsOp<HloOpcode::kAllReduceStart>, {"arf32-start", "arf16-start"}, {"agf32-start", "agf16-start", "cpf32-start", "cpf16-start", "rsf32-start"}}, {"cp_async", HloPredicateIsOp<HloOpcode::kCollectivePermuteStart>, {"cpf32-start", "cpf16-start"}, {"arf32-start", "arf16-start", "agf32-start", "agf16-start", "rsf32-start"}}, {"f16_async", is_f16, {"arf16-start", "agf16-start", "cpf16-start"}, {"arf32-start", "agf32-start", "cpf32-start", "rsf32-start"}}, }; } std::string TestCaseName(const ::testing::TestParamInfo<TestCase>& test_case) { return test_case.param.test_name; } INSTANTIATE_TEST_SUITE_P(AsyncCollectiveAnnotatorTest, AsyncCollectiveAnnotatorTest, ::testing::ValuesIn(TestCases()), TestCaseName); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/async_collective_annotator.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/async_collective_annotator_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
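Editor's note: a small self-contained sketch of the predicate-driven classification the AsyncCollectiveAnnotator above performs, where each collective start op ends up with is_sync = !is_async(op). The FakeOp struct and the f16-only rule are invented for illustration; the rule mirrors the "f16_async" test case in the test file above.

#include <functional>
#include <iostream>
#include <string>
#include <vector>

// Toy stand-in for an HLO collective-start instruction; not an XLA type.
struct FakeOp {
  std::string name;
  bool is_f16;
};

int main() {
  // Mirrors the "f16_async" case: only f16 collectives stay asynchronous.
  std::function<bool(const FakeOp&)> is_async = [](const FakeOp& op) {
    return op.is_f16;
  };
  std::vector<FakeOp> starts = {{"arf32-start", false},
                                {"arf16-start", true},
                                {"cpf16-start", true}};
  for (const FakeOp& op : starts) {
    // The real pass records this flag in CollectiveBackendConfig.
    std::cout << op.name << ": is_sync=" << std::boolalpha << !is_async(op)
              << "\n";
  }
  return 0;
}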
0777f39b-1bf7-4e05-bb83-f4957bcc97b6
cpp
tensorflow/tensorflow
pgle_accuracy_checker
third_party/xla/xla/service/gpu/transforms/pgle_accuracy_checker.cc
third_party/xla/xla/service/gpu/transforms/pgle_accuracy_checker_test.cc
#include "xla/service/gpu/transforms/pgle_accuracy_checker.h" #include "absl/container/flat_hash_set.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_module.h" #include "tsl/platform/errors.h" namespace xla::gpu { absl::StatusOr<bool> PGLEAccuracyChecker::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { TF_RETURN_IF_ERROR(pgle_estimator_.CheckAccuracy(*module)); return false; } }
#include "xla/service/gpu/transforms/pgle_accuracy_checker.h" #include <memory> #include <string> #include <utility> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/status/status.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/service/gpu/gpu_latency_hiding_scheduler.h" #include "xla/service/latency_hiding_scheduler.h" #include "xla/service/profile_guided_latency_estimator.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/protobuf.h" #include "tsl/platform/status_matchers.h" #include "tsl/platform/statusor.h" namespace xla::gpu { namespace { using PGLEAccuracyCheckerTest = HloTestBase; using ::tensorflow::profiler::ProfiledInstructionsProto; using ::tsl::protobuf::TextFormat; using ::tsl::testing::StatusIs; std::unique_ptr<ProfileGuidedLatencyEstimator> GetProfileGuidedLatencyEstimator( ProfiledInstructionsProto& profile) { auto gpu_latency_estimator = std::make_unique<GpuLatencyEstimator>(8); SchedulerConfig config; auto aggregator = std::make_unique<GPUProfileStatisticsAggregator>(); return std::make_unique<ProfileGuidedLatencyEstimator>( config, std::move(gpu_latency_estimator), profile, std::move(aggregator)); } TEST_F(PGLEAccuracyCheckerTest, ReturnsOkAndNoIRChangeIfAllInstructionsAreFoundInTheProfile) { const absl::string_view kHloString = R"( HloModule m apply_op { x = f32[] parameter(0) y = f32[] parameter(1) ROOT apply_op = f32[] add(x, y) } ENTRY ar { p0 = f32[32] parameter(0) p1 = f32[32,32] parameter(1) p2 = f32[32,32] parameter(2) p3 = f32[32] parameter(3) dot0 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm" dot1 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm" add0 = f32[32,32] add(dot0, dot1) ar-start = f32[32] all-reduce-start(p0), to_apply=apply_op ar-done = f32[32] all-reduce-done(ar-start) ar-start1 = f32[32] all-reduce-start(p3), to_apply=apply_op ar-done1 = f32[32] all-reduce-done(ar-start1) ROOT _ = (f32[32],f32[32],f32[32,32]) tuple(ar-done, ar-done1, add0) })"; const std::string kProfileString = R"pb( costs { name: "dot0" cost_us: 1.0 } costs { name: "dot1" cost_us: 1.0 } costs { name: "add0" cost_us: 1.0 } costs { name: "ar-start" cost_us: 1.0 } costs { name: "ar-start1" cost_us: 1.0 } )pb"; ProfiledInstructionsProto profile; ASSERT_TRUE(TextFormat::ParseFromString(kProfileString, &profile)); TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHloString)); *module->mutable_config().mutable_fdo_profile() = kProfileString; auto pgle_estimator = GetProfileGuidedLatencyEstimator(profile); PGLEAccuracyChecker pgle_accuracy_checker(*pgle_estimator); TF_ASSERT_OK_AND_ASSIGN(bool changed, pgle_accuracy_checker.Run(module.get())); EXPECT_FALSE(changed); } TEST_F(PGLEAccuracyCheckerTest, ReturnsInvalidArgumentIfThereAreMissingInstructionsFromTheProfile) { const absl::string_view kHloString = R"( HloModule m apply_op { x = f32[] parameter(0) y = f32[] parameter(1) ROOT apply_op = f32[] add(x, y) } ENTRY ar { p0 = f32[32] parameter(0) p1 = f32[32,32] parameter(1) p2 = f32[32,32] parameter(2) p3 = f32[32] parameter(3) dot0 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm" dot1 = f32[32,32]{1,0} custom-call(p1, p2), custom_call_target="__cublas$gemm" add0 = f32[32,32] add(dot0, dot1) ar-start = f32[32] all-reduce-start(p0), to_apply=apply_op ar-done = f32[32] all-reduce-done(ar-start) ar-start1 = f32[32] all-reduce-start(p3), to_apply=apply_op ar-done1 = f32[32] all-reduce-done(ar-start1) ROOT _ = 
(f32[32],f32[32],f32[32,32]) tuple(ar-done, ar-done1, add0) })"; const std::string kProfileString = R"pb( costs { name: "dot0" cost_us: 1.0 } costs { name: "add0" cost_us: 1.0 } costs { name: "ar-start1" cost_us: 1.0 } )pb"; ProfiledInstructionsProto profile; ASSERT_TRUE(TextFormat::ParseFromString(kProfileString, &profile)); TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHloString)); *module->mutable_config().mutable_fdo_profile() = kProfileString; auto pgle_estimator = GetProfileGuidedLatencyEstimator(profile); PGLEAccuracyChecker pgle_accuracy_checker(*pgle_estimator); EXPECT_THAT(pgle_accuracy_checker.Run(module.get()), StatusIs(absl::StatusCode::kInvalidArgument)); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/pgle_accuracy_checker.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/pgle_accuracy_checker_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
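Editor's note: PGLEAccuracyChecker above only forwards to ProfileGuidedLatencyEstimator::CheckAccuracy. As a standalone sketch of the idea (not the XLA implementation), the check amounts to set membership over instruction names; the names below come from the second test above, where "dot1" and "ar-start" are deliberately absent from the profile.

#include <iostream>
#include <set>
#include <string>
#include <vector>

int main() {
  // Costs present in the fdo_profile of the failing test case.
  std::set<std::string> profiled = {"dot0", "add0", "ar-start1"};
  // Instructions the scheduler would want costs for.
  std::vector<std::string> needed = {"dot0", "dot1", "add0", "ar-start",
                                     "ar-start1"};
  bool complete = true;
  for (const std::string& name : needed) {
    if (profiled.count(name) == 0) {
      std::cout << "missing from profile: " << name << "\n";
      complete = false;
    }
  }
  std::cout << (complete ? "profile complete"
                         : "CheckAccuracy would return InvalidArgument")
            << "\n";
  return 0;
}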
d899afcc-2a19-49aa-8032-5f951e1034ad
cpp
tensorflow/tensorflow
reduction_dimension_grouper
third_party/xla/xla/service/gpu/transforms/reduction_dimension_grouper.cc
third_party/xla/xla/service/gpu/transforms/reduction_dimension_grouper_test.cc
#include "xla/service/gpu/transforms/reduction_dimension_grouper.h" #include <cstdint> #include <memory> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_set.h" #include "absl/container/inlined_vector.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/dfs_hlo_visitor_with_default.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/layout_util.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { class ReduceDimensionGroupVisitor : public DfsHloRewriteVisitor { public: absl::Status HandleReduce(HloInstruction *hlo) override { auto reduce = Cast<HloReduceInstruction>(hlo); VLOG(4) << "Input: " << reduce->ToString(); absl::InlinedVector<HloInstruction *, 2> reduce_inputs_grouped; std::vector<int64_t> reduced_dims_grouped; int idx = -1; for (HloInstruction *operand : reduce->inputs()) { idx++; std::vector<int64_t> new_grouped_dims; const Shape &shape = operand->shape(); CHECK(shape == LayoutUtil::GetWithDefaultLayout(shape)) << "Default layout should be enforced on reduction operand"; auto is_reduced = [&](int dim) { return absl::c_linear_search(reduce->dimensions(), dim); }; bool changed = false; int64_t next_dim_size = 1; for (int logical_dim = 0; logical_dim < shape.rank(); logical_dim++) { VLOG(5) << "Processing dimension " << logical_dim << " of size " << shape.dimensions(logical_dim); if (is_reduced(logical_dim) && logical_dim < shape.rank() - 1 && is_reduced(logical_dim + 1)) { VLOG(5) << "This and consecutive dimension are reduced, merging"; changed = true; next_dim_size *= shape.dimensions(logical_dim); continue; } if (is_reduced(logical_dim)) { new_grouped_dims.push_back(next_dim_size * shape.dimensions(logical_dim)); if (idx == 0) { reduced_dims_grouped.push_back(new_grouped_dims.size() - 1); } next_dim_size = 1; } else { new_grouped_dims.push_back(shape.dimensions(logical_dim)); } } if (!changed) { return absl::OkStatus(); } Shape grouped_shape = ShapeUtil::MakeShape(shape.element_type(), new_grouped_dims); reduce_inputs_grouped.push_back(reduce->parent()->AddInstruction( HloInstruction::CreateBitcast(grouped_shape, operand), &operand->metadata())); VLOG(5) << "Adding bitcast: " << reduce_inputs_grouped.back()->ToString(); } std::unique_ptr<HloInstruction> new_reduce = HloInstruction::CreateReduce( reduce->shape(), reduce_inputs_grouped, reduce->init_values(), reduced_dims_grouped, reduce->to_apply()); VLOG(5) << "Generated new reduction: " << new_reduce->ToString(); return ReplaceWithNewInstruction(reduce, std::move(new_reduce)); } }; absl::StatusOr<bool> ReductionDimensionGrouper::Run( HloModule *module, const absl::flat_hash_set<absl::string_view> &execution_threads) { TF_ASSIGN_OR_RETURN(bool changed, ReduceDimensionGroupVisitor().RunOnModule( module, execution_threads)); return changed; } } }
#include "xla/service/gpu/transforms/reduction_dimension_grouper.h" #include <optional> #include "absl/strings/string_view.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/test.h" namespace xla { namespace { class ReductionDimensionGrouperTest : public HloTestBase { public: void CheckDimensionGrouper(absl::string_view hlo, std::optional<absl::string_view> expected) { RunAndFilecheckHloRewrite(hlo, gpu::ReductionDimensionGrouper{}, expected); } }; TEST_F(ReductionDimensionGrouperTest, ReductionWithGrouping) { const char* hlo = R"( HloModule ReductionWithGrouping add { accum = f32[] parameter(0) op = f32[] parameter(1) ROOT out = f32[] add(accum, op) } ENTRY main { input = f32[100,10,32,3]{3,2,1,0} parameter(0) zero = f32[] constant(0) ROOT out = f32[100,10]{0,1} reduce(input, zero), dimensions={2,3}, to_apply=add } )"; CheckDimensionGrouper(hlo, R"( )"); } TEST_F(ReductionDimensionGrouperTest, ReductionWithGroupingVariadic) { const char* hlo = R"( HloModule ReductionWithGrouping argmax { running_max = f32[] parameter(0) running_max_idx = u32[] parameter(1) current_value = f32[] parameter(2) current_value_idx = u32[] parameter(3) current = (f32[], u32[]) tuple(running_max, running_max_idx) potential = (f32[], u32[]) tuple(current_value, current_value_idx) cmp_code = pred[] compare(current_value, running_max), direction=GT new_max = f32[] select(cmp_code, current_value, running_max) new_idx = u32[] select(cmp_code, current_value_idx, running_max_idx) ROOT out = (f32[], u32[]) tuple(new_max, new_idx) } ENTRY main { input = f32[100,10,32,3]{3,2,1,0} parameter(0) idxs = u32[100,10,32,3]{3,2,1,0} parameter(1) zero = f32[] constant(0) zero_idx = u32[] constant(0) ROOT out = (f32[100,10]{1,0}, u32[100,10]{1,0}) reduce(input, idxs, zero, zero_idx), dimensions={2,3}, to_apply=argmax } )"; CheckDimensionGrouper(hlo, R"( )"); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/reduction_dimension_grouper.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/reduction_dimension_grouper_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
a8876eb4-6b08-4e2c-954c-ccf51ebc946e
cpp
tensorflow/tensorflow
topk_splitter
third_party/xla/xla/service/gpu/transforms/topk_splitter.cc
third_party/xla/xla/service/gpu/transforms/topk_splitter_test.cc
#include "xla/service/gpu/transforms/topk_splitter.h" #include <algorithm> #include <cmath> #include <cstddef> #include <cstdint> #include <string> #include "absl/container/flat_hash_set.h" #include "absl/log/log.h" #include "absl/numeric/bits.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/hlo/ir/dfs_hlo_visitor_with_default.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/hlo_creation_utils.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { constexpr size_t kRequiredAlignment = 1024; constexpr size_t kMaximumBatchSize = 1024; class TopkSplitterVisitor : public DfsHloRewriteVisitor { public: explicit TopkSplitterVisitor(size_t split_threshold) : split_threshold_(split_threshold) {} absl::Status HandleCustomCall(HloInstruction* inst) override { HloCustomCallInstruction* topk = DynCast<HloCustomCallInstruction>(inst); if (topk == nullptr || topk->custom_call_target() != "TopK") { return absl::OkStatus(); } HloComputation* comp = inst->parent(); Shape data_shape = topk->operand(0)->shape(); bool has_batch = data_shape.dimensions_size() == 2; if (has_batch && data_shape.dimensions(0) != 1) { return absl::OkStatus(); } size_t n = data_shape.dimensions(has_batch ? 1 : 0); int64_t k = topk->shape().tuple_shapes(0).dimensions(has_batch ? 1 : 0); if (k > sqrt(n)) { return absl::OkStatus(); } if (n % kRequiredAlignment != 0) { return absl::OkStatus(); } if (n < split_threshold_) return absl::OkStatus(); int new_batch = std::min(absl::bit_floor(n / split_threshold_), kMaximumBatchSize); int new_n = n / new_batch; Shape split_input_shape = ShapeUtil::MakeShape(data_shape.element_type(), {new_batch, new_n}); TF_ASSIGN_OR_RETURN( HloInstruction * reshaped, MakeReshapeHlo(split_input_shape, topk->mutable_operand(0))); Shape batch_topk_shape = ShapeUtil::MakeTupleShape( {ShapeUtil::MakeShape(data_shape.element_type(), {new_batch, k}), ShapeUtil::MakeShape(S32, {new_batch, k})}); HloInstruction* batch_topk = comp->AddInstruction(HloInstruction::CreateCustomCall( batch_topk_shape, {reshaped}, topk->to_apply(), "TopK", "")); TF_ASSIGN_OR_RETURN(HloInstruction * indices, MakeGetTupleElementHlo(batch_topk, 1)); TF_ASSIGN_OR_RETURN(HloInstruction * values, MakeGetTupleElementHlo(batch_topk, 0)); Shape iota_shape = ShapeUtil::MakeShape(S32, {new_batch}); TF_ASSIGN_OR_RETURN( HloInstruction * fix, MakeBinaryHlo( HloOpcode::kMultiply, MakeIotaHlo(comp, iota_shape, 0), MakeBroadcastHlo(MakeR0ConstantHlo<int32_t>(comp, new_n), {}, iota_shape))); TF_ASSIGN_OR_RETURN( indices, MakeBinaryHlo(HloOpcode::kAdd, indices, MakeBroadcastHlo(fix, {0}, indices->shape()))); Shape linear_index_shape = ShapeUtil::MakeShape(S32, {k * new_batch}); Shape linear_shape = ShapeUtil::ChangeElementType( linear_index_shape, data_shape.element_type()); Shape linear_sort_shape = ShapeUtil::MakeTupleShape({linear_shape, linear_index_shape}); HloInstruction* aggregated_sort = comp->AddInstruction(HloInstruction::CreateSort( linear_sort_shape, 0, {*MakeReshapeHlo(linear_shape, values), *MakeReshapeHlo(linear_index_shape, indices)}, topk->to_apply(), true)); auto slice_tuple = [&](HloInstruction* sort, const size_t index) { return 
*MakeReshapeHlo( topk->shape().tuple_shapes(index), *MakeSliceHlo(*MakeGetTupleElementHlo(sort, index), {0}, {k}, {1})); }; return ReplaceInstruction(topk, comp->AddInstruction(HloInstruction::CreateTuple({ slice_tuple(aggregated_sort, 0), slice_tuple(aggregated_sort, 1), }))); } private: size_t split_threshold_; }; } absl::StatusOr<bool> TopKSplitter::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { return TopkSplitterVisitor(split_threshold_) .RunOnModule(module, execution_threads); } } }
#include "xla/service/gpu/transforms/topk_splitter.h" #include <stdint.h> #include <cstddef> #include <memory> #include <optional> #include <string> #include <utility> #include "absl/strings/string_view.h" #include "absl/strings/substitute.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/service/hlo_dce.h" #include "xla/service/pattern_matcher.h" #include "xla/service/topk_rewriter.h" #include "xla/tests/hlo_test_base.h" #include "xla/tests/verified_hlo_module.h" #include "tsl/platform/status_matchers.h" #include "tsl/platform/statusor.h" #include "tsl/platform/test.h" namespace m = ::xla::match; namespace xla { namespace gpu { namespace { using ::tsl::testing::IsOkAndHolds; using TopkSplitterTest = HloTestBase; constexpr absl::string_view kComparator = R"( %compare { %p.1.lhs.40628 = s32[] parameter(2) %p.1.rhs.40629 = s32[] parameter(3) %constant.40630 = pred[] constant(true) %broadcast.40631 = pred[] broadcast(pred[] %constant.40630), dimensions={} %p.0.lhs.40626 = f32[] parameter(0) %p.0.rhs.40627 = f32[] parameter(1) %compare.40632 = pred[] compare(f32[] %p.0.lhs.40626, f32[] %p.0.rhs.40627), direction=GT, type=TOTALORDER ROOT %select.40633 = pred[] select(pred[] %broadcast.40631, pred[] %compare.40632, pred[] %broadcast.40631) })"; TEST_F(TopkSplitterTest, SplitsTopK) { const std::string hlo_string = absl::Substitute(R"( HloModule module $0 ENTRY cluster { %arg.1 = f32[1,1073741824] parameter(0) ROOT %cc.2 = (f32[1,5], s32[1,5]) custom-call(%arg.1), custom_call_target= "TopK", to_apply=%compare })", kComparator); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); EXPECT_THAT(RunHloPass(TopKSplitter(), module.get()), IsOkAndHolds(true)); auto first_topk = m::CustomCall(m::Reshape(m::Parameter(0))); auto slice_result = [&](auto input, size_t i) { return m::Reshape(m::Slice(m::GetTupleElement(input, i))); }; auto index_correction = m::Broadcast(m::Multiply(m::Iota(), m::Broadcast(m::Constant()))); auto sorted = m::Sort( m::Reshape(m::GetTupleElement(first_topk, 0)), m::Reshape(m::Add(m::GetTupleElement(first_topk, 1), index_correction))); EXPECT_TRUE( Match(module->entry_computation()->root_instruction(), m::Tuple(slice_result(sorted, 0), slice_result(sorted, 1)))); } TEST_F(TopkSplitterTest, SplitsTopKNoBatchDimension) { const std::string hlo_string = absl::Substitute(R"( HloModule module $0 ENTRY cluster { %arg.1 = f32[1073741824] parameter(0) ROOT %cc.2 = (f32[5], s32[5]) custom-call(%arg.1), custom_call_target= "TopK", to_apply=%compare })", kComparator); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); EXPECT_THAT(RunHloPass(TopKSplitter(), module.get()), IsOkAndHolds(true)); auto first_topk = m::CustomCall(m::Reshape(m::Parameter(0))); auto slice_result = [&](auto input, size_t i) { return m::Reshape(m::Slice(m::GetTupleElement(input, i))); }; auto index_correction = m::Broadcast(m::Multiply(m::Iota(), m::Broadcast(m::Constant()))); auto sorted = m::Sort( m::Reshape(m::GetTupleElement(first_topk, 0)), m::Reshape(m::Add(m::GetTupleElement(first_topk, 1), index_correction))); EXPECT_TRUE( Match(module->entry_computation()->root_instruction(), m::Tuple(slice_result(sorted, 0), slice_result(sorted, 1)))); } TEST_F(TopkSplitterTest, SplitFailsUnderThreshold) { const std::string hlo_string = absl::Substitute(R"( HloModule module $0 ENTRY cluster { %arg.1 = f32[1,524288] parameter(0) ROOT %cc.2 = (f32[1,5], s32[1,5]) custom-call(%arg.1), 
custom_call_target= "TopK", to_apply=%compare })", kComparator); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); EXPECT_THAT( RunHloPass(TopKSplitter(1048576), module.get()), IsOkAndHolds(false)); } TEST_F(TopkSplitterTest, SplitFailsUnaligned) { const std::string hlo_string = absl::Substitute(R"( HloModule module $0 ENTRY cluster { %arg.1 = f32[1,524289] parameter(0) ROOT %cc.2 = (f32[1,5], s32[1,5]) custom-call(%arg.1), custom_call_target= "TopK", to_apply=%compare })", kComparator); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); EXPECT_THAT(RunHloPass(TopKSplitter(1024), module.get()), IsOkAndHolds(false)); } TEST_F(TopkSplitterTest, SplitFailsLargeK) { const std::string hlo_string = absl::Substitute(R"( HloModule module $0 ENTRY cluster { %arg.1 = f32[1,524288] parameter(0) ROOT %cc.2 = (f32[1,1024], s32[1,1024]) custom-call(%arg.1), custom_call_target= "TopK", to_apply=%compare })", kComparator); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); EXPECT_THAT(RunHloPass(TopKSplitter(1024), module.get()), IsOkAndHolds(false)); } TEST_F(TopkSplitterTest, Equivalent) { const std::string hlo_string = absl::Substitute(R"( HloModule module $0 ENTRY cluster { %arg.1 = f32[1,16384] parameter(0) ROOT %cc.2 = (f32[1,5], s32[1,5]) custom-call(%arg.1), custom_call_target= "TopK", to_apply=%compare })", kComparator); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); EXPECT_THAT(TopkDecomposer().Run(module.get()), IsOkAndHolds(true)); auto round_trip = [](HloModule* module) { EXPECT_THAT(TopkRewriter([](const HloSortInstruction*, int64_t) { return true; }).Run(module), IsOkAndHolds(true)); EXPECT_THAT(TopKSplitter(1024).Run(module), IsOkAndHolds(true)); EXPECT_THAT(TopkDecomposer().Run(module), IsOkAndHolds(true)); EXPECT_TRUE(HloDCE().Run(module).status().ok()); }; EXPECT_TRUE(RunAndCompare(std::move(module), std::nullopt, round_trip)); } TEST_F(TopkSplitterTest, StableSorts) { const std::string hlo_string = absl::Substitute(R"( HloModule module $0 ENTRY cluster { %constant.1 = f32[] constant(42) %broadcast.2= f32[1,16384] broadcast(f32[] %constant.1), dimensions={} ROOT %cc.3 = (f32[1,5], s32[1,5]) custom-call(%broadcast.2), custom_call_target= "TopK", to_apply=%compare })", kComparator); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_string)); EXPECT_THAT(TopkDecomposer().Run(module.get()), IsOkAndHolds(true)); auto round_trip = [](HloModule* module) { EXPECT_THAT(TopkRewriter([](const HloSortInstruction*, int64_t) { return true; }).Run(module), IsOkAndHolds(true)); EXPECT_THAT(TopKSplitter(1024).Run(module), IsOkAndHolds(true)); EXPECT_THAT(TopkDecomposer().Run(module), IsOkAndHolds(true)); EXPECT_TRUE(HloDCE().Run(module).status().ok()); }; EXPECT_TRUE(RunAndCompare(std::move(module), std::nullopt, round_trip)); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/topk_splitter.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/topk_splitter_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
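Editor's note: the batch-split arithmetic in TopkSplitterVisitor::HandleCustomCall above, recomputed in a standalone program for the shapes in the SplitsTopK test (n = 2^30, k = 5) with split_threshold = 1048576, the value the SplitFailsUnderThreshold test passes explicitly. This is an illustration, not XLA code; it needs C++20 for std::bit_floor.

#include <algorithm>
#include <bit>
#include <cstddef>
#include <iostream>

int main() {
  const std::size_t kMaximumBatchSize = 1024;
  std::size_t n = std::size_t{1} << 30;  // f32[1,1073741824] input
  std::size_t k = 5;
  std::size_t split_threshold = 1048576;
  // Same formula as the pass: largest power-of-two batch that keeps each
  // slice at least split_threshold long, capped at 1024.
  std::size_t new_batch =
      std::min(std::bit_floor(n / split_threshold), kMaximumBatchSize);
  std::size_t new_n = n / new_batch;
  std::cout << "new_batch=" << new_batch << " new_n=" << new_n
            << " final_sort_size=" << new_batch * k << "\n";
  // Prints new_batch=1024 new_n=1048576 final_sort_size=5120: the huge TopK
  // becomes a [1024, 2^20] batched TopK plus a small sort over 5120 values.
  return 0;
}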
834bb1af-cfe3-4222-b0e6-4f6bafc3759b
cpp
tensorflow/tensorflow
move_copy_to_users
third_party/xla/xla/service/gpu/transforms/move_copy_to_users.cc
third_party/xla/xla/service/gpu/transforms/move_copy_to_users_test.cc
#include "xla/service/gpu/transforms/move_copy_to_users.h" #include <vector> #include "absl/container/flat_hash_set.h" #include "absl/status/status.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/hlo/ir/dfs_hlo_visitor_with_default.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/layout.h" #include "xla/service/hlo_creation_utils.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" #include "tsl/platform/statusor.h" namespace xla { namespace { class MoveCopyToUsersVisitor : public DfsHloRewriteVisitor { absl::Status HandlePad(HloInstruction* hlo) override { HloInstruction* operand = hlo->mutable_operand(0); HloInstruction* c = hlo->mutable_operand(1); if (operand->opcode() == HloOpcode::kCopy) { HloInstruction* copied = operand->mutable_operand(0); TF_ASSIGN_OR_RETURN( HloInstruction * earlier_pad, MakePadHlo(copied, c, hlo->padding_config(), &hlo->metadata())); *earlier_pad->mutable_shape()->mutable_layout() = copied->shape().layout(); HloInstruction* later_copy = MakeCopyHlo(earlier_pad, hlo->shape()); TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, later_copy)); } return absl::OkStatus(); } absl::Status HandleSlice(HloInstruction* hlo) override { HloInstruction* operand = hlo->mutable_operand(0); if (operand->opcode() == HloOpcode::kCopy) { HloInstruction* copied = operand->mutable_operand(0); TF_ASSIGN_OR_RETURN( HloInstruction * earlier_slice, MakeSliceHlo(copied, hlo->slice_starts(), hlo->slice_limits(), hlo->slice_strides(), &hlo->metadata())); *earlier_slice->mutable_shape()->mutable_layout() = copied->shape().layout(); HloInstruction* later_copy = MakeCopyHlo(earlier_slice, hlo->shape()); TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, later_copy)); } return absl::OkStatus(); } absl::Status HandleDynamicSlice(HloInstruction* hlo) override { HloInstruction* operand = hlo->mutable_operand(0); if (operand->opcode() == HloOpcode::kCopy) { HloInstruction* copied = operand->mutable_operand(0); TF_ASSIGN_OR_RETURN( HloInstruction * earlier_slice, MakeDynamicSliceHlo( copied, absl::Span<HloInstruction* const>(hlo->operands()).subspan(1), hlo->dynamic_slice_sizes(), &hlo->metadata())); *earlier_slice->mutable_shape()->mutable_layout() = copied->shape().layout(); HloInstruction* later_copy = MakeCopyHlo(earlier_slice, hlo->shape()); TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, later_copy)); } return absl::OkStatus(); } absl::Status HandleReduceWindow(HloInstruction* hlo) override { HloInstruction* operand = hlo->mutable_operand(0); if (operand->opcode() == HloOpcode::kCopy) { HloInstruction* copied = operand->mutable_operand(0); TF_ASSIGN_OR_RETURN( HloInstruction * earlier_reduce_window, MakeReduceWindowHlo(copied, hlo->mutable_operand(1), hlo->window(), hlo->called_computations()[0], &hlo->metadata())); *earlier_reduce_window->mutable_shape()->mutable_layout() = copied->shape().layout(); HloInstruction* later_copy = MakeCopyHlo(earlier_reduce_window, hlo->shape()); TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, later_copy)); } return absl::OkStatus(); } absl::Status HandleReduce(HloInstruction* hlo) override { HloInstruction* operand = hlo->mutable_operand(0); if (operand->opcode() == HloOpcode::kCopy && !hlo->shape().IsTuple()) { HloInstruction* new_reduce = hlo->AddInstruction( hlo->CloneWithNewOperands(hlo->shape(), {operand->mutable_operand(0), hlo->mutable_operand(1)})); TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, new_reduce)); } return absl::OkStatus(); } absl::Status 
HandleBitcastConvert(HloInstruction* hlo) override { return absl::OkStatus(); } absl::Status HandleElementwiseUnary(HloInstruction* hlo) override { HloInstruction* operand = hlo->mutable_operand(0); if (hlo->opcode() == HloOpcode::kReducePrecision) { return absl::OkStatus(); } if (operand->opcode() == HloOpcode::kCopy) { HloInstruction* copied = operand->mutable_operand(0); TF_ASSIGN_OR_RETURN( HloInstruction * earlier_elementwise, MakeUnaryHlo(hlo->opcode(), copied, &hlo->metadata())); HloInstruction* later_copy = MakeCopyHlo(earlier_elementwise, hlo->shape()); TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, later_copy)); } return absl::OkStatus(); } absl::Status HandleReverse(HloInstruction* hlo) override { HloInstruction* operand = hlo->mutable_operand(0); if (operand->opcode() == HloOpcode::kCopy) { HloInstruction* copied = operand->mutable_operand(0); TF_ASSIGN_OR_RETURN( HloInstruction * earlier_reverse, MakeReverseHlo(copied, hlo->dimensions(), &hlo->metadata())); HloInstruction* later_copy = MakeCopyHlo(earlier_reverse, hlo->shape()); TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, later_copy)); } return absl::OkStatus(); } absl::Status HandleConvert(HloInstruction* hlo) override { HloInstruction* operand = hlo->mutable_operand(0); if (operand->opcode() == HloOpcode::kCopy) { HloInstruction* copied = operand->mutable_operand(0); HloInstruction* earlier_convert = MakeConvertToHlo( copied, hlo->shape().element_type(), &hlo->metadata()); HloInstruction* later_copy = MakeCopyHlo(earlier_convert, hlo->shape()); TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, later_copy)); } return absl::OkStatus(); } absl::Status HandleElementwiseBinary(HloInstruction* hlo) override { HloInstruction* a = hlo->mutable_operand(0); HloInstruction* b = hlo->mutable_operand(1); if (a->opcode() == HloOpcode::kCopy && b->opcode() == HloOpcode::kCopy) { HloInstruction* copied_a = a->mutable_operand(0); HloInstruction* copied_b = b->mutable_operand(0); if (copied_a->shape() == copied_b->shape()) { HloInstruction* earlier_elementwise; if (hlo->opcode() == HloOpcode::kCompare) { TF_ASSIGN_OR_RETURN( earlier_elementwise, MakeCompareHlo(hlo->comparison_direction(), copied_a, copied_b, &hlo->metadata())); } else { TF_ASSIGN_OR_RETURN(earlier_elementwise, MakeBinaryHlo(hlo->opcode(), copied_a, copied_b, &hlo->metadata())); } HloInstruction* later_copy = MakeCopyHlo(earlier_elementwise, hlo->shape()); TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, later_copy)); } } return absl::OkStatus(); } absl::Status HandleConcatenate(HloInstruction* hlo) override { const HloInstruction* first = hlo->operand(0); if (first->opcode() != HloOpcode::kCopy) { return absl::OkStatus(); } const HloInstruction* inner_op = first->operand(0); const Layout& inner_op_layout = inner_op->shape().layout(); std::vector<HloInstruction*> new_operands; new_operands.reserve(hlo->operand_count()); for (HloInstruction* op : hlo->mutable_operands()) { if (op->opcode() != HloOpcode::kCopy || op->operand(0)->shape().layout() != inner_op_layout) { VLOG(3) << "Mismatch between " << op->ToString() << " and expected op layout " << inner_op_layout.ToString(); return absl::OkStatus(); } new_operands.push_back(op->mutable_operand(0)); } TF_ASSIGN_OR_RETURN( HloInstruction * new_concat, MakeConcatHlo(new_operands, hlo->concatenate_dimension())); *new_concat->mutable_shape()->mutable_layout() = inner_op_layout; HloInstruction* new_copy = MakeCopyHlo(new_concat, hlo->shape()); TF_RETURN_IF_ERROR(ReplaceInstruction(hlo, new_copy)); return absl::OkStatus(); } }; } absl::StatusOr<bool> 
MoveCopyToUsers::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { return MoveCopyToUsersVisitor{}.RunOnModule(module, execution_threads); } }
#include "xla/service/gpu/transforms/move_copy_to_users.h" #include <optional> #include "absl/strings/string_view.h" #include "xla/service/layout_assignment.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/test.h" namespace xla { namespace { class MoveCopyToUsersTest : public HloTestBase { public: MoveCopyToUsersTest() : HloTestBase(true, true, LayoutAssignment::InstructionCanChangeLayout) {} void CheckMoveCopyToUsers(absl::string_view hlo, std::optional<absl::string_view> expected) { RunAndFilecheckHloRewrite(hlo, MoveCopyToUsers{}, expected); } }; TEST_F(MoveCopyToUsersTest, Pad) { const char* hlo = R"( HloModule module ENTRY main { input = s8[1,17,9,9]{3,1,2,0} parameter(0) copy = s8[1,17,9,9]{1,3,2,0} copy(input) constant = s8[] constant(0) ROOT pad = s8[1,32,9,9]{1,3,2,0} pad(copy, constant), padding=0_0x0_15x0_0x0_0 } )"; CheckMoveCopyToUsers(hlo, R"( )"); } TEST_F(MoveCopyToUsersTest, Unary) { const char* hlo = R"( HloModule module ENTRY main { input = f32[1,17,9,9]{3,2,1,0} parameter(0) copy = f32[1,17,9,9]{1,3,2,0} copy(input) ROOT pad = f32[1,17,9,9]{1,3,2,0} sqrt(copy) } )"; CheckMoveCopyToUsers(hlo, R"( )"); } TEST_F(MoveCopyToUsersTest, Reverse) { const char* hlo = R"( HloModule module ENTRY main { input = f32[1,17,9,9]{3,2,1,0} parameter(0) copy = f32[1,17,9,9]{1,3,2,0} copy(input) ROOT pad = f32[1,17,9,9]{1,3,2,0} reverse(copy), dimensions={1,2} } )"; CheckMoveCopyToUsers(hlo, R"( )"); } TEST_F(MoveCopyToUsersTest, Convert) { const char* hlo = R"( HloModule module ENTRY main { input = f32[1,17,9,9]{3,2,1,0} parameter(0) copy = f32[1,17,9,9]{1,3,2,0} copy(input) ROOT converted = f16[1,17,9,9]{1,3,2,0} convert(copy) } )"; CheckMoveCopyToUsers(hlo, R"( )"); } TEST_F(MoveCopyToUsersTest, Slice) { const char* hlo = R"( HloModule module ENTRY main { input = f32[1,17,9,9]{3,2,1,0} parameter(0) copy = f32[1,17,9,9]{1,3,2,0} copy(input) ROOT slice = f32[1,4,6,6]{1,3,2,0} slice(copy), slice={[0:1],[0:4],[0:6],[0:6]} } )"; CheckMoveCopyToUsers(hlo, R"( )"); } TEST_F(MoveCopyToUsersTest, DynamicSlice) { const char* hlo = R"( HloModule module ENTRY main { input = f32[1,17,9,9]{3,2,1,0} parameter(0) copy = f32[1,17,9,9]{1,3,2,0} copy(input) p0 = s32[] parameter(1) p1 = s32[] parameter(2) p2 = s32[] parameter(3) p3 = s32[] parameter(4) ROOT ds = f32[1,4,6,6]{1,3,2,0} dynamic-slice(copy, p0, p1, p2, p3), dynamic_slice_sizes={1,4,6,6} } )"; CheckMoveCopyToUsers(hlo, R"( )"); } TEST_F(MoveCopyToUsersTest, ReduceWindow) { const char* hlo = R"( HloModule R2Window mul { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT mul = f32[] multiply(lhs, rhs) } ENTRY R2Window { operand = f32[256,384]{1,0} parameter(0) c = f32[256,384]{0,1} copy(operand) constant = f32[] constant(1) ROOT reduce-window = f32[256,384]{0,1} reduce-window(c, constant), window={size=2x3 pad=0_1x1_1}, to_apply=mul } )"; CheckMoveCopyToUsers(hlo, R"( )"); } TEST_F(MoveCopyToUsersTest, Reduce) { const char* hlo = R"( HloModule R2 mul { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT mul = f32[] multiply(lhs, rhs) } ENTRY R2 { operand = f32[256,384,10]{2,1,0} parameter(0) c = f32[256,384,10]{0,1,2} copy(operand) constant = f32[] constant(1) ROOT reduce = f32[384,10]{0,1} reduce(c, constant), dimensions={0}, to_apply=mul } )"; CheckMoveCopyToUsers(hlo, R"( )"); } TEST_F(MoveCopyToUsersTest, Binary) { const char* hlo = R"( HloModule module ENTRY main { input = f32[1,17,9,9]{3,2,1,0} parameter(0) input2 = f32[1,17,9,9]{3,2,1,0} parameter(1) copy = f32[1,17,9,9]{1,3,2,0} copy(input) copy2 = 
f32[1,17,9,9]{1,3,2,0} copy(input2) ROOT add = f32[1,17,9,9]{1,3,2,0} add(copy, copy2) } )"; CheckMoveCopyToUsers(hlo, R"( )"); } TEST_F(MoveCopyToUsersTest, BinaryDifferentLayoutNoChange) { const char* hlo = R"( HloModule module ENTRY main { input = f32[1,17,9,9]{3,2,0,1} parameter(0) input2 = f32[1,17,9,9]{3,2,1,0} parameter(1) copy = f32[1,17,9,9]{1,3,2,0} copy(input) copy2 = f32[1,17,9,9]{1,3,2,0} copy(input2) ROOT add = f32[1,17,9,9]{1,3,2,0} add(copy, copy2) } )"; CheckMoveCopyToUsers(hlo, std::nullopt); } TEST_F(MoveCopyToUsersTest, Concat) { const char* hlo = R"( HloModule module ENTRY main { input = f32[1,17,9,9]{3,2,1,0} parameter(0) input2 = f32[5,17,9,9]{3,2,1,0} parameter(1) copy = f32[1,17,9,9]{1,3,2,0} copy(input) copy2 = f32[5,17,9,9]{1,3,2,0} copy(input2) ROOT add = f32[6,17,9,9]{1,3,2,0} concatenate(copy, copy2), dimensions={0} } )"; CheckMoveCopyToUsers(hlo, R"( )"); } TEST_F(MoveCopyToUsersTest, ConcatDifferentLayoutNoChange) { const char* hlo = R"( HloModule module ENTRY main { input = f32[1,17,9,9]{3,2,0,1} parameter(0) input2 = f32[1,17,9,9]{3,2,1,0} parameter(1) copy = f32[1,17,9,9]{1,3,2,0} copy(input) copy2 = f32[1,17,9,9]{1,3,2,0} copy(input2) ROOT add = f32[2,17,9,9]{1,3,2,0} concatenate(copy, copy2), dimensions={0} } )"; CheckMoveCopyToUsers(hlo, std::nullopt); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/move_copy_to_users.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/move_copy_to_users_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
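Editor's note: a toy, self-contained sketch of the rewrite that HandleElementwiseUnary in MoveCopyToUsers applies: a layout-changing copy feeding an elementwise op is sunk below the op, so sqrt(copy(x)) becomes copy(sqrt(x)) and the copy sits next to its users. The Node struct and MoveCopyToUser helper are invented for this illustration and are not XLA types.

#include <iostream>
#include <memory>
#include <string>

struct Node {
  std::string op;
  std::shared_ptr<Node> operand;  // single-operand toy IR
};

// If a non-copy node consumes a copy, rebuild the node on the copy's input
// and place a fresh copy on top.
std::shared_ptr<Node> MoveCopyToUser(std::shared_ptr<Node> root) {
  if (root->op != "copy" && root->operand && root->operand->op == "copy") {
    auto copied = root->operand->operand;                            // x
    auto earlier = std::make_shared<Node>(Node{root->op, copied});   // op(x)
    return std::make_shared<Node>(Node{"copy", earlier});            // copy(op(x))
  }
  return root;
}

std::string ToString(const std::shared_ptr<Node>& n) {
  return n->operand ? n->op + "(" + ToString(n->operand) + ")" : n->op;
}

int main() {
  auto x = std::make_shared<Node>(Node{"x", nullptr});
  auto copy = std::make_shared<Node>(Node{"copy", x});
  auto sqrt_of_copy = std::make_shared<Node>(Node{"sqrt", copy});
  std::cout << ToString(sqrt_of_copy) << " -> "
            << ToString(MoveCopyToUser(sqrt_of_copy)) << "\n";
  // Prints: sqrt(copy(x)) -> copy(sqrt(x))
  return 0;
}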
054fd978-6e2f-44bb-8c34-d24c33a0dccc
cpp
tensorflow/tensorflow
gemm_broadcast_folding_rewriter
third_party/xla/xla/service/gpu/transforms/gemm_broadcast_folding_rewriter.cc
third_party/xla/xla/service/gpu/transforms/gemm_broadcast_folding_rewriter_test.cc
#include "xla/service/gpu/transforms/gemm_broadcast_folding_rewriter.h" #include <cstdint> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/dfs_hlo_visitor_with_default.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/cublas_cudnn.h" #include "xla/service/pattern_matcher.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace m = match; class GemmBroadcastFoldingVisitor : public DfsHloRewriteVisitor { public: absl::Status HandleCustomCall(HloInstruction *instr) override { HloInstruction *existing_gemm; HloInstruction *bcast; if (Match(instr, m::CustomCall(&existing_gemm, {kGemmCallTarget, kCublasLtMatmulCallTarget}) .WithOperand(0, m::Broadcast(&bcast, m::Op()))) || (Match(instr, m::CustomCall(&existing_gemm, {kGemmCallTarget, kCublasLtMatmulCallTarget}) .WithOperand(1, m::Broadcast(&bcast, m::Op()))))) { TF_ASSIGN_OR_RETURN(auto gpu_config, existing_gemm->backend_config<GpuBackendConfig>()); GemmBackendConfig &config = *gpu_config.mutable_gemm_backend_config(); DotDimensionNumbers *dim_nums = config.mutable_dot_dimension_numbers(); int bcast_operand_index = instr->operand_index(bcast); int num_bcast_dims = (bcast->shape().dimensions_size() - bcast->operand(0)->shape().dimensions_size()); int num_batch_dims = dim_nums->lhs_batch_dimensions_size(); const tsl::protobuf::RepeatedField<int64_t> &batch_dimensions = (bcast_operand_index == 1) ? dim_nums->rhs_batch_dimensions() : dim_nums->lhs_batch_dimensions(); for (int64_t bcast_dim : bcast->dimensions()) { if (bcast_dim < num_bcast_dims) { return absl::OkStatus(); } if (absl::c_linear_search(batch_dimensions, bcast_dim)) { return absl::OkStatus(); } } CHECK_GT(num_bcast_dims, 0); if (num_bcast_dims != num_batch_dims) { return absl::OkStatus(); } if (bcast_operand_index == 1) { CHECK_EQ(dim_nums->rhs_contracting_dimensions_size(), 1); dim_nums->set_rhs_contracting_dimensions( 0, dim_nums->rhs_contracting_dimensions(0) - num_batch_dims); dim_nums->clear_rhs_batch_dimensions(); } else { CHECK_EQ(dim_nums->lhs_contracting_dimensions_size(), 1); dim_nums->set_lhs_contracting_dimensions( 0, dim_nums->lhs_contracting_dimensions(0) - num_batch_dims); dim_nums->clear_lhs_batch_dimensions(); } TF_RETURN_IF_ERROR(existing_gemm->ReplaceOperandWithDifferentShape( bcast_operand_index, bcast->mutable_operand(0))); TF_RETURN_IF_ERROR(existing_gemm->set_backend_config(gpu_config)); MarkAsChanged(); } return absl::OkStatus(); } }; static absl::StatusOr<bool> RunOnComputation(HloComputation *computation) { GemmBroadcastFoldingVisitor visitor; TF_RETURN_IF_ERROR(computation->Accept(&visitor)); return visitor.changed(); } absl::StatusOr<bool> GemmBroadcastFoldingRewriter::Run( HloModule *module, const absl::flat_hash_set<absl::string_view> &execution_threads) { bool changed = false; for (HloComputation *computation : module->MakeNonfusionComputations(execution_threads)) { TF_ASSIGN_OR_RETURN(bool result, RunOnComputation(computation)); changed |= result; } return changed; } } }
#include "xla/service/gpu/transforms/gemm_broadcast_folding_rewriter.h" #include <memory> #include "xla/error_spec.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/service/gpu/tests/gpu_codegen_test.h" #include "xla/service/gpu/transforms/gemm_rewriter.h" #include "xla/stream_executor/semantic_version.h" #include "tsl/platform/statusor.h" #include "tsl/platform/test.h" namespace xla { namespace gpu { namespace { class GemmBroadcastFoldingRewriteTest : public GpuCodegenTest { protected: const auto& GpuComputeComp() { return backend() .default_stream_executor() ->GetDeviceDescription() .gpu_compute_capability(); } DebugOptions GetDebugOptionsForTest() override { DebugOptions debug_options = GpuCodegenTest::GetDebugOptionsForTest(); debug_options.set_xla_gpu_enable_triton_gemm(false); debug_options.set_xla_gpu_gemm_rewrite_size_threshold(0); return debug_options; } }; TEST_F(GemmBroadcastFoldingRewriteTest, BroadcastedStridedRewriteRhs) { const char* hlo_text = R"( HloModule BroadcastedInput ENTRY AddDotsFunc { x = f32[3,2,2]{2,1,0} parameter(0) y = f32[2,2]{1,0} parameter(1) y_broadcast = f32[3,2,2]{2,1,0} broadcast(y), dimensions={1,2} ROOT dot_a = f32[3,2,2]{2,1,0} dot(x, y_broadcast), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={1} } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[3,2,2], {{.*}}: f32[2,2]) -> f32[3,2,2] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[3,2,2]{2,1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[2,2]{1,0} parameter(1) ; CHECK-NEXT: [[GEMM:%[^ ]+]] = {{.*}} custom-call([[P0]], [[P1]]), ; CHECK: custom_call_target="__cublas${{(lt\$matmul|gemm)}}", ; CHECK: backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["2"] ; CHECK-DAG: "rhs_contracting_dimensions":["0"] ; CHECK-DAG: "lhs_batch_dimensions":["0"] ; CHECK-DAG: "rhs_batch_dimensions":[] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" ; CHECK: } )"); } TEST_F(GemmBroadcastFoldingRewriteTest, BroadcastedStridedRewriteLhs) { const char* hlo_text = R"( HloModule BroadcastedInput ENTRY AddDotsFunc { x = f32[2,2]{1,0} parameter(0) y = f32[3,2,2]{2,1,0} parameter(1) x_broadcast = f32[3,2,2]{2,1,0} broadcast(x), dimensions={1,2} ROOT dot_a = f32[3,2,2]{2,1,0} dot(x_broadcast, y), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={1} } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); MatchOptimizedHlo(hlo_text, R"( ; CHECK-LABEL: ENTRY %AddDotsFunc ({{.*}}: f32[2,2], {{.*}}: f32[3,2,2]) -> f32[3,2,2] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f32[2,2]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = f32[3,2,2]{2,1,0} parameter(1) ; CHECK-NEXT: [[GEMM:%[^ ]+]] = {{.*}} custom-call([[P0]], [[P1]]), ; CHECK : custom_call_target="__cublas${{(lt\$matmul|gemm)}}", ; CHECK : backend_config={ ; CHECK-DAG: "alpha_real":1 ; CHECK-DAG: "alpha_imag":0 ; CHECK-DAG: "beta":0 ; CHECK-DAG: "dot_dimension_numbers":{ ; CHECK-DAG: "lhs_contracting_dimensions":["1"] ; CHECK-DAG: "rhs_contracting_dimensions":["1"] ; CHECK-DAG: "lhs_batch_dimensions":[] ; CHECK-DAG: "rhs_batch_dimensions":["0"] ; CHECK-DAG: } ; CHECK-DAG: "precision_config":{ ; CHECK-DAG: "operand_precision":["DEFAULT","DEFAULT"] ; CHECK-DAG: } ; CHECK-DAG: "epilogue":"DEFAULT" 
; CHECK: } )"); } TEST_F(GemmBroadcastFoldingRewriteTest, BroadcastedStridedRewriteRhsPassChanged) { const char* hlo_text = R"( HloModule BroadcastedInput ENTRY AddDotsFunc { x = f32[3,2,2]{2,1,0} parameter(0) y = f32[2,2]{1,0} parameter(1) y_broadcast = f32[3,2,2]{2,1,0} broadcast(y), dimensions={1,2} ROOT dot_a = f32[3,2,2]{2,1,0} dot(x, y_broadcast), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={1} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text)); GemmRewriter gemm_rewriter( GpuComputeComp(), stream_executor::SemanticVersion{12, 4, 0}); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&gemm_rewriter, module.get())); EXPECT_TRUE(changed); GemmBroadcastFoldingRewriter pass; TF_ASSERT_OK_AND_ASSIGN(changed, this->RunHloPass(&pass, module.get())); EXPECT_TRUE(changed); } TEST_F(GemmBroadcastFoldingRewriteTest, BroadcastedStridedRewriteLhsPassChanged) { const char* hlo_text = R"( HloModule BroadcastedInput ENTRY AddDotsFunc { x = f32[2,2]{1,0} parameter(0) y = f32[3,2,2]{2,1,0} parameter(1) x_broadcast = f32[3,2,2]{2,1,0} broadcast(x), dimensions={1,2} ROOT dot_a = f32[3,2,2]{2,1,0} dot(x_broadcast, y), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={1} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text)); GemmRewriter gemm_rewriter( GpuComputeComp(), stream_executor::SemanticVersion{12, 4, 0}); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&gemm_rewriter, module.get())); EXPECT_TRUE(changed); GemmBroadcastFoldingRewriter pass; TF_ASSERT_OK_AND_ASSIGN(changed, this->RunHloPass(&pass, module.get())); EXPECT_TRUE(changed); } TEST_F(GemmBroadcastFoldingRewriteTest, LHSBatchDimNonZero) { const char* hlo_text = R"( HloModule LHSBatchDimNonZero ENTRY %LHSBatchDimNonZero (Arg_1: f32[4,3], Arg_2: f32[4,7,3]) -> f32[4,7,7] { %Arg_1 = f32[4,3]{1,0} parameter(0) %Arg_2 = f32[4,7,3]{2,1,0} parameter(1) %broadcast.22 = f32[7,4,3]{2,1,0} broadcast(f32[4,3]{1,0} %Arg_1), dimensions={1,2} ROOT %dot.24 = f32[4,7,7]{2,1,0} dot(f32[7,4,3]{2,1,0} %broadcast.22, f32[4,7,3]{2,1,0} %Arg_2), lhs_batch_dims={1}, lhs_contracting_dims={2}, rhs_batch_dims={0}, rhs_contracting_dims={2} } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text)); GemmRewriter gemm_rewriter( GpuComputeComp(), stream_executor::SemanticVersion{12, 4, 0}); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&gemm_rewriter, module.get())); EXPECT_TRUE(changed); GemmBroadcastFoldingRewriter pass; TF_ASSERT_OK_AND_ASSIGN(changed, this->RunHloPass(&pass, module.get())); EXPECT_FALSE(changed); } TEST_F(GemmBroadcastFoldingRewriteTest, RHSBatchDimNonZero) { const char* hlo_text = R"( HloModule RHSBatchDimNonZero ENTRY %RHSBatchDimNonZero (Arg_1: f32[4,3], Arg_2: f32[4,7,3]) -> f32[4,7,7] { %Arg_1 = f32[4,3]{1,0} parameter(0) %Arg_2 = f32[4,7,3]{2,1,0} parameter(1) %broadcast.22 = f32[7,4,3]{2,1,0} broadcast(f32[4,3]{1,0} %Arg_1), dimensions={1,2} ROOT %dot.24 = f32[4,7,7]{2,1,0} dot(f32[4,7,3]{2,1,0} %Arg_2, f32[7,4,3]{2,1,0} %broadcast.22), lhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_batch_dims={1}, rhs_contracting_dims={2} } )"; EXPECT_TRUE(RunAndCompare(hlo_text, ErrorSpec{1e-5, 1e-5})); TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_text)); GemmRewriter gemm_rewriter( GpuComputeComp(), 
stream_executor::SemanticVersion{12, 4, 0}); TF_ASSERT_OK_AND_ASSIGN(bool changed, this->RunHloPass(&gemm_rewriter, module.get())); EXPECT_TRUE(changed); GemmBroadcastFoldingRewriter pass; TF_ASSERT_OK_AND_ASSIGN(changed, this->RunHloPass(&pass, module.get())); EXPECT_FALSE(changed); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/gemm_broadcast_folding_rewriter.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/gemm_broadcast_folding_rewriter_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
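Usage note (illustrative, not part of the dataset record): the two *PassChanged tests above first run GemmRewriter so the dot becomes a cuBLAS custom call, and only then does GemmBroadcastFoldingRewriter fire. A minimal sketch of driving the folding pass outside the HloTestBase fixture might look as follows; the wrapper name FoldBroadcastIntoGemm and the use of ParseAndReturnUnverifiedModule instead of the verified variant are assumptions, not code from the repository.

// Hedged sketch: run GemmBroadcastFoldingRewriter on a freshly parsed module.
// Assumes an XLA build environment; the wrapper name is hypothetical.
#include <memory>

#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/transforms/gemm_broadcast_folding_rewriter.h"
#include "xla/service/hlo_parser.h"
#include "tsl/platform/statusor.h"

absl::StatusOr<bool> FoldBroadcastIntoGemm(absl::string_view hlo_text) {
  // Parse the HLO text into a module (the tests use the verified variant).
  TF_ASSIGN_OR_RETURN(std::unique_ptr<xla::HloModule> module,
                      xla::ParseAndReturnUnverifiedModule(hlo_text));
  // The pass only matches cuBLAS gemm custom calls, so in a real pipeline
  // GemmRewriter must have produced those custom calls beforehand.
  xla::gpu::GemmBroadcastFoldingRewriter pass;
  // The single-argument Run() applies the pass with no thread restriction.
  return pass.Run(module.get());
}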
9cb97eb8-0ab8-44ff-92e5-3ba9b6eeb3a6
cpp
tensorflow/tensorflow
cudnn_vectorize_convolutions
third_party/xla/xla/service/gpu/transforms/cudnn_vectorize_convolutions.cc
third_party/xla/xla/service/gpu/transforms/cudnn_vectorize_convolutions_test.cc
#include "xla/service/gpu/transforms/cudnn_vectorize_convolutions.h" #include <cstdint> #include <optional> #include <string> #include <tuple> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_set.h" #include "absl/container/inlined_vector.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "xla/client/xla_builder.h" #include "xla/client/xla_computation.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_clone_context.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/primitive_util.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/cublas_cudnn.h" #include "xla/service/gpu/cudnn_support_utils.h" #include "xla/service/gpu/stream_executor_util.h" #include "xla/service/hlo_module_config.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/stream_executor/device_description.h" #include "xla/stream_executor/dnn.h" #include "xla/util.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { static std::vector<HloCustomCallInstruction*> GetRelevantConvs( HloComputation* comp) { std::vector<HloCustomCallInstruction*> convs; for (HloInstruction* instr : comp->instructions()) { if (instr->opcode() != HloOpcode::kCustomCall || (instr->custom_call_target() != kCudnnConvForwardCallTarget && instr->custom_call_target() != kCudnnConvBiasActivationForwardCallTarget) || instr->operand_count() < 2) { continue; } PrimitiveType input_ty = instr->operand(0)->shape().element_type(); PrimitiveType output_ty = instr->shape().tuple_shapes(0).element_type(); if (input_ty == output_ty && (input_ty == S8 || input_ty == U8)) { convs.push_back(Cast<HloCustomCallInstruction>(instr)); } } return convs; } static absl::StatusOr<HloComputation*> BuilderToHloComputation( XlaBuilder& b, XlaOp root, HloComputation* sibling_computation) { TF_ASSIGN_OR_RETURN(XlaComputation comp, b.Build(root)); TF_ASSIGN_OR_RETURN(ProgramShape program_shape, comp.GetProgramShape()); HloModuleConfig config(program_shape); TF_ASSIGN_OR_RETURN(auto new_module, HloModule::CreateFromProto(comp.proto(), config)); HloModule* dest_module = sibling_computation->parent(); HloCloneContext context(dest_module); return dest_module->DeepCloneComputation(new_module->entry_computation(), &context); } static XlaOp SplitAtDim(XlaOp instr, int64_t dim, int64_t vect_size) { XlaBuilder& b = *instr.builder(); Shape shape = b.GetShape(instr).value(); DimensionVector new_dims(shape.dimensions().begin(), shape.dimensions().end()); CHECK_EQ(new_dims[dim] % vect_size, 0); new_dims[dim] /= vect_size; new_dims.insert(new_dims.begin() + dim + 1, vect_size); return Reshape(instr, new_dims); } static Shape SplitShapeAtDim(Shape shape, int64_t dim, int64_t vect_size) { DimensionVector new_dims(shape.dimensions().begin(), shape.dimensions().end()); CHECK_EQ(new_dims[dim] % vect_size, 0); new_dims[dim] /= vect_size; new_dims.insert(new_dims.begin() + dim + 1, vect_size); return ShapeUtil::MakeShape(shape.element_type(), new_dims); } static XlaOp MoveDim(XlaOp instr, int64_t src, int64_t dst) { XlaBuilder& b = *instr.builder(); int64_t rank = b.GetShape(instr)->dimensions_size(); DimensionVector idxs(rank); absl::c_iota(idxs, 0); if (src < dst) { idxs.insert(idxs.begin() + dst, src); idxs.erase(idxs.begin() + src); 
} else { idxs.erase(idxs.begin() + src); idxs.insert(idxs.begin() + dst, src); } return Transpose(instr, idxs); } static XlaOp RevectorizeInstr(XlaOp instr, int64_t dim, int64_t vect_dim, int64_t vect_size) { XlaBuilder& b = *instr.builder(); Shape shape = b.GetShape(instr).value(); auto size = [&](int64_t d) { return shape.dimensions(d); }; CHECK_LE(size(vect_dim), vect_size); CHECK_EQ(vect_size % size(vect_dim), 0); int64_t split_factor = vect_size / size(vect_dim); CHECK_EQ(size(dim) % split_factor, 0); instr = SplitAtDim(instr, dim, split_factor); if (vect_dim > dim) { vect_dim++; } instr = MoveDim(instr, dim + 1, vect_dim); if (vect_dim > dim) { vect_dim--; } return Collapse(instr, {vect_dim, vect_dim + 1}); } static XlaOp UnrevectorizeInstr(XlaOp instr, int64_t dim, int64_t vect_dim, int64_t orig_vect_size) { XlaBuilder& b = *instr.builder(); Shape shape = b.GetShape(instr).value(); auto size = [&](int64_t d) { return shape.dimensions(d); }; CHECK_GE(size(vect_dim), orig_vect_size); CHECK_EQ(size(vect_dim) % orig_vect_size, 0); instr = SplitAtDim(instr, vect_dim, orig_vect_size); if (dim > vect_dim) { dim++; } instr = MoveDim(instr, vect_dim, dim + 1); if (dim > vect_dim) { dim--; } return Collapse(instr, {dim, dim + 1}); } static ConvolutionDimensionNumbers VectorizeDnums( ConvolutionDimensionNumbers dnums, bool reordered_filter) { int64_t input_vect_dim = dnums.input_feature_dimension(); if (dnums.input_batch_dimension() > input_vect_dim) { dnums.set_input_batch_dimension(dnums.input_batch_dimension() + 1); } for (int64_t& d : *dnums.mutable_input_spatial_dimensions()) { if (d > input_vect_dim) { ++d; } } if (!reordered_filter) { int64_t kernel_vect_dim = dnums.kernel_input_feature_dimension(); if (dnums.kernel_output_feature_dimension() > kernel_vect_dim) { dnums.set_kernel_output_feature_dimension( dnums.kernel_output_feature_dimension() + 1); } for (int64_t& d : *dnums.mutable_kernel_spatial_dimensions()) { if (d > kernel_vect_dim) { ++d; } } } int64_t output_vect_dim = dnums.output_feature_dimension(); if (dnums.output_batch_dimension() > output_vect_dim) { dnums.set_output_batch_dimension(dnums.output_batch_dimension() + 1); } for (int64_t& d : *dnums.mutable_output_spatial_dimensions()) { if (d > output_vect_dim) { ++d; } } return dnums; } absl::Status ReorderInt8NchwVect(HloCustomCallInstruction* conv, XlaOp* operands) { bool has_bias = conv->operand_count() > 2; VLOG(1) << "Reordering filter" << (has_bias ? 
" and bias" : "") << " (replacement for cudnnReorderFilterAndBias)"; auto builder = operands->builder(); ConvolutionDimensionNumbers dnums = conv->convolution_dimension_numbers(); TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config, conv->backend_config<GpuBackendConfig>()); CudnnConvBackendConfig& config = *gpu_config.mutable_cudnn_conv_backend_config(); config.set_reordered_int8_nchw_vect(true); TF_RETURN_IF_ERROR(conv->set_backend_config(gpu_config)); TF_ASSIGN_OR_RETURN(Shape filter_shape, builder->GetShape(operands[1])); TF_ASSIGN_OR_RETURN(auto reorder, CudnnInferTransposeForFilterReordering( filter_shape, dnums)); XlaOp reshape = Reshape(reorder.transpose_shape, operands[1]); XlaOp transpose = Transpose(reshape, reorder.permutation); operands[1] = Reshape(reorder.result_shape, transpose); dnums.set_kernel_output_feature_dimension(0); dnums.set_kernel_input_feature_dimension(1); dnums.set_kernel_spatial_dimensions(0, 2); dnums.set_kernel_spatial_dimensions(1, 3); conv->set_convolution_dimension_numbers(dnums); if (has_bias) { TF_ASSIGN_OR_RETURN(Shape bias_shape, builder->GetShape(operands[2])); TF_ASSIGN_OR_RETURN(reorder, CudnnInferTransposeForBiasReordering(bias_shape)); reshape = Reshape(reorder.transpose_shape, operands[2]); transpose = Transpose(reshape, reorder.permutation); operands[2] = Reshape(reorder.result_shape, transpose); } return absl::OkStatus(); } static absl::StatusOr<bool> TryRevectorizeConv( const se::CudaComputeCapability& compute_capability, const se::dnn::VersionInfo& cudnn_version, HloCustomCallInstruction* conv, int vect_size) { const Shape& input_shape = conv->operand(0)->shape(); const Shape& kernel_shape = conv->operand(1)->shape(); const Shape& output_shape = conv->shape().tuple_shapes(0); const ConvolutionDimensionNumbers* dnums = &conv->convolution_dimension_numbers(); std::optional<int64_t> input_vect_dim; std::optional<int64_t> kernel_vect_dim; std::optional<int64_t> output_vect_dim; std::tie(input_vect_dim, kernel_vect_dim, output_vect_dim) = FindVectorizedFeatureDims(*dnums, input_shape, kernel_shape, output_shape); if (!input_vect_dim.has_value() || !kernel_vect_dim.has_value() || !output_vect_dim.has_value()) { return false; } int64_t input_feat_size = input_shape.dimensions(dnums->input_feature_dimension()); int64_t output_feat_size = output_shape.dimensions(dnums->output_feature_dimension()); int64_t input_vect_size = input_shape.dimensions(*input_vect_dim); int64_t output_vect_size = output_shape.dimensions(*output_vect_dim); if (vect_size % input_vect_size != 0 || vect_size % output_vect_size != 0 || input_feat_size % (vect_size / input_vect_size) != 0 || output_feat_size % (vect_size / output_vect_size) != 0) { return false; } if (primitive_util::IsIntegralType(input_shape.element_type())) { TF_ASSIGN_OR_RETURN(bool supported_target_vectorization, CudnnSupportsOptimizedIntegerConvolution( compute_capability, *conv, vect_size)); if (!supported_target_vectorization) { VLOG(3) << "Skipping re-vectorization of conv to vector size: " << vect_size << ": " << conv->ToString(); return false; } } VLOG(1) << "Re-vectorizing conv channels from " << input_shape.dimensions(*input_vect_dim) << " to " << vect_size << ": " << conv->ToString(); XlaBuilder b(absl::StrCat(conv->name(), ".revectorized")); b.SetOpMetadata(conv->metadata()); XlaOp filter = Parameter(&b, 1, conv->operand(1)->shape(), "filter"); absl::InlinedVector<XlaOp, 4> new_operands = { RevectorizeInstr(Parameter(&b, 0, conv->operand(0)->shape(), "input"), dnums->input_feature_dimension(), 
*input_vect_dim, vect_size), RevectorizeInstr(filter, dnums->kernel_input_feature_dimension(), *kernel_vect_dim, vect_size), }; if (conv->operand_count() > 2) { new_operands.push_back(Parameter(&b, 2, conv->operand(2)->shape(), "bias")); } if (conv->operand_count() > 3) { new_operands.push_back(RevectorizeInstr( Parameter(&b, 3, conv->operand(3)->shape(), "side_input"), dnums->input_feature_dimension(), *input_vect_dim, vect_size)); } if (conv->operand_count() > 4) { return InvalidArgument( "Don't understand a conv with more than 4 arguments: %s", conv->ToString()); } const auto& debug_options = conv->GetModule()->config().debug_options(); bool use_reordering = input_shape.element_type() == xla::S8 && vect_size == 32 && debug_options.xla_gpu_enable_cudnn_int8x32_convolution_reordering() && cudnn_version >= se::dnn::VersionInfo{8, 3, 0}; if (use_reordering) { int64_t kernel_vect_size = kernel_shape.dimensions(*kernel_vect_dim); if (kernel_vect_size == 4 || kernel_vect_size == 32) { new_operands[1] = filter; } TF_RETURN_IF_ERROR(ReorderInt8NchwVect(conv, new_operands.data())); dnums = &conv->convolution_dimension_numbers(); } DimensionVector new_output_dims(output_shape.dimensions().begin(), output_shape.dimensions().end()); new_output_dims[dnums->output_feature_dimension()] /= (vect_size / output_vect_size); new_output_dims[*output_vect_dim] = vect_size; XlaOp new_conv = CustomCallWithConvDnums( &b, conv->custom_call_target(), new_operands, ShapeUtil::MakeTupleShape( {ShapeUtil::MakeShape(output_shape.element_type(), new_output_dims), ShapeUtil::MakeShape(U8, {0})}), {}, conv->raw_backend_config_string(), false, {}, nullptr, conv->window(), *dnums); XlaOp new_conv_result = GetTupleElement(new_conv, 0); XlaOp new_conv_scratch = GetTupleElement(new_conv, 1); XlaOp new_conv_result_unrevectorized = UnrevectorizeInstr( new_conv_result, dnums->output_feature_dimension(), *output_vect_dim, output_shape.dimensions(*output_vect_dim)); TF_ASSIGN_OR_RETURN( HloComputation * new_conv_comp, BuilderToHloComputation( b, Tuple(&b, {new_conv_result_unrevectorized, new_conv_scratch}), conv->parent())); auto new_conv_comp_instrs = new_conv_comp->instructions(); auto new_conv_it = absl::c_find_if(new_conv_comp_instrs, [](HloInstruction* instr) { return instr->opcode() == HloOpcode::kCustomCall; }); if (new_conv_it != new_conv_comp_instrs.end()) { new_conv_comp->parent()->SetAndUniquifyInstrName(*new_conv_it, conv->name()); } VLOG(1) << "Re-vectorized conv to " << new_conv_comp->ToString(); TF_RETURN_IF_ERROR(conv->parent()->ReplaceWithNewInstruction( conv, HloInstruction::CreateCall(conv->shape(), conv->operands(), new_conv_comp))); return true; } static absl::StatusOr<bool> TryVectorizeConv( const se::CudaComputeCapability& compute_capability, const se::dnn::VersionInfo& cudnn_version, HloCustomCallInstruction* conv, int64_t vect_size) { const Shape& input_shape = conv->operand(0)->shape(); const Shape& output_shape = conv->shape().tuple_shapes(0); const ConvolutionDimensionNumbers* dnums = &conv->convolution_dimension_numbers(); int64_t in_channels = input_shape.dimensions(dnums->input_feature_dimension()); int64_t out_channels = output_shape.dimensions(dnums->output_feature_dimension()); if (in_channels % vect_size != 0 || out_channels % vect_size != 0) { return false; } if (input_shape.dimensions_size() > 2 + dnums->input_spatial_dimensions_size()) { return false; } if (primitive_util::IsIntegralType(input_shape.element_type())) { TF_ASSIGN_OR_RETURN(bool supported_target_vectorization, 
CudnnSupportsOptimizedIntegerConvolution( compute_capability, *conv, vect_size)); if (!supported_target_vectorization) { VLOG(3) << "Skipping vectorization of conv to vector size: " << vect_size << ": " << conv->ToString(); return false; } } VLOG(1) << "Vectorizing conv channels by " << vect_size << ": " << conv->ToString(); XlaBuilder b(absl::StrCat(conv->name(), ".revectorized")); b.SetOpMetadata(conv->metadata()); XlaOp filter = Parameter(&b, 1, conv->operand(1)->shape(), "filter"); absl::InlinedVector<XlaOp, 4> new_operands = { SplitAtDim(Parameter(&b, 0, conv->operand(0)->shape(), "input"), dnums->input_feature_dimension(), vect_size), SplitAtDim(filter, dnums->kernel_input_feature_dimension(), vect_size), }; if (conv->operand_count() > 2) { new_operands.push_back(Parameter(&b, 2, conv->operand(2)->shape(), "bias")); } if (conv->operand_count() > 3) { new_operands.push_back( SplitAtDim(Parameter(&b, 3, conv->operand(3)->shape(), "side_input"), dnums->output_feature_dimension(), vect_size)); } if (conv->operand_count() > 4) { return InvalidArgument( "Don't understand a conv with more than 4 arguments: %s", conv->ToString()); } const auto& debug_options = conv->GetModule()->config().debug_options(); bool use_reordering = input_shape.element_type() == xla::S8 && vect_size == 32 && debug_options.xla_gpu_enable_cudnn_int8x32_convolution_reordering() && cudnn_version >= se::dnn::VersionInfo{8, 3, 0}; if (use_reordering) { new_operands[1] = filter; TF_RETURN_IF_ERROR(ReorderInt8NchwVect(conv, new_operands.data())); dnums = &conv->convolution_dimension_numbers(); } Shape new_output_shape = SplitShapeAtDim( output_shape, dnums->output_feature_dimension(), vect_size); XlaOp new_conv = CustomCallWithConvDnums( &b, conv->custom_call_target(), new_operands, ShapeUtil::MakeTupleShape( {new_output_shape, ShapeUtil::MakeShape(U8, {0})}), {}, conv->raw_backend_config_string(), false, {}, nullptr, conv->window(), VectorizeDnums(*dnums, use_reordering)); XlaOp new_conv_result = GetTupleElement(new_conv, 0); XlaOp new_conv_scratch = GetTupleElement(new_conv, 1); XlaOp conv_result_collapsed = Collapse(new_conv_result, {dnums->output_feature_dimension(), dnums->output_feature_dimension() + 1}); TF_ASSIGN_OR_RETURN( HloComputation * new_conv_comp, BuilderToHloComputation( b, Tuple(&b, {conv_result_collapsed, new_conv_scratch}), conv->parent())); VLOG(1) << "Vectorized conv to: " << new_conv_comp->ToString(); TF_RETURN_IF_ERROR(conv->parent()->ReplaceWithNewInstruction( conv, HloInstruction::CreateCall(conv->shape(), conv->operands(), new_conv_comp))); return true; } } absl::StatusOr<bool> CudnnVectorizeConvolutions::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { bool changed = false; for (HloComputation* comp : module->MakeNonfusionComputations(execution_threads)) { for (HloCustomCallInstruction* conv : GetRelevantConvs(comp)) { bool local_changed = false; if (compute_capability_.IsAtLeast(7, 5)) { TF_ASSIGN_OR_RETURN( local_changed, TryRevectorizeConv(compute_capability_, cudnn_version_, conv, 32)); if (!local_changed) { TF_ASSIGN_OR_RETURN( local_changed, TryVectorizeConv(compute_capability_, cudnn_version_, conv, 32)); } } if (!local_changed) { TF_ASSIGN_OR_RETURN( local_changed, TryVectorizeConv(compute_capability_, cudnn_version_, conv, 4)); } changed |= local_changed; } } return changed; } } }
#include "xla/service/gpu/transforms/cudnn_vectorize_convolutions.h" #include <cstdint> #include <utility> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/algorithm/container.h" #include "absl/status/statusor.h" #include "xla/service/call_inliner.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/cublas_cudnn.h" #include "xla/service/hlo_parser.h" #include "xla/service/pattern_matcher.h" #include "xla/service/pattern_matcher_gmock.h" #include "xla/stream_executor/device_description.h" #include "xla/stream_executor/dnn.h" #include "xla/tests/hlo_test_base.h" #include "xla/util.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { namespace m = ::xla::match; class CudnnVectorizeConvolutionsTest : public HloTestBase { protected: absl::StatusOr<bool> Run(std::pair<int, int> compute_capability, HloModule* module) { CudnnVectorizeConvolutions pass( se::CudaComputeCapability{compute_capability.first, compute_capability.second}, se::dnn::VersionInfo(8, 3, 0)); TF_ASSIGN_OR_RETURN(bool changed, RunHloPass(&pass, module)); CallInliner inliner; TF_RETURN_IF_ERROR(RunHloPass(&inliner, module).status()); return changed; } }; TEST_F(CudnnVectorizeConvolutionsTest, VectorizeTo4) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[10,20,30,40] parameter(0) filter = s8[2,2,40,44] parameter(1) ROOT result = (s8[10,20,30,44], u8[0]) custom-call(input, filter), window={size=2x2}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward", backend_config="{bar: 0}" })") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get())); EXPECT_TRUE(changed); SCOPED_TRACE(module->ToString()); auto* root = module->entry_computation()->root_instruction(); const HloInstruction* conv = nullptr; ASSERT_THAT( root, GmockMatch(m::Tuple( m::Reshape(m::GetTupleElement( m::CustomCall(&conv, {kCudnnConvForwardCallTarget}, m::Reshape(m::Parameter(0)) .WithShape(S8, {10, 20, 30, 10, 4}), m::Reshape(m::Parameter(1)) .WithShape(S8, {2, 2, 10, 4, 44})) .WithConvDnums("b01f?_01i?o->b01f?")) .WithShape(S8, {10, 20, 30, 11, 4})), m::Op()))); EXPECT_EQ(conv->raw_backend_config_string(), "{bar: 0}"); } TEST_F(CudnnVectorizeConvolutionsTest, NoVectorizeTo4UnsupportedFilterType) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[10,20,30,40] parameter(0) filter = f32[2,2,40,44] parameter(1) ROOT result = (s8[10,20,30,44], u8[0]) custom-call(input, filter), window={size=2x2}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward", backend_config="{bar: 0}" })") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get())); EXPECT_FALSE(changed); } TEST_F(CudnnVectorizeConvolutionsTest, VectorizeTo4NCHW) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[10,48,20,30] parameter(0) filter = s8[48,44,2,2] parameter(1) ROOT result = (s8[10,44,20,30], u8[0]) custom-call(input, filter), window={size=2x2}, dim_labels=bf01_io01->bf01, custom_call_target="__cudnn$convForward" })") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get())); EXPECT_TRUE(changed); SCOPED_TRACE(module->ToString()); auto* root = module->entry_computation()->root_instruction(); const HloInstruction* conv = nullptr; ASSERT_THAT( root, GmockMatch(m::Tuple( m::Reshape(m::GetTupleElement( m::CustomCall(&conv, 
{kCudnnConvForwardCallTarget}, m::Reshape(m::Parameter(0)) .WithShape(S8, {10, 12, 4, 20, 30}), m::Reshape(m::Parameter(1)) .WithShape(S8, {12, 4, 44, 2, 2})) .WithConvDnums("bf?01_i?o01->bf?01")) .WithShape(S8, {10, 11, 4, 20, 30})), m::Op()))); } TEST_F(CudnnVectorizeConvolutionsTest, IncrementAllDnums) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[16,16,16,16] parameter(0) filter = s8[16,16,3,3] parameter(1) ROOT result = (s8[16,16,16,16], u8[0]) custom-call(input, filter), window={size=2x2}, dim_labels=fb01_i01o->fb01, custom_call_target="__cudnn$convForward" })") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get())); EXPECT_TRUE(changed); SCOPED_TRACE(module->ToString()); auto* root = module->entry_computation()->root_instruction(); const HloInstruction* conv = nullptr; ASSERT_THAT( root, GmockMatch(m::Tuple( m::Reshape(m::GetTupleElement( m::CustomCall(&conv, {kCudnnConvForwardCallTarget}, m::Reshape(m::Parameter(0)) .WithShape(S8, {4, 4, 16, 16, 16}), m::Reshape(m::Parameter(1)) .WithShape(S8, {4, 4, 16, 3, 3})) .WithConvDnums("f?b01_i?01o->f?b01")) .WithShape(S8, {4, 4, 16, 16, 16})), m::Op()))); } TEST_F(CudnnVectorizeConvolutionsTest, FilterDnums) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[1,20,9,9] parameter(0) filter = s8[3,3,20,32] parameter(1) ROOT result = (s8[1,32,9,9], u8[0]) custom-call(s8[1,20,9,9] input, s8[3,3,20,32] filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, custom_call_target="__cudnn$convForward" })") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get())); EXPECT_TRUE(changed); SCOPED_TRACE(module->ToString()); auto* root = module->entry_computation()->root_instruction(); const HloInstruction* conv = nullptr; ASSERT_THAT( root, GmockMatch(m::Tuple( m::Reshape(m::GetTupleElement( m::CustomCall(&conv, {kCudnnConvForwardCallTarget}, m::Reshape(m::Parameter(0)) .WithShape(S8, {1, 5, 4, 9, 9}), m::Reshape(m::Parameter(1)) .WithShape(S8, {3, 3, 5, 4, 32})) .WithConvDnums("bf?01_01i?o->bf?01")) .WithShape(S8, {1, 8, 4, 9, 9})), m::Op()))); } TEST_F(CudnnVectorizeConvolutionsTest, NoVectorizeTo4) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[10,20,30,41] parameter(0) filter = s8[2,2,41,44] parameter(1) ROOT result = (s8[10,20,30,44], u8[0]) custom-call(input, filter), window={size=2x2}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" })") .value(); CudnnVectorizeConvolutions pass( {7, 5}, se::dnn::VersionInfo{8, 3, 0}); TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get())); SCOPED_TRACE(module->ToString()); EXPECT_FALSE(changed); } TEST_F(CudnnVectorizeConvolutionsTest, NoVectorizeTo4IfOutputIsS32) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[10,20,30,41] parameter(0) filter = s8[2,2,41,44] parameter(1) ROOT result = (s32[10,20,30,44], u8[0]) custom-call(input, filter), window={size=2x2}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" })") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get())); SCOPED_TRACE(module->ToString()); EXPECT_FALSE(changed); } TEST_F(CudnnVectorizeConvolutionsTest, NoVectorizeTo4IfOutputIsF32) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[10,20,30,41] parameter(0) filter = s8[2,2,41,44] parameter(1) ROOT result = 
(f32[10,20,30,44], u8[0]) custom-call(input, filter), window={size=2x2}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" })") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get())); SCOPED_TRACE(module->ToString()); EXPECT_FALSE(changed); } TEST_F(CudnnVectorizeConvolutionsTest, VectorizeTo32) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[10,20,30,64] parameter(0) filter = s8[2,2,64,128] parameter(1) ROOT result = (s8[10,20,30,128], u8[0]) custom-call(input, filter), window={size=2x2}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" })") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get())); EXPECT_TRUE(changed); SCOPED_TRACE(module->ToString()); auto* root = module->entry_computation()->root_instruction(); const HloInstruction* conv = nullptr; ASSERT_THAT( root, GmockMatch(m::Tuple( m::Reshape( m::GetTupleElement( m::CustomCall( &conv, {kCudnnConvForwardCallTarget}, m::Reshape(m::Parameter(0)) .WithShape(S8, {10, 20, 30, 2, 32}), m::Reshape( m::Transpose( m::Reshape(m::Parameter(1)) .WithShape(S8, {2, 2, 2, 8, 4, 16, 4, 2})) .WithShape(S8, {2, 2, 2, 16, 2, 8, 4, 4}) .WithPredicate([](const HloInstruction* instr) { return absl::c_equal( instr->dimensions(), std::vector<int64_t>{2, 0, 1, 5, 7, 3, 6, 4}); })) .WithShape(S8, {128, 2, 2, 2, 32}))) .WithShape(S8, {10, 20, 30, 4, 32})), m::Op()))); EXPECT_TRUE(conv->backend_config<GpuBackendConfig>() ->cudnn_conv_backend_config() .reordered_int8_nchw_vect()); } TEST_F(CudnnVectorizeConvolutionsTest, BiasAndSideInput) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[10,20,30,64] parameter(0) filter = s8[2,2,64,128] parameter(1) bias = f32[128] parameter(2) side_input = s8[10,20,30,64] parameter(3) ROOT result = (s8[10,20,30,128], u8[0]) custom-call(input, filter, bias, side_input), window={size=2x2}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" })") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get())); EXPECT_TRUE(changed); SCOPED_TRACE(module->ToString()); auto* root = module->entry_computation()->root_instruction(); const HloInstruction* conv = nullptr; ASSERT_THAT( root, GmockMatch(m::Tuple( m::Reshape( m::GetTupleElement( m::CustomCall( &conv, {kCudnnConvForwardCallTarget}, m::Reshape(m::Parameter(0)) .WithShape(S8, {10, 20, 30, 2, 32}), m::Reshape(m::Transpose(m::Reshape(m::Parameter(1)))) .WithShape(S8, {128, 2, 2, 2, 32}), m::Reshape( m::Transpose(m::Reshape(m::Parameter(2)) .WithShape(F32, {4, 4, 2, 4})) .WithShape(F32, {4, 2, 4, 4}) .WithPredicate([](const HloInstruction* instr) { return absl::c_equal( instr->dimensions(), std::vector<int64_t>{0, 2, 1, 3}); })) .WithShape(F32, {128}), m::Reshape(m::Parameter(3)) .WithShape(S8, {10, 20, 30, 2, 32}))) .WithShape(S8, {10, 20, 30, 4, 32})), m::Op()))); EXPECT_TRUE(conv->backend_config<GpuBackendConfig>() ->cudnn_conv_backend_config() .reordered_int8_nchw_vect()); } TEST_F(CudnnVectorizeConvolutionsTest, InputNHWC_OutputNCHW) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[10,20,30,64] parameter(0) filter = s8[2,2,64,128] parameter(1) bias = f32[128] parameter(2) side_input = s8[10,128,20,30] parameter(3) ROOT result = (s8[10,128,20,30], u8[0]) custom-call(input, filter, bias, side_input), window={size=2x2}, dim_labels=b01f_01io->bf01, custom_call_target="__cudnn$convForward" })") .value(); 
TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get())); EXPECT_TRUE(changed); SCOPED_TRACE(module->ToString()); auto* root = module->entry_computation()->root_instruction(); const HloInstruction* conv = nullptr; ASSERT_THAT( root, GmockMatch(m::Tuple( m::Reshape( m::GetTupleElement( m::CustomCall( &conv, {kCudnnConvForwardCallTarget}, m::Reshape(m::Parameter(0)) .WithShape(S8, {10, 20, 30, 2, 32}), m::Reshape(m::Transpose(m::Reshape(m::Parameter(1)))) .WithShape(S8, {128, 2, 2, 2, 32}), m::Reshape( m::Transpose(m::Reshape(m::Parameter(2)) .WithShape(F32, {4, 4, 2, 4})) .WithShape(F32, {4, 2, 4, 4}) .WithPredicate([](const HloInstruction* instr) { return absl::c_equal( instr->dimensions(), std::vector<int64_t>{0, 2, 1, 3}); })) .WithShape(F32, {128}), m::Reshape(m::Parameter(3)) .WithShape(S8, {10, 4, 32, 20, 30}))) .WithShape(S8, {10, 4, 32, 20, 30})), m::Op()))); EXPECT_TRUE(conv->backend_config<GpuBackendConfig>() ->cudnn_conv_backend_config() .reordered_int8_nchw_vect()); } TEST_F(CudnnVectorizeConvolutionsTest, NoVectorizeTo32) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[10,20,30,64] parameter(0) filter = s8[2,2,64,128] parameter(1) ROOT result = (s8[10,20,30,128], u8[0]) custom-call(input, filter), window={size=2x2}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" })") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 0}, module.get())); EXPECT_TRUE(changed); SCOPED_TRACE(module->ToString()); auto* root = module->entry_computation()->root_instruction(); const HloInstruction* conv = nullptr; ASSERT_THAT( root, GmockMatch(m::Tuple( m::Reshape(m::GetTupleElement( m::CustomCall(&conv, {kCudnnConvForwardCallTarget}, m::Reshape(m::Parameter(0)) .WithShape(S8, {10, 20, 30, 16, 4}), m::Reshape(m::Parameter(1)) .WithShape(S8, {2, 2, 16, 4, 128}))) .WithShape(S8, {10, 20, 30, 32, 4})), m::Op()))); EXPECT_FALSE(conv->backend_config<GpuBackendConfig>() ->cudnn_conv_backend_config() .reordered_int8_nchw_vect()); } TEST_F(CudnnVectorizeConvolutionsTest, Vectorize4To32) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[10,20,30,16,4] parameter(0) filter = s8[3,5,16,192,4] parameter(1) bias = f32[64] parameter(2) side_input = s8[10,20,30,16,4] parameter(3) ROOT result = (s8[10,20,30,48,4], u8[0]) custom-call(input, filter, bias, side_input), window={size=3x5}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" })") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get())); EXPECT_TRUE(changed); SCOPED_TRACE(module->ToString()); auto* root = module->entry_computation()->root_instruction(); const HloInstruction* conv = nullptr; auto conv_pat = m::GetTupleElement( m::CustomCall( &conv, {kCudnnConvForwardCallTarget}, m::Reshape(m::Transpose(m::Reshape(m::Parameter(0)) .WithShape(S8, {10, 20, 30, 2, 8, 4})) .WithShape(S8, {10, 20, 30, 2, 8, 4})) .WithShape(S8, {10, 20, 30, 2, 32}), m::Reshape( m::Transpose(m::Reshape(m::Parameter(1)) .WithShape(S8, {3, 5, 2, 8, 24, 4, 2, 4})) .WithShape(S8, {2, 3, 5, 24, 2, 8, 4, 4}) .WithPredicate([](const HloInstruction* instr) { return absl::c_equal( instr->dimensions(), std::vector<int64_t>{2, 0, 1, 4, 6, 3, 5, 7}); })) .WithShape(S8, {192, 2, 3, 5, 32}), m::Reshape(m::Transpose(m::Reshape(m::Parameter(2)))), m::Reshape(m::Transpose(m::Reshape(m::Parameter(3)) .WithShape(S8, {10, 20, 30, 2, 8, 4})) .WithShape(S8, {10, 20, 30, 2, 8, 4})) .WithShape(S8, {10, 20, 30, 2, 32})) 
.WithConvDnums("b01f?_oi01?->b01f?")) .WithShape(S8, {10, 20, 30, 6, 32}); ASSERT_THAT(root, GmockMatch(m::Tuple( m::Reshape(m::Transpose(m::Reshape(conv_pat).WithShape( S8, {10, 20, 30, 6, 8, 4})) .WithShape(S8, {10, 20, 30, 6, 8, 4})) .WithShape(S8, {10, 20, 30, 48, 4}), m::Op()))); EXPECT_TRUE(conv->backend_config<GpuBackendConfig>() ->cudnn_conv_backend_config() .reordered_int8_nchw_vect()); } TEST_F(CudnnVectorizeConvolutionsTest, Vectorize4To32NCHW) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[10,16,20,30,4] parameter(0) filter = s8[16,128,2,2,4] parameter(1) bias = f32[64] parameter(2) side_input = s8[10,16,20,30,4] parameter(3) ROOT result = (s8[10,32,20,30,4], u8[0]) custom-call(input, filter, bias, side_input), window={size=2x2}, dim_labels=bf01_io01->bf01, custom_call_target="__cudnn$convForward" })") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get())); EXPECT_TRUE(changed); SCOPED_TRACE(module->ToString()); auto* root = module->entry_computation()->root_instruction(); const HloInstruction* conv = nullptr; auto conv_pat = m::GetTupleElement( m::CustomCall( &conv, {kCudnnConvForwardCallTarget}, m::Reshape(m::Transpose(m::Reshape(m::Parameter(0)) .WithShape(S8, {10, 2, 8, 20, 30, 4})) .WithShape(S8, {10, 2, 20, 30, 8, 4})) .WithShape(S8, {10, 2, 20, 30, 32}), m::Reshape( m::Transpose(m::Reshape(m::Parameter(1)) .WithShape(S8, {2, 8, 16, 4, 2, 2, 2, 4})) .WithShape(S8, {2, 2, 2, 16, 2, 8, 4, 4}) .WithPredicate([](const HloInstruction* instr) { return absl::c_equal( instr->dimensions(), std::vector<int64_t>{0, 5, 6, 2, 4, 1, 3, 7}); })) .WithShape(S8, {128, 2, 2, 2, 32}), m::Reshape(m::Transpose(m::Reshape(m::Parameter(2)))), m::Reshape(m::Transpose(m::Reshape(m::Parameter(3)) .WithShape(S8, {10, 2, 8, 20, 30, 4})) .WithShape(S8, {10, 2, 20, 30, 8, 4})) .WithShape(S8, {10, 2, 20, 30, 32})) .WithConvDnums("bf01_oi01->bf01")) .WithShape(S8, {10, 4, 20, 30, 32}); ASSERT_THAT(root, GmockMatch(m::Tuple( m::Reshape(m::Transpose(m::Reshape(conv_pat).WithShape( S8, {10, 4, 20, 30, 8, 4})) .WithShape(S8, {10, 4, 8, 20, 30, 4})) .WithShape(S8, {10, 32, 20, 30, 4}), m::Op()))); EXPECT_TRUE(conv->backend_config<GpuBackendConfig>() ->cudnn_conv_backend_config() .reordered_int8_nchw_vect()); } TEST_F(CudnnVectorizeConvolutionsTest, Vectorize4To32VectorDimFirst) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[4,10,20,30,16] parameter(0) filter = s8[4,3,5,16,192] parameter(1) bias = f32[64] parameter(2) side_input = s8[4,10,20,30,16] parameter(3) ROOT result = (s8[4,10,20,30,48], u8[0]) custom-call(input, filter, bias, side_input), window={size=3x5}, dim_labels=?b01f_?01io->?b01f, custom_call_target="__cudnn$convForward" })") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get())); EXPECT_TRUE(changed); SCOPED_TRACE(module->ToString()); auto* root = module->entry_computation()->root_instruction(); const HloInstruction* conv = nullptr; auto conv_pat = m::GetTupleElement( m::CustomCall( &conv, {kCudnnConvForwardCallTarget}, m::Reshape(m::Transpose(m::Reshape(m::Parameter(0)) .WithShape(S8, {4, 10, 20, 30, 2, 8})) .WithShape(S8, {8, 4, 10, 20, 30, 2})) .WithShape(S8, {32, 10, 20, 30, 2}), m::Reshape( m::Transpose(m::Reshape(m::Parameter(1)) .WithShape(S8, {4, 3, 5, 2, 8, 24, 4, 2})) .WithShape(S8, {2, 3, 5, 24, 2, 8, 4, 4}) .WithPredicate([](const HloInstruction* instr) { return absl::c_equal( instr->dimensions(), std::vector<int64_t>{3, 1, 2, 
5, 7, 4, 6, 0}); })) .WithShape(S8, {192, 2, 3, 5, 32}), m::Reshape(m::Transpose(m::Reshape(m::Parameter(2)))), m::Reshape(m::Transpose(m::Reshape(m::Parameter(3)) .WithShape(S8, {4, 10, 20, 30, 2, 8})) .WithShape(S8, {8, 4, 10, 20, 30, 2})) .WithShape(S8, {32, 10, 20, 30, 2})) .WithConvDnums("?b01f_oi01->?b01f")) .WithShape(S8, {32, 10, 20, 30, 6}); ASSERT_THAT(root, GmockMatch(m::Tuple( m::Reshape(m::Transpose(m::Reshape(conv_pat).WithShape( S8, {8, 4, 10, 20, 30, 6})) .WithShape(S8, {4, 10, 20, 30, 6, 8})) .WithShape(S8, {4, 10, 20, 30, 48}), m::Op()))); EXPECT_TRUE(conv->backend_config<GpuBackendConfig>() ->cudnn_conv_backend_config() .reordered_int8_nchw_vect()); } TEST_F(CudnnVectorizeConvolutionsTest, NoVectorize4To32) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[10,20,30,16,4] parameter(0) filter = s8[2,2,16,128,4] parameter(1) bias = f32[10] parameter(2) side_input = s8[10,20,30,16,4] parameter(3) ROOT result = (s8[10,20,30,32,4], u8[0]) custom-call(input, filter, bias, side_input), window={size=2x2}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" })") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 0}, module.get())); EXPECT_FALSE(changed); } TEST_F(CudnnVectorizeConvolutionsTest, Vectorize16To32) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[10,20,30,4,16] parameter(0) filter = s8[3,5,4,192,16] parameter(1) ROOT result = (s8[10,20,30,12,16], u8[0]) custom-call(input, filter), window={size=3x5}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" })") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get())); EXPECT_TRUE(changed); SCOPED_TRACE(module->ToString()); auto* root = module->entry_computation()->root_instruction(); const HloInstruction* conv = nullptr; auto filter_pat = m::Reshape( m::Transpose( m::Reshape(m::Parameter(1)).WithShape(S8, {3, 5, 2, 2, 192, 16})) .WithShape(S8, {3, 5, 2, 192, 2, 16})) .WithShape(S8, {3, 5, 2, 192, 32}); auto conv_pat = m::GetTupleElement( m::CustomCall( &conv, {kCudnnConvForwardCallTarget}, m::Reshape( m::Transpose(m::Reshape(m::Parameter(0)) .WithShape(S8, {10, 20, 30, 2, 2, 16})) .WithShape(S8, {10, 20, 30, 2, 2, 16})) .WithShape(S8, {10, 20, 30, 2, 32}), m::Reshape( m::Transpose(m::Reshape(filter_pat) .WithShape(S8, {3, 5, 2, 24, 4, 2, 8, 4})) .WithShape(S8, {2, 3, 5, 24, 2, 8, 4, 4})) .WithShape(S8, {192, 2, 3, 5, 32})) .WithConvDnums("b01f_oi01->b01f")) .WithShape(S8, {10, 20, 30, 6, 32}); ASSERT_THAT(root, GmockMatch(m::Tuple( m::Reshape(m::Transpose(m::Reshape(conv_pat).WithShape( S8, {10, 20, 30, 6, 2, 16})) .WithShape(S8, {10, 20, 30, 6, 2, 16})) .WithShape(S8, {10, 20, 30, 12, 16}), m::Op()))); EXPECT_TRUE(conv->backend_config<GpuBackendConfig>() ->cudnn_conv_backend_config() .reordered_int8_nchw_vect()); } TEST_F(CudnnVectorizeConvolutionsTest, VectorizeMixedTo32) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { input = s8[10,20,30,8,8] parameter(0) filter = s8[3,5,2,192,32] parameter(1) ROOT result = (s8[10,20,30,96,2], u8[0]) custom-call(input, filter), window={size=3x5}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convForward" })") .value(); TF_ASSERT_OK_AND_ASSIGN(bool changed, Run({7, 5}, module.get())); EXPECT_TRUE(changed); SCOPED_TRACE(module->ToString()); auto* root = module->entry_computation()->root_instruction(); const HloInstruction* conv = nullptr; auto conv_pat = 
m::GetTupleElement( m::CustomCall( &conv, {kCudnnConvForwardCallTarget}, m::Reshape(m::Transpose(m::Reshape(m::Parameter(0)) .WithShape(S8, {10, 20, 30, 2, 4, 8})) .WithShape(S8, {10, 20, 30, 2, 4, 8})) .WithShape(S8, {10, 20, 30, 2, 32}), m::Reshape( m::Transpose(m::Reshape(m::Parameter(1)) .WithShape(S8, {3, 5, 2, 24, 4, 2, 8, 4})) .WithShape(S8, {2, 3, 5, 24, 2, 8, 4, 4})) .WithShape(S8, {192, 2, 3, 5, 32})) .WithConvDnums("b01f_oi01->b01f")) .WithShape(S8, {10, 20, 30, 6, 32}); ASSERT_THAT(root, GmockMatch(m::Tuple( m::Reshape(m::Transpose(m::Reshape(conv_pat).WithShape( S8, {10, 20, 30, 6, 16, 2})) .WithShape(S8, {10, 20, 30, 6, 16, 2})) .WithShape(S8, {10, 20, 30, 96, 2}), m::Op()))); EXPECT_TRUE(conv->backend_config<GpuBackendConfig>() ->cudnn_conv_backend_config() .reordered_int8_nchw_vect()); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/cudnn_vectorize_convolutions.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/cudnn_vectorize_convolutions_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
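Usage note (illustrative, not part of the dataset record): the test fixture's Run() helper above constructs CudnnVectorizeConvolutions from a compute capability and a cuDNN version, runs it, and then runs CallInliner so the wrapped convolution call is inlined back into the entry computation before pattern matching. A hedged standalone sketch follows; the function name VectorizeInt8Convs and the hard-coded sm_75 / cuDNN 8.3 values are assumptions chosen to enable the int8x32 path.

// Hedged sketch mirroring the test fixture's Run() helper above.
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/call_inliner.h"
#include "xla/service/gpu/transforms/cudnn_vectorize_convolutions.h"
#include "xla/stream_executor/device_description.h"
#include "xla/stream_executor/dnn.h"
#include "tsl/platform/errors.h"
#include "tsl/platform/statusor.h"

absl::StatusOr<bool> VectorizeInt8Convs(xla::HloModule* module) {
  // Compute capability 7.5+ enables the vect_size-32 paths
  // (TryRevectorizeConv / TryVectorizeConv); older GPUs fall back to int8x4.
  xla::gpu::CudnnVectorizeConvolutions pass(
      stream_executor::CudaComputeCapability{7, 5},
      stream_executor::dnn::VersionInfo(8, 3, 0));
  TF_ASSIGN_OR_RETURN(bool changed, pass.Run(module));
  // The pass wraps the rewritten convolution in a call; inlining it restores
  // a flat entry computation, exactly as the tests do before matching.
  xla::CallInliner inliner;
  TF_RETURN_IF_ERROR(inliner.Run(module).status());
  return changed;
}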
d9f6523b-dc16-44b5-a56f-9428db116e51
cpp
tensorflow/tensorflow
collective_permute_cycle_decomposer
third_party/xla/xla/service/gpu/transforms/collective_permute_cycle_decomposer.cc
third_party/xla/xla/service/gpu/transforms/collective_permute_cycle_decomposer_test.cc
#include "xla/service/gpu/transforms/collective_permute_cycle_decomposer.h" #include <cstdint> #include <string> #include <utility> #include <vector> #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_join.h" #include "absl/strings/string_view.h" #include "xla/comparison_util.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_query.h" #include "xla/literal_util.h" #include "xla/service/collective_ops_utils.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/hlo_parser.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" namespace xla { namespace { using SourceTargetPair = std::pair<int64_t, int64_t>; using SourceTargetPairs = std::vector<SourceTargetPair>; enum class CycleType { kUnknown, kForward, kBackward }; CycleType ShouldDecomposeWithCycleType( const HloCollectivePermuteInstruction& collective_permute, int64_t threshold_in_bytes) { if (collective_permute.operand_count() != 1) { return CycleType::kUnknown; } const Shape& result_shape = collective_permute.shape(); if (result_shape.IsTuple()) { return CycleType::kUnknown; } CHECK(result_shape.IsArray()); if (ShapeUtil::ByteSizeOf(result_shape) < threshold_in_bytes) { return CycleType::kUnknown; } const SourceTargetPairs& pairs = collective_permute.source_target_pairs(); if (pairs.size() == 1) { return CycleType::kUnknown; } return IsForwardCycle(pairs) ? CycleType::kForward : IsBackwardCycle(pairs) ? CycleType::kBackward : CycleType::kUnknown; } absl::Status GetFrontendAttributes(HloCollectivePermuteInstruction* cp, CycleType cycle_type, xla::FrontendAttributes& cp1_attr, xla::FrontendAttributes& cp2_attr) { cp1_attr = cp->frontend_attributes(); cp2_attr = cp->frontend_attributes(); auto validation_it = cp->frontend_attributes().map().find(kSendRecvValidationAttr); if (validation_it == cp->frontend_attributes().map().end() || validation_it->second == "invalid") { return absl::OkStatus(); } auto statusor_bounds = ParseReplicaGroupsOnly(validation_it->second); if (!statusor_bounds.ok()) { return statusor_bounds.status(); } const std::vector<ReplicaGroup>& bounds = statusor_bounds.value(); if (bounds.size() < 2) { return Internal("Invalid number of replica groups"); } int64_t num_pairs = bounds.size(); auto backedge_start = cycle_type == CycleType::kBackward ? bounds.begin() : bounds.begin() + num_pairs - 1; auto other_edges_start = cycle_type == CycleType::kBackward ? 
bounds.begin() + 1 : bounds.begin(); std::vector<ReplicaGroup> cp1_bounds(backedge_start, backedge_start + 1); std::vector<ReplicaGroup> cp2_bounds(other_edges_start, other_edges_start + num_pairs - 1); auto bounds_to_string = [](const std::vector<ReplicaGroup> groups) { return "{" + absl::StrJoin(groups, ",", [](std::string* out, const ReplicaGroup& value) { absl::StrAppend(out, "{", value.replica_ids(0), ",", value.replica_ids(1), "}"); }) + "}"; }; std::string cp1_validation_str = bounds_to_string(cp1_bounds); std::string cp2_validation_str = bounds_to_string(cp2_bounds); (*cp1_attr.mutable_map())[kSendRecvValidationAttr] = cp1_validation_str; (*cp2_attr.mutable_map())[kSendRecvValidationAttr] = cp2_validation_str; return absl::OkStatus(); } absl::Status DecomposeCollectivePermuteCycle( HloCollectivePermuteInstruction* cp, HloComputation* computation, HloModule* module, int64_t next_channel_id, CycleType cycle_type) { const SourceTargetPairs& pairs = cp->source_target_pairs(); int64_t num_pairs = pairs.size(); auto backedge_start = cycle_type == CycleType::kBackward ? pairs.begin() : pairs.begin() + num_pairs - 1; auto other_edges_start = cycle_type == CycleType::kBackward ? pairs.begin() + 1 : pairs.begin(); SourceTargetPairs backedge(backedge_start, backedge_start + 1); SourceTargetPairs other_edges(other_edges_start, other_edges_start + num_pairs - 1); const OpMetadata& metadata = cp->metadata(); xla::FrontendAttributes cp1_attr, cp2_attr; TF_RETURN_IF_ERROR(GetFrontendAttributes(cp, cycle_type, cp1_attr, cp2_attr)); TF_ASSIGN_OR_RETURN( CollectiveOpGroupMode mode, GetCollectiveOpGroupMode(cp->channel_id().has_value(), std::nullopt)); HloInstruction* cp1 = computation->AddInstruction( HloInstruction::CreateCollectivePermute( cp->shape(), cp->mutable_operand(0), backedge, cp->channel_id()), "cp.backward"); cp1->set_metadata(metadata); cp1->set_frontend_attributes(cp1_attr); int64_t bwd_recv_id = backedge.back().second; bool is_cross_partition = (mode == CollectiveOpGroupMode::kCrossPartition); HloInstruction* cp2 = computation->AddInstruction( HloInstruction::CreateCollectivePermute( cp->shape(), cp->mutable_operand(0), other_edges, is_cross_partition ? 
std::optional(next_channel_id) : std::nullopt), "cp.forward"); cp2->set_metadata(metadata); cp2->set_frontend_attributes(cp2_attr); HloInstruction* partition_or_replica = nullptr; switch (mode) { case CollectiveOpGroupMode::kCrossReplica: partition_or_replica = computation->AddInstruction(HloInstruction::CreateReplicaId()); break; case CollectiveOpGroupMode::kCrossPartition: partition_or_replica = computation->AddInstruction(HloInstruction::CreatePartitionId()); break; case CollectiveOpGroupMode::kCrossReplicaAndPartition: case CollectiveOpGroupMode::kFlattenedID: return absl::InternalError(absl::StrFormat( "Unexpected collective group mode for %s", cp->name())); }; HloInstruction* constant = computation->AddInstruction( HloInstruction::CreateConstant(LiteralUtil::CreateR0(U32, bwd_recv_id))); HloInstruction* compare = computation->AddInstruction(HloInstruction::CreateCompare( ShapeUtil::MakeShape(PRED, {}), partition_or_replica, constant, Comparison::Direction::kEq)); HloInstruction* recv_data = computation->AddInstruction(HloInstruction::CreateTernary( cp1->shape(), HloOpcode::kSelect, compare, cp1, cp2)); TF_RETURN_IF_ERROR(cp->ReplaceAllUsesWith(recv_data)); TF_RETURN_IF_ERROR(computation->RemoveInstructionAndUnusedOperands(cp)); return absl::OkStatus(); } } absl::StatusOr<bool> CollectivePermuteCycleDecomposer::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { bool changed = false; int64_t next_channel_id; for (auto comp : module->computations(execution_threads)) { for (auto hlo : comp->MakeInstructionPostOrder()) { if (hlo->opcode() != HloOpcode::kCollectivePermute) { continue; } auto collective_permute = Cast<HloCollectivePermuteInstruction>(hlo); CycleType cycle_type = ShouldDecomposeWithCycleType(*collective_permute, threshold_in_bytes_); if (cycle_type != CycleType::kUnknown) { if (changed == false) { next_channel_id = hlo_query::NextChannelId(*module); changed = true; } TF_RETURN_IF_ERROR(DecomposeCollectivePermuteCycle( collective_permute, comp, module, next_channel_id++, cycle_type)); } } } return changed; } }
#include "xla/service/gpu/transforms/collective_permute_cycle_decomposer.h" #include <memory> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/service/hlo_parser.h" #include "xla/tests/filecheck.h" #include "xla/tests/hlo_test_base.h" #include "xla/tests/test_utils.h" #include "tsl/platform/statusor.h" namespace xla { namespace { using ::testing::HasSubstr; using CollectivePermuteCycleDecomposerTest = HloTestBase; TEST_F(CollectivePermuteCycleDecomposerTest, TrivialNotTransformed) { const absl::string_view kModuleStr = R"( HloModule test ENTRY test_computation { p = u32[8,8] parameter(0) ROOT start = u32[8,8] collective-permute(p), channel_id=1, source_target_pairs={{0,0}} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule((kModuleStr))); CollectivePermuteCycleDecomposer decomposer(0); TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get())); EXPECT_FALSE(changed); } TEST_F(CollectivePermuteCycleDecomposerTest, BelowThresholdNotTransformed) { const absl::string_view kModuleStr = R"( HloModule test ENTRY test_computation { p = u32[4,2] parameter(0) ROOT start = u32[4,2] collective-permute(p), channel_id=1, source_target_pairs={{0,1},{1,2},{2,3},{3,0}} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule((kModuleStr))); CollectivePermuteCycleDecomposer decomposer(33); TF_ASSERT_OK_AND_ASSIGN( bool changed, RunHloPass(CollectivePermuteCycleDecomposer(33), module.get())); EXPECT_FALSE(changed); TF_ASSERT_OK_AND_ASSIGN( changed, RunHloPass(CollectivePermuteCycleDecomposer(16), module.get())); EXPECT_TRUE(changed); } TEST_F(CollectivePermuteCycleDecomposerTest, ForwardCycle) { const absl::string_view kModuleStr = R"( HloModule test ENTRY test_computation { p = u32[8,8] parameter(0) ROOT start = u32[8,8] collective-permute(p), channel_id=1, source_target_pairs={{0,1},{1,2},{2,3},{3,0}}, frontend_attributes={_xla_send_recv_validation="{{0,7},{1,8},{2,9},{3,10}}"}, metadata={op_name="op1/op2/add" source_file="foo/bar/mysource.py" source_line=35} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule((kModuleStr))); CollectivePermuteCycleDecomposer decomposer(0); TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get())); EXPECT_TRUE(changed); TF_CHECK_OK(VerifyHloModule(module.get(), false, true)); HloPrintOptions options; options.set_print_operand_shape(false); options.set_include_layout_in_shapes(false); EXPECT_TRUE(*RunFileCheck(module->ToString(options), R"( )")); } TEST_F(CollectivePermuteCycleDecomposerTest, ForwardCycleNoChannel) { const absl::string_view kModuleStr = R"( HloModule test ENTRY test_computation { p = u32[8,8] parameter(0) ROOT start = u32[8,8] collective-permute(p), source_target_pairs={{0,1},{1,2},{2,3},{3,0}} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule((kModuleStr))); CollectivePermuteCycleDecomposer decomposer(0); TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get())); EXPECT_TRUE(changed); TF_CHECK_OK(VerifyHloModule(module.get(), false, true)); HloPrintOptions options; options.set_print_operand_shape(false); options.set_include_layout_in_shapes(false); EXPECT_TRUE(*RunFileCheck(module->ToString(options), R"( )")); } 
TEST_F(CollectivePermuteCycleDecomposerTest, ForwardCycleWithMatmul) { const absl::string_view kModuleStr = R"( HloModule test while_cond { param = (u32[], f32[2,2], f32[2,2]) parameter(0) iter = u32[] get-tuple-element(param), index=0 max_iter = u32[] constant(3) ROOT cmp = pred[] compare(iter, max_iter), direction=LT } while_body { param = (u32[], f32[2,2], f32[2,2]) parameter(0) iter = u32[] get-tuple-element(param), index=0 data = f32[2,2] get-tuple-element(param), index=1 weights = f32[2,2] get-tuple-element(param), index=2 cp = f32[2,2] collective-permute(data), channel_id=1, source_target_pairs={{0,1}, {1,2}, {2,3}, {3,0}}, frontend_attributes={_xla_send_recv_validation="{{0,7},{1,8},{2,9},{3,10}}"} matmul = f32[2,2] dot(weights, cp), lhs_contracting_dims={1}, rhs_contracting_dims={0} iter_increment = u32[] constant(1) next_iter = u32[] add(iter, iter_increment) ROOT result = (u32[], f32[2,2], f32[2,2]) tuple(next_iter, matmul, weights) } ENTRY test_computation { iter = u32[] constant(0) data = f32[2,2] parameter(0) weights = f32[2,2] parameter(1) input = (u32[], f32[2,2], f32[2,2]) tuple(iter, data, weights) while_res = (u32[], f32[2,2], f32[2,2]) while(input), condition=while_cond, body=while_body ROOT data_out = f32[2,2] get-tuple-element(while_res), index=1 })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule((kModuleStr))); CollectivePermuteCycleDecomposer decomposer(0); TF_ASSERT_OK_AND_ASSIGN(bool changed, decomposer.Run(module.get())); EXPECT_TRUE(changed); HloCollectivePermuteInstruction* cp1 = DynCast<HloCollectivePermuteInstruction>( FindInstruction(module.get(), "cp.backward")); HloCollectivePermuteInstruction* cp2 = DynCast<HloCollectivePermuteInstruction>( FindInstruction(module.get(), "cp.forward")); EXPECT_THAT(cp1->ToString(), HasSubstr("source_target_pairs={{3,0}}")); EXPECT_THAT(cp1->ToString(), HasSubstr("_xla_send_recv_validation={{3,10}}")); EXPECT_THAT(cp2->ToString(), HasSubstr("source_target_pairs={{0,1},{1,2},{2,3}}")); EXPECT_THAT(cp2->ToString(), HasSubstr("_xla_send_recv_validation={{0,7},{1,8},{2,9}}")); } TEST_F(CollectivePermuteCycleDecomposerTest, BackwardCycle) { const absl::string_view kModuleStr = R"( HloModule test ENTRY test_computation { p = u32[8,8] parameter(0) ROOT start = u32[8,8] collective-permute(p), channel_id=1, source_target_pairs={{0,3},{1,0},{2,1},{3,2}}, frontend_attributes={_xla_send_recv_validation="{{0,7},{1,8},{2,9},{3,10}}"}, metadata={op_name="op1/op2/add" source_file="foo/bar/mysource.py" source_line=35} })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule((kModuleStr))); TF_ASSERT_OK_AND_ASSIGN( bool changed, RunHloPass(CollectivePermuteCycleDecomposer(0), module.get())); EXPECT_TRUE(changed); TF_CHECK_OK(VerifyHloModule(module.get(), true, false)); HloPrintOptions options; options.set_print_operand_shape(false); options.set_include_layout_in_shapes(false); EXPECT_TRUE(*RunFileCheck(module->ToString(options), R"( )")); } TEST_F(CollectivePermuteCycleDecomposerTest, BackwardCycleNoChannel) { const absl::string_view kModuleStr = R"( HloModule test ENTRY test_computation { p = u32[8,8] parameter(0) ROOT start = u32[8,8] collective-permute(p), source_target_pairs={{0,3},{1,0},{2,1},{3,2}}, frontend_attributes={_xla_send_recv_validation="{{0,7},{1,8},{2,9},{3,10}}"} })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule((kModuleStr))); CollectivePermuteCycleDecomposer decomposer(0); TF_ASSERT_OK_AND_ASSIGN(bool 
changed, decomposer.Run(module.get())); EXPECT_TRUE(changed); HloPrintOptions options; options.set_print_operand_shape(false); options.set_include_layout_in_shapes(false); TF_CHECK_OK(VerifyHloModule(module.get(), false, true)); EXPECT_TRUE(*RunFileCheck(module->ToString(options), R"( )")); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/collective_permute_cycle_decomposer.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/collective_permute_cycle_decomposer_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
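For reference, the decomposition exercised by ForwardCycleWithMatmul above splits the cycle {{0,1},{1,2},{2,3},{3,0}} into a backward edge {{3,0}} and the forward edges {{0,1},{1,2},{2,3}}, with the result picked per replica via a select. A minimal self-contained check in the same style, assuming the fixture, headers, and helpers (RunHloPass, FindInstruction) from the test file above; the test name and module string are illustrative and not part of the original suite:

TEST_F(CollectivePermuteCycleDecomposerTest, ForwardCycleSplitsIntoTwoPermutes) {
  const absl::string_view kModuleStr = R"(
    HloModule test
    ENTRY test_computation {
      p = u32[8,8] parameter(0)
      ROOT start = u32[8,8] collective-permute(p), channel_id=1,
        source_target_pairs={{0,1},{1,2},{2,3},{3,0}}
    })";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kModuleStr));
  // Threshold 0 decomposes any cycle regardless of operand size.
  TF_ASSERT_OK_AND_ASSIGN(
      bool changed,
      RunHloPass(CollectivePermuteCycleDecomposer(0), module.get()));
  EXPECT_TRUE(changed);
  auto* cp_back = DynCast<HloCollectivePermuteInstruction>(
      FindInstruction(module.get(), "cp.backward"));
  auto* cp_fwd = DynCast<HloCollectivePermuteInstruction>(
      FindInstruction(module.get(), "cp.forward"));
  ASSERT_NE(cp_back, nullptr);
  ASSERT_NE(cp_fwd, nullptr);
  EXPECT_THAT(cp_back->ToString(), HasSubstr("source_target_pairs={{3,0}}"));
  EXPECT_THAT(cp_fwd->ToString(),
              HasSubstr("source_target_pairs={{0,1},{1,2},{2,3}}"));
}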
ac17a735-6f0a-4a24-a913-698d9809b098
cpp
tensorflow/tensorflow
conv_padding_legalization
third_party/xla/xla/service/gpu/transforms/conv_padding_legalization.cc
third_party/xla/xla/service/gpu/transforms/conv_padding_legalization_test.cc
#include "xla/service/gpu/transforms/conv_padding_legalization.h" #include <algorithm> #include <cstddef> #include <cstdint> #include <cstdlib> #include <vector> #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/literal_util.h" #include "xla/service/gpu/cublas_cudnn.h" #include "xla/service/hlo_creation_utils.h" #include "xla/service/shape_inference.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/util.h" #include "xla/window_util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/status.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { bool IsForwardConvolutionCanonical(const HloInstruction& conv) { CHECK(conv.custom_call_target() == kCudnnConvForwardCallTarget || conv.custom_call_target() == kCudnnConvBiasActivationForwardCallTarget || conv.custom_call_target() == kCudnnConvForwardGraphCallTarget); return window_util::HasSymmetricPadding(conv.window()) && !window_util::HasNegativePadding(conv.window()) && !window_util::HasDilation(conv.window()); } HloInstruction* MaybePaddedAndSlicedInput( Window* conv_window, const ConvolutionDimensionNumbers& conv_dnums, HloInstruction* input) { HloComputation* computation = input->parent(); if (!window_util::HasSymmetricPadding(*conv_window) || window_util::HasBaseDilation(*conv_window)) { PaddingConfig padding_config = MakeNoPaddingConfig(input->shape().dimensions_size()); for (size_t i = 0; i < conv_dnums.input_spatial_dimensions().size(); ++i) { int64_t dim = conv_dnums.input_spatial_dimensions(i); if (conv_window->dimensions(i).padding_low() > 0) { padding_config.mutable_dimensions(dim)->set_edge_padding_low( conv_window->dimensions(i).padding_low()); conv_window->mutable_dimensions(i)->set_padding_low(0); } if (conv_window->dimensions(i).padding_high() > 0) { padding_config.mutable_dimensions(dim)->set_edge_padding_high( conv_window->dimensions(i).padding_high()); conv_window->mutable_dimensions(i)->set_padding_high(0); } if (conv_window->dimensions(i).base_dilation() != 1) { padding_config.mutable_dimensions(dim)->set_interior_padding( conv_window->dimensions(i).base_dilation() - 1); conv_window->mutable_dimensions(i)->set_base_dilation(1); } } PrimitiveType element_type = input->shape().element_type(); HloInstruction* padding = computation->AddInstruction( HloInstruction::CreateConstant(LiteralUtil::Zero(element_type))); input = MakePadHlo(input, padding, padding_config, &input->metadata()).value(); } if (window_util::HasNegativePadding(*conv_window)) { std::vector<int64_t> start_indices(input->shape().dimensions_size(), 0); std::vector<int64_t> limit_indices(input->shape().dimensions().begin(), input->shape().dimensions().end()); std::vector<int64_t> strides(input->shape().dimensions_size(), 1); for (size_t i = 0; i < conv_dnums.input_spatial_dimensions().size(); ++i) { int64_t dim = conv_dnums.input_spatial_dimensions(i); if (conv_window->dimensions(i).padding_low() < 0) { start_indices[dim] += -conv_window->dimensions(i).padding_low(); conv_window->mutable_dimensions(i)->set_padding_low(0); } if (conv_window->dimensions(i).padding_high() < 0) { limit_indices[dim] -= -conv_window->dimensions(i).padding_high(); conv_window->mutable_dimensions(i)->set_padding_high(0); } } input = MakeSliceHlo(input, start_indices, limit_indices, 
strides).value(); } return input; } HloInstruction* MaybePaddedKernel(const Window& conv_window, const ConvolutionDimensionNumbers& conv_dnums, HloInstruction* kernel) { if (!window_util::HasWindowDilation(conv_window)) { return kernel; } PaddingConfig padding_config; padding_config.mutable_dimensions()->Reserve( kernel->shape().dimensions_size()); for (size_t i = 0; i < kernel->shape().dimensions_size(); ++i) { padding_config.add_dimensions(); } for (size_t i = 0; i < conv_dnums.kernel_spatial_dimensions().size(); ++i) { int64_t dim = conv_dnums.kernel_spatial_dimensions(i); padding_config.mutable_dimensions(dim)->set_interior_padding( conv_window.dimensions(i).window_dilation() - 1); } HloComputation* computation = kernel->parent(); PrimitiveType element_type = kernel->shape().element_type(); HloInstruction* padding = computation->AddInstruction( HloInstruction::CreateConstant(LiteralUtil::Zero(element_type))); return MakePadHlo(kernel, padding, padding_config, &kernel->metadata()) .value(); } } bool ConvPaddingLegalization::CanonicalizeForwardConvolution( HloInstruction* conv) { if (IsForwardConvolutionCanonical(*conv)) { return false; } Window new_conv_window = conv->window(); HloInstruction* new_input = MaybePaddedAndSlicedInput( &new_conv_window, conv->convolution_dimension_numbers(), conv->mutable_operand(0)); HloInstruction* new_kernel = MaybePaddedKernel(new_conv_window, conv->convolution_dimension_numbers(), conv->mutable_operand(1)); for (size_t i = 0; i < new_conv_window.dimensions_size(); ++i) { WindowDimension* dim = new_conv_window.mutable_dimensions(i); dim->set_size(new_kernel->shape().dimensions( conv->convolution_dimension_numbers().kernel_spatial_dimensions(i))); dim->set_window_dilation(1); } VLOG(1) << "Canonicalizing forward conv"; std::vector<HloInstruction*> operands(conv->operands().begin(), conv->operands().end()); operands[0] = new_input; operands[1] = new_kernel; auto new_conv = conv->parent()->AddInstruction( conv->CloneWithNewOperands(conv->shape(), operands)); new_conv->set_window(new_conv_window); VLOG(1) << "Replacing:\n " << conv->ToString() << "\nwith:\n " << new_conv->ToString(); TF_CHECK_OK(conv->parent()->ReplaceInstruction(conv, new_conv)); return true; } namespace { void IncreasePaddingLowBy(int64_t delta, WindowDimension* window_dim) { window_dim->set_padding_low(window_dim->padding_low() + delta); } void IncreasePaddingHighBy(int64_t delta, WindowDimension* window_dim) { window_dim->set_padding_high(window_dim->padding_high() + delta); } } bool ConvPaddingLegalization::CanonicalizeBackwardFilterConvolution( HloInstruction* backward_conv) { CHECK_EQ(backward_conv->custom_call_target(), kCudnnConvBackwardFilterCallTarget); if (window_util::HasSymmetricPadding(backward_conv->window())) { return false; } HloInstruction* input = backward_conv->mutable_operand(0); Window new_backward_conv_window = backward_conv->window(); PaddingConfig input_padding_config = MakeNoPaddingConfig(input->shape().rank()); ConvolutionDimensionNumbers backward_conv_dnums = backward_conv->convolution_dimension_numbers(); for (size_t i = 0; i < backward_conv->window().dimensions_size(); ++i) { int64_t padding_low = backward_conv->window().dimensions(i).padding_low(); int64_t padding_high = backward_conv->window().dimensions(i).padding_high(); if (padding_low < 0 || padding_high < 0) { return false; } int64_t new_conv_padding = std::min(padding_low, padding_high); int64_t dim = backward_conv_dnums.input_spatial_dimensions(i); 
input_padding_config.mutable_dimensions(dim)->set_edge_padding_low( padding_low - new_conv_padding); input_padding_config.mutable_dimensions(dim)->set_edge_padding_high( padding_high - new_conv_padding); auto* new_dim = new_backward_conv_window.mutable_dimensions(i); new_dim->set_padding_low(new_conv_padding); new_dim->set_padding_high(new_conv_padding); } HloComputation* computation = backward_conv->parent(); HloInstruction* output = backward_conv->mutable_operand(1); HloInstruction* padding = computation->AddInstruction(HloInstruction::CreateConstant( LiteralUtil::Zero(input->shape().element_type()))); HloInstruction* padded_input = MakePadHlo(input, padding, input_padding_config).value(); HloInstruction* new_backward_conv = computation->AddInstruction(backward_conv->CloneWithNewOperands( backward_conv->shape(), {padded_input, output})); new_backward_conv->set_window(new_backward_conv_window); VLOG(1) << "Canonicalizing backward filter conv"; VLOG(1) << "Replacing:\n " << backward_conv->ToString() << "\nwith:\n " << new_backward_conv->ToString(); TF_CHECK_OK( computation->ReplaceInstruction(backward_conv, new_backward_conv)); return true; } bool ConvPaddingLegalization::CanonicalizeBackwardInputConvolution( HloInstruction* backward_conv) { if (window_util::HasSymmetricPadding(backward_conv->window())) { return false; } Window new_backward_conv_window = backward_conv->window(); ConvolutionDimensionNumbers backward_conv_dnums = backward_conv->convolution_dimension_numbers(); Shape backward_conv_shape = backward_conv->shape().tuple_shapes(0); Shape new_backward_conv_shape = backward_conv_shape; for (size_t i = 0; i < backward_conv->window().dimensions_size(); ++i) { int64_t padding_low = backward_conv->window().dimensions(i).padding_low(); int64_t padding_high = backward_conv->window().dimensions(i).padding_high(); if (padding_low < 0 || padding_high < 0) { return false; } if (padding_low > padding_high) { IncreasePaddingLowBy(padding_high - padding_low, new_backward_conv_window.mutable_dimensions(i)); } else if (padding_low < padding_high) { IncreasePaddingHighBy(padding_low - padding_high, new_backward_conv_window.mutable_dimensions(i)); } int64_t dim = backward_conv_dnums.input_spatial_dimensions(i); new_backward_conv_shape.set_dimensions( dim, new_backward_conv_shape.dimensions(dim) + std::abs(padding_low - padding_high)); } HloComputation* computation = backward_conv->parent(); HloInstruction* output = backward_conv->mutable_operand(0); HloInstruction* filter = backward_conv->mutable_operand(1); HloInstruction* new_backward_conv_call = computation->AddInstruction(backward_conv->CloneWithNewOperands( ShapeUtil::MakeTupleShape( {new_backward_conv_shape, ShapeUtil::MakeShape(U8, {0})}), {output, filter})); new_backward_conv_call->set_window(new_backward_conv_window); HloInstruction* new_backward_conv = computation->AddInstruction(HloInstruction::CreateGetTupleElement( new_backward_conv_shape, new_backward_conv_call, 0)); HloInstruction* new_backward_conv_scratch = computation->AddInstruction(HloInstruction::CreateGetTupleElement( new_backward_conv_call->shape().tuple_shapes(1), new_backward_conv_call, 1)); std::vector<int64_t> start_indices( new_backward_conv->shape().dimensions_size(), 0LL); std::vector<int64_t> limit_indices( new_backward_conv->shape().dimensions().begin(), new_backward_conv->shape().dimensions().end()); std::vector<int64_t> strides(new_backward_conv->shape().dimensions_size(), 1LL); for (size_t i = 0; i < backward_conv->window().dimensions_size(); ++i) { int64_t 
padding_low = backward_conv->window().dimensions(i).padding_low(); int64_t padding_high = backward_conv->window().dimensions(i).padding_high(); int64_t dim = backward_conv_dnums.input_spatial_dimensions(i); if (padding_low > padding_high) { start_indices[dim] += padding_low - padding_high; } else if (padding_low < padding_high) { limit_indices[dim] -= padding_high - padding_low; } } Shape slice_shape = ShapeInference::InferSliceShape(new_backward_conv->shape(), start_indices, limit_indices, strides) .value(); CHECK(ShapeUtil::Compatible(slice_shape, backward_conv_shape)) << ShapeUtil::HumanString(slice_shape) << " vs " << ShapeUtil::HumanString(backward_conv_shape); HloInstruction* slice = computation->AddInstruction( HloInstruction::CreateSlice(backward_conv_shape, new_backward_conv, start_indices, limit_indices, strides)); HloInstruction* new_tuple = computation->AddInstruction( HloInstruction::CreateTuple({slice, new_backward_conv_scratch})); VLOG(1) << "Canonicalizing backward input conv"; VLOG(1) << "Replacing:\n " << backward_conv->ToString() << "\nwith:\n " << new_tuple->ToString(); TF_CHECK_OK(computation->ReplaceInstruction(backward_conv, new_tuple)); return true; } absl::StatusOr<bool> ConvPaddingLegalization::RunOnComputation( HloComputation* computation) { bool changed = false; std::vector<HloCustomCallInstruction*> convs; for (auto* instr : computation->instructions()) { if (IsCustomCallToDnnConvolution(*instr)) { convs.push_back(Cast<HloCustomCallInstruction>(instr)); } } for (HloCustomCallInstruction* instruction : convs) { TF_ASSIGN_OR_RETURN(auto kind, GetCudnnConvKind(instruction)); changed |= [&] { switch (kind) { case CudnnConvKind::kForward: case CudnnConvKind::kForwardActivation: case CudnnConvKind::kForwardGraph: return CanonicalizeForwardConvolution(instruction); case CudnnConvKind::kBackwardInput: return CanonicalizeBackwardInputConvolution(instruction); case CudnnConvKind::kBackwardFilter: return CanonicalizeBackwardFilterConvolution(instruction); } }(); } return changed; } absl::StatusOr<bool> ConvPaddingLegalization::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { bool changed = false; for (HloComputation* computation : module->MakeNonfusionComputations(execution_threads)) { TF_ASSIGN_OR_RETURN(bool result, RunOnComputation(computation)); changed |= result; } return changed; } } }
#include "xla/service/gpu/transforms/conv_padding_legalization.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/service/gpu/cublas_cudnn.h" #include "xla/service/pattern_matcher.h" #include "xla/service/pattern_matcher_gmock.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/test.h" #include "xla/tests/hlo_test_base.h" #include "xla/xla_data.pb.h" #include "tsl/platform/test.h" namespace xla { namespace gpu { namespace { namespace m = ::xla::match; using ConvPaddingLegalizationTest = HloTestBase; TEST_F(ConvPaddingLegalizationTest, BackwardInputConvolve) { auto module = ParseAndReturnVerifiedModule(R"( HloModule convolution_module ENTRY %convolution (operand f64[2,2,2,3]{3,2,1,0}) -> (f64[2,2,4,4]{3,2,1,0}, u8[0]) { %operand = f64[2,2,2,3]{3,2,1,0} parameter(0) %kernel = f64[2,3,2,3]{3,2,1,0} constant( { { { { 0.29629629629629628, 0.30246913580246915, 0.30864197530864196 }, { 0.31481481481481483, 0.32098765432098764, 0.3271604938271605 } }, { { 0.25925925925925924, 0.26543209876543211, 0.27160493827160492 }, { 0.27777777777777779, 0.2839506172839506, 0.29012345679012347 } }, { { 0.22222222222222221, 0.22839506172839505, 0.23456790123456789 }, { 0.24074074074074073, 0.24691358024691357, 0.25308641975308643 } } }, { { { 0.18518518518518517, 0.19135802469135801, 0.19753086419753085 }, { 0.20370370370370369, 0.20987654320987653, 0.21604938271604937 } }, { { 0.14814814814814814, 0.15432098765432098, 0.16049382716049382 }, { 0.16666666666666666, 0.1728395061728395, 0.17901234567901234 } }, { { 0.1111111111111111, 0.11728395061728394, 0.12345679012345678 }, { 0.12962962962962962, 0.13580246913580246, 0.1419753086419753 } } } }) %reverse = f64[2,3,2,3]{3,2,1,0} reverse(%kernel), dimensions={0,1} ROOT %custom-call = (f64[2,2,4,4]{3,2,1,0}, u8[0]{0}) custom-call(f64[2,2,2,3]{3,2,1,0} %operand, f64[2,3,2,3]{3,2,1,0} %reverse), window={size=2x3 stride=2x2 pad=0_0x0_1}, dim_labels=bf01_01io->b01f, custom_call_target="__cudnn$convBackwardInput", backend_config="{\"algorithm\":\"0\",\"tensor_ops_enabled\":false,\"conv_result_scale\":1,\"activation_mode\":\"0\",\"side_input_scale\":0}" } )") .value(); ASSERT_TRUE(ConvPaddingLegalization().Run(module.get()).value()); auto root = module->entry_computation()->root_instruction(); EXPECT_THAT(root, GmockMatch(m::Tuple( m::Slice(m::GetTupleElement( m::CustomCall({kCudnnConvBackwardInputCallTarget}, m::Op(), m::Reverse(m::Constant())), 0)), m::GetTupleElement()))); auto slice = root->operand(0); Shape expected_slice_shape = ShapeUtil::MakeShape(F64, {2, 2, 4, 4}); EXPECT_TRUE(ShapeUtil::Equal(slice->shape(), expected_slice_shape)); auto conv = slice->operand(0); Shape expected_conv_shape = ShapeUtil::MakeShape(F64, {2, 2, 4, 5}); EXPECT_TRUE(ShapeUtil::Equal(conv->shape(), expected_conv_shape)); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/conv_padding_legalization.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/conv_padding_legalization_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
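The test above only exercises the backward-input path; CanonicalizeForwardConvolution follows the same pattern, hoisting asymmetric padding out of the cuDNN custom call into an explicit pad on the input operand. A minimal sketch of such a check, assuming the same fixture and headers as the test file above; the module string, shapes, and expectations are illustrative and not from the original suite:

TEST_F(ConvPaddingLegalizationTest, ForwardConvolveAsymmetricPadding) {
  auto module = ParseAndReturnVerifiedModule(R"(
    HloModule m
    ENTRY entry {
      input = f32[1,4,4,2] parameter(0)
      filter = f32[2,2,2,3] parameter(1)
      ROOT conv = (f32[1,4,4,3], u8[0]) custom-call(input, filter),
        window={size=2x2 pad=0_1x0_1}, dim_labels=b01f_01io->b01f,
        custom_call_target="__cudnn$convForward"
    })")
                    .value();
  ASSERT_TRUE(ConvPaddingLegalization().Run(module.get()).value());
  // The rewritten custom call keeps symmetric window padding; the extra high
  // padding is applied by an explicit kPad on the input operand instead.
  const HloInstruction* conv = module->entry_computation()->root_instruction();
  ASSERT_EQ(conv->opcode(), HloOpcode::kCustomCall);
  EXPECT_EQ(conv->operand(0)->opcode(), HloOpcode::kPad);
}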
7f4f04cd-ce97-42db-8c4c-727b29e637dd
cpp
tensorflow/tensorflow
cudnn_fused_conv_rewriter
third_party/xla/xla/service/gpu/transforms/cudnn_fused_conv_rewriter.cc
third_party/xla/xla/service/gpu/transforms/cudnn_fused_conv_rewriter_test.cc
#include "xla/service/gpu/transforms/cudnn_fused_conv_rewriter.h" #include <algorithm> #include <array> #include <cstdint> #include <functional> #include <limits> #include <optional> #include <string> #include <tuple> #include <utility> #include <variant> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/container/inlined_vector.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" #include "absl/strings/str_join.h" #include "absl/strings/string_view.h" #include "xla/comparison_util.h" #include "xla/debug_options_flags.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/literal.h" #include "xla/primitive_util.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/cublas_cudnn.h" #include "xla/service/hlo_creation_utils.h" #include "xla/service/pattern_matcher.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/stream_executor/device_description.h" #include "xla/stream_executor/dnn.h" #include "xla/stream_executor/semantic_version.h" #include "xla/stream_executor/stream_executor.h" #include "xla/util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/ml_dtypes.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { namespace m = match; bool IsConvCustomCall(const HloInstruction* instr) { return instr->opcode() == HloOpcode::kCustomCall && (instr->custom_call_target() == kCudnnConvForwardCallTarget || instr->custom_call_target() == kCudnnConvBiasActivationForwardCallTarget); } bool IsConvDepthwise(const HloInstruction* instr) { int64_t feature_group_count = instr->feature_group_count(); if (feature_group_count == 1) { return false; } const HloInstruction* input = instr->operand(0); int64_t input_feature_dimension = instr->convolution_dimension_numbers().input_feature_dimension(); int64_t input_feature_count = input->shape().dimensions(input_feature_dimension); return input_feature_count == feature_group_count; } bool IsNonDepthwiseConvCustomCall(const HloInstruction* instr) { return IsConvCustomCall(instr) && !IsConvDepthwise(instr); } bool IsROCm(se::GpuComputeCapability cc) { return std::holds_alternative<se::RocmComputeCapability>(cc); } bool ShouldUseCudnnRuntimeFusion(const DebugOptions& debug_opts, se::GpuComputeCapability cc) { const auto* cuda_cc = std::get_if<se::CudaComputeCapability>(&cc); if (cuda_cc != nullptr) return debug_opts.xla_gpu_use_runtime_fusion() && cuda_cc->IsAtLeast(7, 5); else return true; } bool IsSuitableForCudnnRuntimeFusion(HloInstruction* conv) { if (conv->operands().size() > 3) { return false; } if (conv->operand(0)->shape().element_type() != F16) { return false; } const Shape& shape = conv->operand(1)->shape(); int64_t num_input_features = shape.dimensions( conv->convolution_dimension_numbers().kernel_input_feature_dimension()); int64_t num_output_features = shape.dimensions( conv->convolution_dimension_numbers().kernel_output_feature_dimension()); if (num_input_features % 2 != 0 || num_output_features % 2 != 0) { return false; } return true; } bool IsLosslesslyConvertibleTo(const HloInstruction* instr, PrimitiveType dst_ty) { if (instr->shape().element_type() == dst_ty) { return true; } if (Match(instr, m::Convert(m::Op().WithElementType(dst_ty)))) { return primitive_util::CastPreservesValues(dst_ty, 
instr->shape().element_type()); } if (instr->opcode() == HloOpcode::kConstant) { if (!instr->shape().IsArray()) { return false; } PrimitiveType orig_ty = instr->shape().element_type(); absl::StatusOr<Literal> converted1 = instr->literal().Convert(dst_ty); if (!converted1.ok()) { return false; } absl::StatusOr<Literal> converted2 = converted1->Convert(orig_ty); if (!converted2.ok()) { return false; } return instr->literal() == *converted2; } if (instr->opcode() == HloOpcode::kBroadcast || instr->opcode() == HloOpcode::kReshape || instr->opcode() == HloOpcode::kTranspose) { return IsLosslesslyConvertibleTo(instr->operand(0), dst_ty); } return false; } bool IsLosslesslyConvertibleToS8(const HloInstruction* instr) { return IsLosslesslyConvertibleTo(instr, S8); } bool IsLosslesslyConvertibleToF16(const HloInstruction* instr) { return IsLosslesslyConvertibleTo(instr, F16); } absl::StatusOr<HloInstruction*> EnsureIsConvBiasActivation( HloInstruction* conv) { CHECK_EQ(conv->opcode(), HloOpcode::kCustomCall); if (conv->custom_call_target() == kCudnnConvBiasActivationForwardCallTarget) { return conv; } if (conv->custom_call_target() == kCudnnConvForwardCallTarget) { HloComputation* comp = conv->parent(); const Shape& shape = conv->shape().tuple_shapes(0); int64_t num_output_features = shape.dimensions( conv->convolution_dimension_numbers().output_feature_dimension()); PrimitiveType bias_ty; if (primitive_util::IsIntegralType(shape.element_type())) { bias_ty = F32; } else { bias_ty = shape.element_type(); } auto bias = BroadcastZeros(comp, bias_ty, {num_output_features}); absl::InlinedVector<HloInstruction*, 3> new_operands( conv->operands().begin(), conv->operands().end()); new_operands.push_back(bias); HloInstruction* new_conv = comp->AddInstruction( conv->CloneWithNewOperands(conv->shape(), new_operands)); TF_RETURN_IF_ERROR(comp->ReplaceInstruction(conv, new_conv)); new_conv->set_custom_call_target(kCudnnConvBiasActivationForwardCallTarget); comp->parent()->SetAndUniquifyInstrName(new_conv, "cudnn-conv-bias-activation"); return new_conv; } return FailedPrecondition("Unsupported conv: %s", conv->ToString()); } absl::StatusOr<bool> FuseConvertTypeIntoConv(HloComputation* comp, PrimitiveType conv_type, PrimitiveType cvt_type) { bool changed = false; for (auto instr : comp->MakeInstructionPostOrder()) { HloInstruction* conv = nullptr; auto tuple_elem = m::GetTupleElement(m::Op(&conv).WithPredicate(IsConvCustomCall), 0) .WithElementType(conv_type); auto pattern = m::Convert(tuple_elem.WithOneUser()).WithElementType(cvt_type); if (!Match(instr, pattern)) { continue; } if (!ConsumeFuel("cudnn-fused-convolution-rewriter", [&] { return absl::StrCat("FuseConvertTypeIntoConv: ", conv->ToString()); })) { continue; } Shape new_shape = conv->shape(); new_shape.mutable_tuple_shapes(0)->set_element_type(cvt_type); HloInstruction* new_conv = comp->AddInstruction(conv->CloneWithNewShape(new_shape)); comp->parent()->SetAndUniquifyInstrName(new_conv, conv->name()); TF_ASSIGN_OR_RETURN(HloInstruction * new_gte, MakeGetTupleElementHlo(new_conv, 0)); TF_RETURN_IF_ERROR(comp->ReplaceInstruction(instr, new_gte)); changed = true; } return changed; } struct ConvConvertTypes { PrimitiveType convolution_type; PrimitiveType conversion_type; }; absl::StatusOr<bool> FuseRemoveConvertInConv(HloComputation* comp) { bool changed = false; std::array<ConvConvertTypes, 3> types{{ {S32, F32}, {S8, F32}, {F32, S8}, }}; for (auto [conv_type, cvt_type] : types) { TF_ASSIGN_OR_RETURN(bool curr_change, FuseConvertTypeIntoConv(comp, 
conv_type, cvt_type)); changed |= curr_change; } return changed; } absl::StatusOr<bool> FuseConvAlpha(HloComputation* comp) { bool changed = false; for (auto instr : comp->MakeInstructionPostOrder()) { HloInstruction* conv = nullptr; HloInstruction* gte = nullptr; HloInstruction* alpha = nullptr; auto pattern = m::MultiplyAnyOrder( m::GetTupleElement( &gte, m::Op(&conv).WithPredicate(IsNonDepthwiseConvCustomCall), 0) .WithOneUse(), m::Broadcast(m::ConstantEffectiveScalar(&alpha))); if (!Match(instr, pattern)) { continue; } PrimitiveType alpha_ty = gte->shape().element_type() == F64 ? F64 : F32; if (!IsLosslesslyConvertibleTo(alpha, alpha_ty)) { continue; } TF_ASSIGN_OR_RETURN(auto gpu_config, conv->backend_config<GpuBackendConfig>()); CudnnConvBackendConfig& config = *gpu_config.mutable_cudnn_conv_backend_config(); if (config.conv_result_scale() != 1) { continue; } if (!ConsumeFuel("cudnn-fused-convolution-rewriter", [&] { return absl::StrCat("FuseConvAlpha: ", conv->ToString()); })) { continue; } TF_ASSIGN_OR_RETURN(conv, EnsureIsConvBiasActivation(conv)); TF_ASSIGN_OR_RETURN(Literal alpha_f64, alpha->literal().Convert(F64)); config.set_conv_result_scale(alpha_f64.GetFirstElement<double>()); TF_RETURN_IF_ERROR(conv->set_backend_config(gpu_config)); TF_RETURN_IF_ERROR(conv->parent()->ReplaceInstruction(instr, gte)); changed = true; } return changed; } class GraphString { public: GraphString() = default; bool AppendOp(std::string op_name, HloInstruction* op, std::vector<HloInstruction*> operands = {}) { std::optional<int64_t> operand_uid; int num_operands_in_graph = 0; for (HloInstruction* operand : operands) { if (OpInGraph(operand->unique_id())) { num_operands_in_graph++; if (num_operands_in_graph > 1) { return false; } operand_uid = operand->unique_id(); } } graph_.emplace_back(OpDescriptor( {op->unique_id(), op->shape().element_type(), op_name, operand_uid})); return true; } void ChangeDataType(PrimitiveType type) { DCHECK(!graph_.empty()); graph_.back().output_type = type; } std::string Graph() const { std::string graph; for (OpDescriptor op : graph_) { graph.append(std::to_string(op.uid)); graph.append(":[" + primitive_util::LowercasePrimitiveTypeName(op.output_type) + "]"); graph.append(op.name); graph.append("("); if (op.operand.has_value()) { graph.append(std::to_string(*op.operand)); } graph.append(");"); } return graph; } bool OpInGraph(int64_t uid, std::string op_name = "") const { auto op_filter = [&](OpDescriptor op) -> bool { if (op_name.empty()) { return op.uid == uid; } else { return op.uid == uid && op.name == op_name; } }; return std::find_if(graph_.begin(), graph_.end(), op_filter) != graph_.end(); } private: struct OpDescriptor { int64_t uid; PrimitiveType output_type; std::string name; std::optional<int64_t> operand; }; std::vector<OpDescriptor> graph_; }; bool IsF8Type(const HloInstruction* instr) { return primitive_util::IsF8Type(instr->shape().element_type()); } bool IsScalar(const HloInstruction* instr) { return ShapeUtil::IsScalar(instr->shape()); } std::optional<PrimitiveType> IsSaturatingCastToF8(HloInstruction* instr) { HloInstruction *op, *clamp_lower, *clamp_upper; if (Match(instr, m::Convert( &op, m::Clamp(m::Broadcast(m::ConstantScalar(&clamp_lower)), m::Op(), m::Broadcast(m::ConstantScalar(&clamp_upper))))) && ((op->shape().element_type() == F8E4M3FN && clamp_lower->literal().IsAllFloat(static_cast<float>( std::numeric_limits<tsl::float8_e4m3fn>::lowest())) && clamp_upper->literal().IsAllFloat(static_cast<float>( 
std::numeric_limits<tsl::float8_e4m3fn>::max()))) || (op->shape().element_type() == F8E5M2 && clamp_lower->literal().IsAllFloat(static_cast<float>( std::numeric_limits<tsl::float8_e5m2>::lowest())) && clamp_upper->literal().IsAllFloat(static_cast<float>( std::numeric_limits<tsl::float8_e5m2>::max()))))) { return op->shape().element_type(); } return std::nullopt; } bool AppliesMaxReduce(HloInstruction* op) { HloComputation* reduce_comp = op->to_apply(); HloInstruction* reduce_comp_root = reduce_comp->root_instruction(); return ShapeUtil::IsScalar(op->shape()) && ShapeUtil::IsScalar(op->operand(1)->shape()) && op->operand(1)->IsConstant() && op->operand(1)->literal().GetAsDouble({}) <= 0. && reduce_comp_root->opcode() == HloOpcode::kMaximum && reduce_comp_root->operand(0)->opcode() == HloOpcode::kParameter && reduce_comp_root->operand(1)->opcode() == HloOpcode::kParameter; } void CaptureConvGraphRecursive(HloInstruction* instr, std::vector<HloInstruction*>& operands, std::vector<HloInstruction*>& aux_outputs, GraphString& graph_string, absl::flat_hash_set<int>& visited_instrs, HloInstruction*& final_instr) { if (!visited_instrs.emplace(instr->unique_id()).second) { return; } final_instr = instr; GraphString init_graph_string = graph_string; std::vector<HloInstruction*> init_operands = operands, init_aux_outputs = aux_outputs; int num_linear_users = 0, num_nonlinear_users = 0; for (HloInstruction* user : instr->users()) { HloInstruction *op, *operand0, *operand1; if (Match(user, m::AddAnyOrder(&op, m::Op(&operand0), m::Op(&operand1)))) { if (graph_string.AppendOp("add", op, {operand0, operand1})) { operands.push_back(operand0 == instr ? operand1 : operand0); num_linear_users++; CaptureConvGraphRecursive(user, operands, aux_outputs, graph_string, visited_instrs, final_instr); } continue; } if (Match(user, m::MultiplyAnyOrder(&op, m::Op(&operand0), m::Broadcast(m::Op(&operand1)))) && ShapeUtil::IsScalar(operand1->shape())) { if (graph_string.AppendOp("scale", op, {operand0, operand1})) { operands.push_back(operand1); num_linear_users++; CaptureConvGraphRecursive(user, operands, aux_outputs, graph_string, visited_instrs, final_instr); } continue; } if (Match(user, m::Divide(&op, m::Op(&operand0), m::Broadcast(m::Op(&operand1)))) && ShapeUtil::IsScalar(operand1->shape())) { if (graph_string.AppendOp("invscale", op, {operand0, operand1})) { operands.push_back(operand1); num_linear_users++; CaptureConvGraphRecursive(user, operands, aux_outputs, graph_string, visited_instrs, final_instr); } continue; } if (Match(user, m::MaximumAnyOrder(&op, m::Op(&operand0), m::Broadcast(m::ConstantScalar(0))))) { if (graph_string.AppendOp("relu", op, {operand0})) { num_linear_users++; CaptureConvGraphRecursive(user, operands, aux_outputs, graph_string, visited_instrs, final_instr); } continue; } if (Match(user, m::Reduce(&op, m::Op(&operand0), m::Op())) && graph_string.OpInGraph(operand0->unique_id(), "relu") && AppliesMaxReduce(op)) { if (graph_string.AppendOp("amax", op, {operand0})) { aux_outputs.emplace_back(op); num_nonlinear_users++; } continue; } if (!user->users().empty()) { HloInstruction* users_user = user->users()[0]; std::optional<PrimitiveType> f8_type = IsSaturatingCastToF8(users_user); if (f8_type.has_value()) { graph_string.ChangeDataType(f8_type.value()); num_linear_users++; CaptureConvGraphRecursive(users_user, operands, aux_outputs, graph_string, visited_instrs, final_instr); continue; } if (Match(users_user, m::Reduce(&op, m::Abs(m::Op(&operand0)), m::Op())) && AppliesMaxReduce(op)) { if 
(graph_string.AppendOp("amax", op, {operand0})) { aux_outputs.emplace_back(op); num_nonlinear_users++; } continue; } } } if (num_linear_users > 1 || num_nonlinear_users > 1 || num_linear_users + num_nonlinear_users < instr->user_count()) { graph_string = init_graph_string; operands = init_operands; aux_outputs = init_aux_outputs; final_instr = instr; } } absl::StatusOr< std::tuple<std::vector<HloInstruction*>, std::vector<HloInstruction*>, GraphString, HloInstruction*>> CaptureConvGraph(HloInstruction* instr, HloInstruction* convolution, HloInstruction* wide_input, HloInstruction* wide_filter, HloInstruction* input_scale, HloInstruction* filter_scale, bool x_mult_scale, bool w_mult_scale) { GraphString graph_string; graph_string.AppendOp("conv", instr); HloInstruction *input_scaled_conv, *filter_scaled_conv; if (input_scale) { TF_RETURN_IF_ERROR(convolution->ReplaceOperandWith(0, wide_input)); HloInstruction* bcast_input_scale = instr->AddInstruction( HloInstruction::CreateBroadcast(instr->shape(), input_scale, {})); input_scaled_conv = instr->AddInstruction(HloInstruction::CreateBinary( instr->shape(), x_mult_scale ? HloOpcode::kMultiply : HloOpcode::kDivide, instr, bcast_input_scale)); TF_RETURN_IF_ERROR(instr->ReplaceAllUsesWith(input_scaled_conv)); } if (filter_scale) { TF_RETURN_IF_ERROR(convolution->ReplaceOperandWith(1, wide_filter)); HloInstruction* bcast_filter_scale = instr->AddInstruction( HloInstruction::CreateBroadcast(instr->shape(), filter_scale, {})); filter_scaled_conv = instr->AddInstruction(HloInstruction::CreateBinary( instr->shape(), w_mult_scale ? HloOpcode::kMultiply : HloOpcode::kDivide, input_scale ? input_scaled_conv : instr, bcast_filter_scale)); TF_RETURN_IF_ERROR((input_scale ? input_scaled_conv : instr) ->ReplaceAllUsesWith(filter_scaled_conv)); } std::vector<HloInstruction*> operands, aux_outputs; absl::flat_hash_set<int> visited_instrs; HloInstruction* final_instr; CaptureConvGraphRecursive(instr, operands, aux_outputs, graph_string, visited_instrs, final_instr); return std::make_tuple(operands, aux_outputs, graph_string, final_instr); } absl::StatusOr<bool> F8GraphConv(HloComputation* comp, se::CudaComputeCapability cc, se::dnn::VersionInfo dnn_version, const se::SemanticVersion& toolkit_version) { bool changed = false; if (dnn_version < se::dnn::VersionInfo(8, 9, 0)) { return false; } if (toolkit_version < se::SemanticVersion{12, 0, 0}) { return false; } if (!cc.IsAtLeast(se::CudaComputeCapability::HOPPER)) { return false; } for (auto instr : comp->MakeInstructionPostOrder()) { HloInstruction *convolution, *gte, *input, *filter, *input_scale = nullptr, *filter_scale = nullptr, *input_scale_op = nullptr, *filter_scale_op = nullptr, *wide_input = nullptr, *wide_filter = nullptr; auto conv_operand_maybe_scaled = [](HloInstruction** operand, HloInstruction** wide_operand, HloInstruction** scale_op, HloInstruction** scale) { return m::AnyOf<HloInstruction>( m::Op(operand).WithPredicate(IsF8Type), m::Convert(wide_operand, m::Op(operand).WithPredicate(IsF8Type)), m::Divide( scale_op, m::Convert(wide_operand, m::Op(operand).WithPredicate(IsF8Type)), m::Broadcast(m::Op(scale).WithPredicate(IsScalar))), m::MultiplyAnyOrder( scale_op, m::Convert(wide_operand, m::Op(operand).WithPredicate(IsF8Type)), m::Broadcast(m::Op(scale).WithPredicate(IsScalar)))); }; auto pattern = m::GetTupleElement( &gte, m::CustomCall( &convolution, conv_operand_maybe_scaled(&input, &wide_input, &input_scale_op, &input_scale), conv_operand_maybe_scaled(&filter, &wide_filter, &filter_scale_op, 
&filter_scale)) .WithPredicate(IsConvCustomCall), 0); if (Match(instr, pattern)) { if (!ConsumeFuel("cudnn-fused-convolution-rewriter", [&] { return absl::StrCat("F8GraphConv: ", convolution->ToString()); })) { continue; } std::vector<HloInstruction*> operands, aux_outputs; GraphString graph_string; HloInstruction* final_instr; TF_ASSIGN_OR_RETURN( std::tie(operands, aux_outputs, graph_string, final_instr), CaptureConvGraph( instr, convolution, wide_input, wide_filter, input_scale, filter_scale, input_scale_op ? input_scale_op->opcode() == HloOpcode::kMultiply : false, filter_scale_op ? filter_scale_op->opcode() == HloOpcode::kMultiply : false)); TF_ASSIGN_OR_RETURN(auto gpu_config, convolution->backend_config<GpuBackendConfig>()); CudnnConvBackendConfig& config = *gpu_config.mutable_cudnn_conv_backend_config(); config.set_serialized_graph(graph_string.Graph()); operands.insert(operands.begin(), input); operands.insert(operands.begin() + 1, filter); std::vector<Shape> output_shapes; output_shapes.emplace_back(ShapeUtil::ChangeElementType( ShapeUtil::GetTupleElementShape(convolution->shape(), 0), final_instr->shape().element_type())); for (HloInstruction* aux_output : aux_outputs) { output_shapes.emplace_back(aux_output->shape()); } output_shapes.emplace_back( ShapeUtil::GetTupleElementShape(convolution->shape(), 1)); HloInstruction* new_convolution = comp->AddInstruction(convolution->CloneWithNewOperands( ShapeUtil::MakeTupleShape(output_shapes), operands)); new_convolution->set_custom_call_target(kCudnnConvForwardGraphCallTarget); TF_RETURN_IF_ERROR(new_convolution->set_backend_config(gpu_config)); TF_ASSIGN_OR_RETURN(HloInstruction * new_gte, MakeGetTupleElementHlo(new_convolution, 0)); TF_RETURN_IF_ERROR(comp->ReplaceInstruction(final_instr, new_gte)); for (int i = 0; i < aux_outputs.size(); ++i) { TF_ASSIGN_OR_RETURN(HloInstruction * new_gte, MakeGetTupleElementHlo(new_convolution, i + 1)); TF_RETURN_IF_ERROR(comp->ReplaceInstruction(aux_outputs[i], new_gte)); } changed = true; } } return changed; } absl::StatusOr<bool> FuseBiasOrSideInput(HloComputation* comp) { bool changed = false; for (auto instr : comp->MakeInstructionPostOrder()) { HloInstruction* conv = nullptr; HloInstruction* gte = nullptr; HloInstruction* addend = nullptr; auto pattern = m::AddAnyOrder( m::GetTupleElement(&gte, m::Op(&conv) .WithPredicate(IsNonDepthwiseConvCustomCall) .WithOneUse(), 0) .WithOneUse(), m::Op(&addend)); if (!Match(instr, pattern)) { continue; } if (!ConsumeFuel("cudnn-fused-convolution-rewriter", [&] { return absl::StrCat("FuseBiasOrSideInput: ", conv->ToString()); })) { continue; } if (conv->custom_call_target() == kCudnnConvForwardCallTarget) { TF_ASSIGN_OR_RETURN(conv, EnsureIsConvBiasActivation(conv)); } TF_ASSIGN_OR_RETURN(auto gpu_config, conv->backend_config<GpuBackendConfig>()); CudnnConvBackendConfig& config = *gpu_config.mutable_cudnn_conv_backend_config(); if (config.activation_mode() != se::dnn::kNone) { continue; } bool can_accept_bias = Match(conv->operand(2), m::Broadcast(m::ConstantEffectiveScalar(0))); bool can_accept_side_input = conv->operand_count() < 4; PrimitiveType conv_ty = gte->shape().element_type(); PrimitiveType bias_ty = primitive_util::IsFloatingPointType(conv_ty) ? 
conv_ty : F32; bool addend_may_be_rank1_bias = addend->opcode() == HloOpcode::kBroadcast && addend->dimensions().size() == 1 && addend->dimensions(0) == conv->convolution_dimension_numbers().output_feature_dimension() && IsLosslesslyConvertibleTo(addend, bias_ty); bool addend_may_be_rank0_bias = addend->opcode() == HloOpcode::kBroadcast && addend->dimensions().empty() && IsLosslesslyConvertibleTo(addend, bias_ty); absl::InlinedVector<HloInstruction*, 4> new_operands( conv->operands().begin(), conv->operands().end()); if (can_accept_bias && addend_may_be_rank1_bias) { new_operands[2] = MakeConvertToHlo(addend->mutable_operand(0), bias_ty, &addend->operand(0)->metadata()); } else if (can_accept_bias && addend_may_be_rank0_bias) { new_operands[2] = MakeBroadcastHlo( MakeConvertToHlo(addend->mutable_operand(0), bias_ty, &addend->operand(0)->metadata()), {}, {gte->shape().dimensions(conv->convolution_dimension_numbers() .output_feature_dimension())}); } else if (can_accept_side_input) { CHECK_EQ(new_operands.size(), 3); new_operands.push_back(addend); config.set_side_input_scale(1); } else { continue; } HloInstruction* new_conv = comp->AddInstruction( conv->CloneWithNewOperands(conv->shape(), new_operands)); comp->parent()->SetAndUniquifyInstrName(new_conv, conv->name()); TF_RETURN_IF_ERROR(new_conv->set_backend_config(gpu_config)); TF_ASSIGN_OR_RETURN(HloInstruction * new_instr, MakeGetTupleElementHlo(new_conv, 0)); TF_RETURN_IF_ERROR(comp->ReplaceInstruction(instr, new_instr)); changed = true; } return changed; } absl::StatusOr<bool> FuseSideInputAlpha(HloComputation* comp) { bool changed = false; for (HloInstruction* instr : comp->MakeInstructionPostOrder()) { HloInstruction* conv; HloInstruction* side_input; auto pattern = m::Op(&conv) .WithPredicate(IsConvCustomCall) .WithOperand(3, m::Op(&side_input)); if (!Match(instr, pattern)) { continue; } TF_ASSIGN_OR_RETURN(auto gpu_config, conv->backend_config<GpuBackendConfig>()); CudnnConvBackendConfig& config = *gpu_config.mutable_cudnn_conv_backend_config(); if (config.side_input_scale() != 1) { continue; } HloInstruction* before_reshape = side_input; while (before_reshape->opcode() == HloOpcode::kReshape || before_reshape->opcode() == HloOpcode::kTranspose) { before_reshape = before_reshape->mutable_operand(0); } PrimitiveType conv_ty = conv->shape().tuple_shapes(0).element_type(); PrimitiveType alpha_ty = conv_ty == F64 ? 
F64 : F32; HloInstruction* base; HloInstruction* alpha; if (!Match( before_reshape, m::MultiplyAnyOrder( m::Op(&base), m::Broadcast(m::ConstantEffectiveScalar(&alpha).WithPredicate( [&](const HloInstruction* instr) { return IsLosslesslyConvertibleTo(instr, alpha_ty); }))))) { continue; } if (!ConsumeFuel("cudnn-fused-convolution-rewriter", [&] { return absl::StrCat("FuseSideInputAlpha: ", conv->ToString()); })) { continue; } std::function<HloInstruction*(const HloInstruction*)> clone = [&](const HloInstruction* instr) { if (instr == before_reshape) { return base; } CHECK(instr->opcode() == HloOpcode::kReshape || instr->opcode() == HloOpcode::kTranspose) << "Must be reshape or transpose: " << instr->ToString(); return comp->AddInstruction(instr->CloneWithNewOperands( instr->shape(), {clone(instr->operand(0))})); }; absl::InlinedVector<HloInstruction*, 4> new_operands( conv->operands().begin(), conv->operands().end()); new_operands[3] = clone(side_input); HloInstruction* new_conv = comp->AddInstruction( conv->CloneWithNewOperands(conv->shape(), new_operands)); comp->parent()->SetAndUniquifyInstrName(new_conv, conv->name()); TF_ASSIGN_OR_RETURN(Literal alpha_f64, alpha->literal().Convert(F64)); config.set_side_input_scale(alpha_f64.GetFirstElement<double>()); TF_RETURN_IF_ERROR(new_conv->set_backend_config(gpu_config)); TF_RETURN_IF_ERROR(comp->ReplaceInstruction(conv, new_conv)); changed = true; } return changed; } absl::StatusOr<bool> FuseElu(HloComputation* comp, se::GpuComputeCapability cc) { if (!ShouldUseCudnnRuntimeFusion(comp->parent()->config().debug_options(), cc)) { return false; } bool changed = false; for (HloInstruction* instr : comp->MakeInstructionPostOrder()) { HloInstruction *gte1, *gte2, *gte3; HloInstruction* conv; HloInstruction* expm1; if (!Match(instr, m::Select(m::Compare(m::GetTupleElement(&gte1, m::Op()), m::Broadcast(m::ConstantEffectiveScalar(0))) .WithComparisonDirection(ComparisonDirection::kGt) .WithOneUse(), m::GetTupleElement( &gte2, m::Op(&conv) .WithPredicate(IsNonDepthwiseConvCustomCall) .WithOneUse(), 0) .WithElementType(F16), m::Op(&expm1) .WithOpcode(HloOpcode::kExpm1) .WithOperand(0, m::GetTupleElement(&gte3, m::Op())) .WithOneUse()))) { continue; } if (gte1 != gte2 || gte2 != gte3 || gte1->user_count() != 3) { continue; } if (!IsSuitableForCudnnRuntimeFusion(conv)) { continue; } TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config, conv->backend_config<GpuBackendConfig>()); CudnnConvBackendConfig& config = *gpu_config.mutable_cudnn_conv_backend_config(); if (config.activation_mode() != se::dnn::kNone) { continue; } if (!ConsumeFuel("cudnn-fused-convolution-rewriter", [&] { return absl::StrCat("FuseElu: ", conv->ToString()); })) { continue; } TF_ASSIGN_OR_RETURN(conv, EnsureIsConvBiasActivation(conv)); config.set_activation_mode(se::dnn::kElu); TF_RETURN_IF_ERROR(conv->set_backend_config(gpu_config)); TF_RETURN_IF_ERROR(comp->ReplaceInstruction(instr, gte1)); changed = true; } return changed; } absl::StatusOr<bool> FuseRelu(HloComputation* comp) { bool changed = false; for (HloInstruction* instr : comp->MakeInstructionPostOrder()) { HloInstruction* gte; HloInstruction* conv; if (!Match(instr, m::MaximumAnyOrder( m::Broadcast(m::ConstantEffectiveScalar(0)), m::GetTupleElement( &gte, m::Op(&conv) .WithPredicate(IsNonDepthwiseConvCustomCall) .WithOneUse()) .WithOneUse()))) { continue; } TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config, conv->backend_config<GpuBackendConfig>()); CudnnConvBackendConfig& config = *gpu_config.mutable_cudnn_conv_backend_config(); if 
(config.activation_mode() != se::dnn::kNone) { continue; } if (!ConsumeFuel("cudnn-fused-convolution-rewriter", [&] { return absl::StrCat("FuseRelu: ", conv->ToString()); })) { continue; } TF_ASSIGN_OR_RETURN(conv, EnsureIsConvBiasActivation(conv)); config.set_activation_mode(se::dnn::kRelu); TF_RETURN_IF_ERROR(conv->set_backend_config(gpu_config)); TF_RETURN_IF_ERROR(comp->ReplaceInstruction(instr, gte)); changed = true; } return changed; } absl::StatusOr<bool> FuseRelu6(HloComputation* comp, se::GpuComputeCapability cc) { if (!ShouldUseCudnnRuntimeFusion(comp->parent()->config().debug_options(), cc)) { return false; } bool changed = false; for (HloInstruction* instr : comp->MakeInstructionPostOrder()) { HloInstruction *gte, *conv; if (!Match( instr, m::Clamp(m::Broadcast(m::ConstantEffectiveScalar(0)), m::GetTupleElement( &gte, m::Op(&conv) .WithPredicate(IsNonDepthwiseConvCustomCall) .WithOneUse()) .WithElementType(F16) .WithOneUse(), m::Broadcast(m::ConstantEffectiveScalar(6))))) { continue; } TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config, conv->backend_config<GpuBackendConfig>()); CudnnConvBackendConfig& config = *gpu_config.mutable_cudnn_conv_backend_config(); if (config.activation_mode() != se::dnn::kNone) { continue; } if (!IsSuitableForCudnnRuntimeFusion(conv)) { continue; } if (!ConsumeFuel("cudnn-fused-convolution-rewriter", [&] { return absl::StrCat("FuseRelu6: ", conv->ToString()); })) { continue; } TF_ASSIGN_OR_RETURN(conv, EnsureIsConvBiasActivation(conv)); config.set_activation_mode(se::dnn::kRelu6); TF_RETURN_IF_ERROR(conv->set_backend_config(gpu_config)); TF_RETURN_IF_ERROR(comp->ReplaceInstruction(instr, gte)); changed = true; } return changed; } absl::StatusOr<bool> FuseLeakyRelu(HloComputation* comp, se::GpuComputeCapability cc) { if (!ShouldUseCudnnRuntimeFusion(comp->parent()->config().debug_options(), cc)) { return false; } bool changed = false; for (HloInstruction* instr : comp->MakeInstructionPostOrder()) { HloInstruction *gte1, *gte2, *gte3, *conv, *alpha; if (!Match(instr, m::Select( m::Compare(m::GetTupleElement(&gte1, m::Op()), m::Broadcast(m::ConstantEffectiveScalar(0))) .WithComparisonDirection(ComparisonDirection::kGt) .WithOneUse(), m::GetTupleElement( &gte2, m::Op(&conv) .WithPredicate(IsNonDepthwiseConvCustomCall) .WithOneUse()) .WithElementType(F16), m::Multiply(m::GetTupleElement(&gte3, m::Op()), m::Broadcast(m::ConstantEffectiveScalar(&alpha))) .WithOneUse()))) { continue; } if (gte1 != gte2 || gte2 != gte3 || gte1->user_count() != 3) { continue; } TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config, conv->backend_config<GpuBackendConfig>()); CudnnConvBackendConfig& config = *gpu_config.mutable_cudnn_conv_backend_config(); if (config.activation_mode() != se::dnn::kNone) { continue; } if (!IsSuitableForCudnnRuntimeFusion(conv)) { continue; } if (!ConsumeFuel("cudnn-fused-convolution-rewriter", [&] { return absl::StrCat("FuseLeakyRelu: ", conv->ToString()); })) { continue; } TF_ASSIGN_OR_RETURN(conv, EnsureIsConvBiasActivation(conv)); config.set_activation_mode(se::dnn::kLeakyRelu); TF_ASSIGN_OR_RETURN(Literal alpha_f64, alpha->literal().Convert(F64)); config.set_leakyrelu_alpha(alpha_f64.GetFirstElement<double>()); TF_RETURN_IF_ERROR(conv->set_backend_config(gpu_config)); TF_RETURN_IF_ERROR(comp->ReplaceInstruction(instr, gte1)); changed = true; } return changed; } absl::StatusOr<bool> FuseConvertToF16(HloComputation* comp) { bool changed = false; for (HloInstruction* instr : comp->MakeInstructionPostOrder()) { HloInstruction* gte = nullptr; 
HloInstruction* conv = nullptr; auto f32_convertible_to_f16_pat = m::Op().WithElementType(F32).WithPredicate( IsLosslesslyConvertibleToF16); if (!MatchAndLogIfFailed( instr, "f16 conv", m::Convert( m::GetTupleElement( &gte, m::Op(&conv) .WithPredicate(IsConvCustomCall) .WithOperand(0, f32_convertible_to_f16_pat) .WithOperand(1, f32_convertible_to_f16_pat) .WithOperandIfPresent(2, f32_convertible_to_f16_pat) .WithOperandIfPresent(3, f32_convertible_to_f16_pat), 0) .WithOneUse()) .WithElementType(F16), VLOG_IS_ON(3), m::Op().WithOperand(0, m::GetTupleElement(m::Op().WithPredicate( IsConvCustomCall))))) { continue; } if (!ConsumeFuel("cudnn-fused-convolution-rewriter", [&] { return absl::StrCat("FuseConvertToF16: ", conv->ToString()); })) { continue; } VLOG(2) << "Matched fp16 conv: " << conv->ToString(); absl::InlinedVector<HloInstruction*, 4> new_operands; for (HloInstruction* operand : conv->operands()) { new_operands.push_back( MakeConvertToHlo(operand, F16, &operand->metadata())); } Shape new_shape = conv->shape(); new_shape.mutable_tuple_shapes(0)->set_element_type(F16); HloInstruction* new_conv = comp->AddInstruction( conv->CloneWithNewOperands(new_shape, new_operands)); comp->parent()->SetAndUniquifyInstrName(new_conv, conv->name()); TF_ASSIGN_OR_RETURN(HloInstruction * new_instr, MakeGetTupleElementHlo(new_conv, 0)); TF_RETURN_IF_ERROR(comp->ReplaceInstruction(instr, new_instr)); changed = true; } return changed; } absl::StatusOr<bool> FuseConvertToS8(HloComputation* comp, se::GpuComputeCapability cc) { if (IsROCm(cc)) return false; bool changed = false; for (HloInstruction* instr : comp->MakeInstructionPostOrder()) { HloInstruction* gte = nullptr; HloInstruction* conv = nullptr; auto conv_pattern = m::Op(&conv) .WithPredicate(IsConvCustomCall) .WithOperand(0, m::Op().WithPredicate(IsLosslesslyConvertibleToS8)) .WithOperand(1, m::Op().WithPredicate(IsLosslesslyConvertibleToS8)); PrimitiveType conv_output_ty; if (MatchAndLogIfFailed( instr, "s8->s8 conv", m::Convert(m::Clamp(m::Broadcast(m::ConstantEffectiveScalar(-128)), m::GetTupleElement( &gte, conv_pattern.WithOperandIfPresent( 3, m::Op().WithPredicate( IsLosslesslyConvertibleToS8)), 0) .WithOneUse(), m::Broadcast(m::ConstantEffectiveScalar(127)))) .WithElementType(S8), VLOG_IS_ON(3), m::Convert(m::Clamp(m::Op(), m::GetTupleElement( m::Op().WithPredicate(IsConvCustomCall)), m::Op())) .WithElementType(S8))) { conv_output_ty = S8; } else if (MatchAndLogIfFailed( instr, "s8->f32 conv", m::GetTupleElement(&gte, conv_pattern.WithOperandIfPresent( 3, m::Op().WithElementType(F32)), 0) .WithElementType(F32), VLOG_IS_ON(3), m::GetTupleElement(m::Op().WithPredicate(IsConvCustomCall)) .WithElementType(F32))) { conv_output_ty = F32; } else { continue; } if (!ConsumeFuel("cudnn-fused-convolution-rewriter", [&] { return absl::StrCat("FuseConvertToS8: ", conv->ToString()); })) { continue; } absl::InlinedVector<HloInstruction*, 4> new_operands( conv->operands().begin(), conv->operands().end()); new_operands[0] = MakeConvertToHlo(new_operands[0], S8, &new_operands[0]->metadata()); new_operands[1] = MakeConvertToHlo(new_operands[1], S8, &new_operands[1]->metadata()); if (new_operands.size() >= 4) { new_operands[3] = MakeConvertToHlo(new_operands[3], conv_output_ty, &new_operands[3]->metadata()); } Shape new_shape = conv->shape(); new_shape.mutable_tuple_shapes(0)->set_element_type(conv_output_ty); HloInstruction* new_conv = comp->AddInstruction( conv->CloneWithNewOperands(new_shape, new_operands)); comp->parent()->SetAndUniquifyInstrName(new_conv, 
conv->name()); TF_ASSIGN_OR_RETURN(HloInstruction * new_instr, MakeGetTupleElementHlo(new_conv, 0)); TF_RETURN_IF_ERROR(comp->ReplaceInstruction(instr, new_instr)); changed = true; } return changed; } absl::Status CheckNoIllegalIntegerConvs(HloComputation* comp) { auto is_integral_not_s8 = [](const Shape& s) { return primitive_util::IsIntegralType(s.element_type()) && s.element_type() != S8; }; std::vector<HloInstruction*> bad_convs; for (HloInstruction* instr : comp->instructions()) { if (!IsConvCustomCall(instr)) { continue; } if (is_integral_not_s8(instr->shape().tuple_shapes(0)) || is_integral_not_s8(instr->operand(0)->shape()) || is_integral_not_s8(instr->operand(1)->shape()) || (instr->operand_count() >= 4 && is_integral_not_s8(instr->operand(3)->shape()))) { bad_convs.push_back(instr); } } if (bad_convs.empty()) { return absl::OkStatus(); } return Unimplemented( R"( Can't lower one or more integer convolutions to idioms supported by CuDNN. CuDNN integer convolutions must have: - s8 input and filter, - f32 bias (if present), - s8 or f32 output, and - s8 side_input (if present) if output is s8. For each of the unsupported convs below, we weren't able to lower one of the operands or the output to the appropriate type. See specific HLO idioms in cudnn_fused_conv_rewriter.h, and see cudnn semantics: https: https: Unsupported convs: %s ******* Full HLO module ******* %s )", absl::StrJoin(bad_convs, "\n", [](std::string* out, HloInstruction* instr) { absl::StrAppend(out, " - ", instr->ToString()); }), comp->parent()->ToString()); } void VlogStats(HloModule* module) { if (!VLOG_IS_ON(1)) { return; } VLOG(1) << "Results of CudnnFusedConvRewriter for " << module->name(); absl::flat_hash_map<std::string, int> stats; for (HloComputation* comp : module->MakeNonfusionComputations()) { for (HloInstruction* instr : comp->instructions()) { if (!Match(instr, m::Op().WithPredicate(IsConvCustomCall))) { continue; } VLOG(3) << instr->ToString(); if (instr->custom_call_target() == kCudnnConvForwardCallTarget) { ++stats["01 non-fused forward convs"]; } else if (instr->custom_call_target() == kCudnnConvBiasActivationForwardCallTarget) { ++stats["02 fused forward convs"]; } PrimitiveType conv_in_ty = instr->operand(0)->shape().element_type(); PrimitiveType conv_out_ty = instr->shape().tuple_shapes(0).element_type(); if (conv_in_ty == F32) { ++stats["10 f32 convs"]; } else if (conv_in_ty == F16) { ++stats["11 f16 convs"]; } else if (conv_in_ty == S8) { if (conv_out_ty == S8) { ++stats["12 s8->s8 convs"]; } else if (conv_out_ty == F32) { ++stats["13 s8->f32 convs"]; } else { LOG(ERROR) << "Unexpected conv: " << instr->ToString(); } } if (instr->operand_count() > 2) { ++stats["20 convs with bias"]; if (Match(instr->operand(2), m::Broadcast(m::ConstantEffectiveScalar(0)))) { ++stats["21 convs with 0 bias"]; } } if (instr->operand_count() > 3) { ++stats["22 convs with side-input"]; } auto gpu_config = instr->backend_config<GpuBackendConfig>(); if (!gpu_config.ok()) { LOG(ERROR) << "Couldn't parse backend config for " << instr->ToString(); continue; } const CudnnConvBackendConfig& config = gpu_config->cudnn_conv_backend_config(); if (config.conv_result_scale() != 1) { ++stats["30 convs with result scale"]; } if (config.side_input_scale() != 0 && config.side_input_scale() != 1) { ++stats["31 convs with side-input scale"]; } ++stats[absl::StrCat( "32 convs with activation mode ", se::dnn::ActivationMode_Name(config.activation_mode()))]; } } std::vector<std::pair<std::string, int>> stats_sorted(stats.begin(), 
stats.end()); absl::c_sort(stats_sorted); for (const auto& kv : stats_sorted) { VLOG(1) << absl::StreamFormat("%4d %s", kv.second, absl::string_view(kv.first).substr(3)); } } } absl::StatusOr<bool> CudnnFusedConvRewriter::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { bool any_changed = false; for (HloComputation* comp : module->MakeNonfusionComputations(execution_threads)) { bool changed = false; if (!IsROCm(compute_capability_)) { auto cc = std::get<se::CudaComputeCapability>(compute_capability_); TF_ASSIGN_OR_RETURN( changed, F8GraphConv(comp, cc, dnn_version_, toolkit_version_)); if (changed) { return changed; } } TF_ASSIGN_OR_RETURN(changed, FuseRemoveConvertInConv(comp)); any_changed |= changed; TF_ASSIGN_OR_RETURN(changed, FuseConvAlpha(comp)); any_changed |= changed; TF_ASSIGN_OR_RETURN(changed, FuseBiasOrSideInput(comp)); any_changed |= changed; TF_ASSIGN_OR_RETURN(changed, FuseBiasOrSideInput(comp)); any_changed |= changed; TF_ASSIGN_OR_RETURN(changed, FuseSideInputAlpha(comp)); any_changed |= changed; TF_ASSIGN_OR_RETURN(changed, FuseRelu(comp)); any_changed |= changed; TF_ASSIGN_OR_RETURN(changed, FuseElu(comp, compute_capability_)); any_changed |= changed; TF_ASSIGN_OR_RETURN(changed, FuseRelu6(comp, compute_capability_)); any_changed |= changed; TF_ASSIGN_OR_RETURN(changed, FuseLeakyRelu(comp, compute_capability_)); any_changed |= changed; TF_ASSIGN_OR_RETURN(changed, FuseConvertToF16(comp)); any_changed |= changed; TF_ASSIGN_OR_RETURN(changed, FuseConvertToS8(comp, compute_capability_)); any_changed |= changed; TF_ASSIGN_OR_RETURN(changed, FuseBiasOrSideInput(comp)); any_changed |= changed; TF_ASSIGN_OR_RETURN(changed, FuseBiasOrSideInput(comp)); any_changed |= changed; TF_ASSIGN_OR_RETURN(changed, FuseSideInputAlpha(comp)); any_changed |= changed; TF_ASSIGN_OR_RETURN(changed, FuseRelu(comp)); any_changed |= changed; TF_ASSIGN_OR_RETURN(changed, FuseElu(comp, compute_capability_)); any_changed |= changed; TF_ASSIGN_OR_RETURN(changed, FuseRelu6(comp, compute_capability_)); any_changed |= changed; TF_ASSIGN_OR_RETURN(changed, FuseLeakyRelu(comp, compute_capability_)); any_changed |= changed; TF_RETURN_IF_ERROR(CheckNoIllegalIntegerConvs(comp)); } VlogStats(module); return any_changed; } } }
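// A minimal usage sketch of the pass defined above, assuming `module` already
// contains the __cudnn$conv* custom calls produced by ConvRewriter (the forms
// this pass pattern-matches). The helper name RunCudnnConvFusion and its
// parameter list are illustrative only and not part of the XLA sources; the
// CudnnFusedConvRewriter constructor arguments mirror how the tests below
// instantiate the pass.
namespace xla {
namespace gpu {
inline absl::StatusOr<bool> RunCudnnConvFusion(
    HloModule* module, se::CudaComputeCapability cc,
    se::dnn::VersionInfo dnn_version, se::SemanticVersion toolkit_version) {
  CudnnFusedConvRewriter fuser(cc, dnn_version, toolkit_version);
  // Run() visits the non-fusion computations and returns true iff at least
  // one convolution custom call was rewritten into a fused form.
  return fuser.Run(module);
}
}  // namespace gpu
}  // namespace xla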
#include "xla/service/gpu/transforms/cudnn_fused_conv_rewriter.h" #include <array> #include <initializer_list> #include <memory> #include <string> #include <utility> #include <variant> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/container/flat_hash_map.h" #include "absl/status/statusor.h" #include "absl/strings/str_format.h" #include "absl/strings/str_replace.h" #include "absl/strings/string_view.h" #include "xla/comparison_util.h" #include "xla/error_spec.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/pass/hlo_pass_fix.h" #include "xla/hlo/pass/hlo_pass_pipeline.h" #include "xla/service/algebraic_simplifier.h" #include "xla/service/convert_mover.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/cublas_cudnn.h" #include "xla/service/gpu/stream_executor_util.h" #include "xla/service/gpu/tests/gpu_codegen_test.h" #include "xla/service/gpu/transforms/conv_rewriter.h" #include "xla/service/hlo_constant_folding.h" #include "xla/service/hlo_module_config.h" #include "xla/service/pattern_matcher.h" #include "xla/service/pattern_matcher_gmock.h" #include "xla/service/reshape_mover.h" #include "xla/stream_executor/device_description.h" #include "xla/stream_executor/dnn.h" #include "xla/stream_executor/semantic_version.h" #include "xla/tests/filecheck.h" #include "xla/tests/hlo_test_base.h" #include "xla/tests/verified_hlo_module.h" #include "xla/tsl/lib/core/status_test_util.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { namespace m = match; using ::testing::HasSubstr; using ::testing::Not; static const std::initializer_list<absl::string_view> kf16f32f64{"f16", "f32", "f64"}; static const std::initializer_list<absl::string_view> kf16f32{"f16", "f32"}; class CudnnFusedConvRewriterHloTest : public HloTestBase { public: bool IsCuda() const { return std::holds_alternative<se::CudaComputeCapability>( backend() .default_stream_executor() ->GetDeviceDescription() .gpu_compute_capability()); } se::CudaComputeCapability GetCudaComputeCapability() const { return backend() .default_stream_executor() ->GetDeviceDescription() .cuda_compute_capability(); } stream_executor::dnn::VersionInfo GetDnnVersion() const { return GetDnnVersionInfoOrDefault(backend().default_stream_executor()); } se::SemanticVersion GetToolkitVersion() const { return backend() .default_stream_executor() ->GetDeviceDescription() .runtime_version(); } CudnnFusedConvRewriterHloTest() : HloTestBase(false, false, {}) {} }; class CudnnFusedConvRewriterTest : public GpuCodegenTest { public: bool IsCuda() const { return std::holds_alternative<se::CudaComputeCapability>( backend() .default_stream_executor() ->GetDeviceDescription() .gpu_compute_capability()); } se::CudaComputeCapability GetCudaComputeCapability() const { return backend() .default_stream_executor() ->GetDeviceDescription() .cuda_compute_capability(); } stream_executor::dnn::VersionInfo GetDnnVersion() const { return GetDnnVersionInfoOrDefault(backend().default_stream_executor()); } stream_executor::SemanticVersion GetToolkitVersion() const { return backend() .default_stream_executor() ->GetDeviceDescription() .runtime_version(); } protected: std::string GetOptimizedHlo(absl::string_view hlo_string) { HloModuleConfig config = GetModuleConfigForTest(); DebugOptions debug_opts = config.debug_options(); debug_opts.add_xla_disable_hlo_passes("cudnn_vectorize_convolutions"); debug_opts.set_xla_gpu_use_runtime_fusion(true); 
config.set_debug_options(debug_opts); auto result = backend().compiler()->RunHloPasses( ParseAndReturnVerifiedModule(hlo_string, config).value(), backend().default_stream_executor(), backend().memory_allocator()); if (!result.status().ok()) { TF_EXPECT_OK(result.status()) << "HLO compilation failed: " << result.status(); return ""; } HloPrintOptions print_opts; print_opts.set_print_operand_shape(false); return (*result)->ToString(print_opts); } void TestMatchWithAllTypes(absl::string_view hlo_string) { for (absl::string_view type : IsCuda() ? kf16f32f64 : kf16f32) { const std::string hlo_with_new_type = absl::StrReplaceAll(hlo_string, {{"TYPE", type}}); std::string optimized_hlo_string = GetOptimizedHlo(hlo_with_new_type); EXPECT_THAT(optimized_hlo_string, Not(HasSubstr(kCudnnConvForwardCallTarget))) << optimized_hlo_string; EXPECT_THAT(optimized_hlo_string, HasSubstr(kCudnnConvBiasActivationForwardCallTarget)); TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_with_new_type)); DebugOptions debug_opts = module->config().debug_options(); debug_opts.set_xla_gpu_use_runtime_fusion(true); module->mutable_config().set_debug_options(debug_opts); EXPECT_TRUE(RunAndCompare(std::move(module), ErrorSpec{0.01})) << optimized_hlo_string; } } void TestClamp(absl::string_view pre_hlo_string, absl::string_view post_hlo_string) { std::string alpha_conv_scalar, alpha_side_input_scalar; std::string elementwise_type; std::string optimized_hlo_string = GetOptimizedHlo(pre_hlo_string); EXPECT_THAT(optimized_hlo_string, Not(HasSubstr("Convert"))); EXPECT_THAT(optimized_hlo_string, HasSubstr("__cudnn$conv")); EXPECT_TRUE(RunAndCompare(pre_hlo_string, ErrorSpec{0.01})) << pre_hlo_string; absl::StatusOr<bool> filecheck_result = RunFileCheck(optimized_hlo_string, post_hlo_string); ASSERT_TRUE(filecheck_result.ok()) << filecheck_result.status(); EXPECT_TRUE(*filecheck_result); } void TestNotMatchWithAllTypes(absl::string_view hlo_string) { for (absl::string_view type : IsCuda() ? 
kf16f32f64 : kf16f32) { const std::string hlo_with_new_type = absl::StrReplaceAll(hlo_string, {{"TYPE", type}}); std::string optimized_hlo_string = GetOptimizedHlo(hlo_with_new_type); SCOPED_TRACE(optimized_hlo_string); EXPECT_THAT(optimized_hlo_string, HasSubstr(kCudnnConvForwardCallTarget)); EXPECT_THAT(optimized_hlo_string, Not(HasSubstr(kCudnnConvBiasActivationForwardCallTarget))); } } void TestF8(std::string pre_hlo_string, std::string custom_call_string, std::string serialized_graph_string) { if (!IsCuda()) return; if (GetCudaComputeCapability().IsAtLeast( se::CudaComputeCapability::HOPPER)) { std::string optimized_hlo_string = GetOptimizedHlo(pre_hlo_string); EXPECT_THAT(optimized_hlo_string, Not(HasSubstr("Convert"))); EXPECT_THAT(optimized_hlo_string, HasSubstr("__cudnn$conv")); EXPECT_TRUE(RunAndCompare(pre_hlo_string, ErrorSpec{0.15, 0.15})) << pre_hlo_string; absl::StatusOr<bool> filecheck_result = RunFileCheck(optimized_hlo_string, custom_call_string); ASSERT_TRUE(filecheck_result.ok()) << filecheck_result.status(); EXPECT_TRUE(*filecheck_result); filecheck_result = RunFileCheck(optimized_hlo_string, serialized_graph_string); ASSERT_TRUE(filecheck_result.ok()) << filecheck_result.status(); EXPECT_TRUE(*filecheck_result); } else { std::string::size_type p0 = custom_call_string.find(':'); std::string::size_type p1 = custom_call_string.find("custom-call"); custom_call_string.erase(p0 + 1, p1 - p0 - 2); p0 = custom_call_string.find(", dim_labels"); custom_call_string.erase(p0); TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(pre_hlo_string)); TF_ASSERT_OK_AND_ASSIGN( bool changed, RunHloPass(ConvRewriter(se::CudaComputeCapability{ se::CudaComputeCapability::HOPPER, 0}), module.get())); EXPECT_TRUE(changed); RunAndFilecheckHloRewrite( module->ToString(HloPrintOptions{}.set_print_operand_shape(false)), CudnnFusedConvRewriter( se::CudaComputeCapability{se::CudaComputeCapability::HOPPER, 0}, GetDnnVersion(), GetToolkitVersion()), custom_call_string); RunAndFilecheckHloRewrite( module->ToString(HloPrintOptions{}.set_print_operand_shape(false)), CudnnFusedConvRewriter( se::CudaComputeCapability{se::CudaComputeCapability::HOPPER, 0}, GetDnnVersion(), GetToolkitVersion()), serialized_graph_string); } } void TestF8Parameterized(std::string template_pre_hlo_string, std::string template_custom_call_string, std::string template_serialized_graph_string) { std::array<absl::string_view, 2> types = {"f8e4m3fn", "f8e5m2"}; std::array<absl::string_view, 2> clamp_lower = {"-448.", "-57344."}; std::array<absl::string_view, 2> clamp_upper = {"448.", "57344."}; absl::flat_hash_map<absl::string_view, absl::string_view> replacements; for (int i = 0; i < 2; ++i) { replacements["<<InputType>>"] = types[i]; for (int j = 0; j < 2; ++j) { replacements["<<FilterType>>"] = types[j]; for (int k = 0; k < 2; ++k) { replacements["<<OutputType>>"] = types[k]; replacements["<<ClampLower>>"] = clamp_lower[k]; replacements["<<ClampUpper>>"] = clamp_upper[k]; TestF8(absl::StrReplaceAll(template_pre_hlo_string, replacements), absl::StrReplaceAll(template_custom_call_string, replacements), absl::StrReplaceAll(template_serialized_graph_string, replacements)); } } } } }; #define MAYBE_SKIP_TEST(CAUSE) \ do { \ if (absl::string_view(CAUSE) == "F8" && IsCuda() && \ (GetToolkitVersion() < se::SemanticVersion{12, 0, 0} || \ GetDnnVersion() < se::dnn::VersionInfo(8, 9, 0))) { \ GTEST_SKIP() << "FP8 convolutions require CUDA 12 and cuDNN 8.9."; \ } \ if (!IsCuda()) { \ GTEST_SKIP() 
<< CAUSE " fusion is only supported on CUDA."; \ } \ } while (0) TEST_F(CudnnFusedConvRewriterTest, TestConvOnly) { TestMatchWithAllTypes(R"( HloModule Test ENTRY Test { zero = TYPE[] constant(0) zeros = TYPE[1,32,9,9] broadcast(zero), dimensions={} input = TYPE[1,17,9,9] parameter(0) filter = TYPE[3,3,17,32] parameter(1) conv = TYPE[1,32,9,9] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1 ROOT relu = TYPE[1,32,9,9] maximum(zeros, conv) })"); } TEST_F(CudnnFusedConvRewriterTest, DontFuseReluWithDepthwiseConv) { TestNotMatchWithAllTypes(R"( HloModule Test ENTRY Test { zero = TYPE[] constant(0) zeros = TYPE[1,17,9,9] broadcast(zero), dimensions={} input = TYPE[1,17,9,9] parameter(0) filter = TYPE[3,3,1,17] parameter(1) conv = TYPE[1,17,9,9] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=17 ROOT relu = TYPE[1,17,9,9] maximum(zeros, conv) })"); } TEST_F(CudnnFusedConvRewriterTest, TestBias) { TestMatchWithAllTypes(R"( HloModule Test ENTRY Test { zero = TYPE[] constant(0) zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={} input = TYPE[1,3,3,64] parameter(0) filter = TYPE[3,3,64,64] parameter(1) bias = TYPE[64] parameter(2) conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1 broadcasted_bias = TYPE[1,3,3,64] broadcast(bias), dimensions={3} add1 = TYPE[1,3,3,64] add(conv, broadcasted_bias) ROOT relu = TYPE[1,3,3,64] maximum(zeros, add1) })"); } TEST_F(CudnnFusedConvRewriterTest, Test3D) { std::string body = R"( HloModule Test ENTRY Test { zero = TYPE[] constant(0) zeros = TYPE[1,3,5,7,64] broadcast(zero), dimensions={} input = TYPE[1,3,5,7,64] parameter(0) filter = TYPE[3,3,3,64,64] parameter(1) bias = TYPE[64] parameter(2) conv = TYPE[1,3,5,7,64] convolution(input, filter), window={size=3x3x3 pad=1_1x1_1x1_1}, dim_labels=b012f_012io->b012f, feature_group_count=1 broadcasted_bias = TYPE[1,3,5,7,64] broadcast(bias), dimensions={4} add1 = TYPE[1,3,5,7,64] add(conv, broadcasted_bias) )"; std::string relu = R"( ROOT relu = TYPE[1,3,5,7,64] maximum(zeros, add1) })"; std::string elu = R"( cmp = pred[1,3,5,7,64] compare(add1, zeros), direction=GT expm1 = TYPE[1,3,5,7,64] exponential-minus-one(add1) ROOT elu = TYPE[1,3,5,7,64] select(cmp, add1, expm1) })"; TestMatchWithAllTypes(body + relu); if (!IsCuda()) TestMatchWithAllTypes(body + elu); } TEST_F(CudnnFusedConvRewriterTest, TestBiasMultiCall) { std::string code = R"( HloModule Test ENTRY Test { zero = TYPE[] constant(0) zeros = TYPE[1,<<<format>>>,64] broadcast(zero), dimensions={} input = TYPE[1,<<<format>>>,64] parameter(0) filter = TYPE[3,3,64,64] parameter(1) bias = TYPE[64] parameter(2) conv = TYPE[1,<<<format>>>,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1 broadcasted_bias = TYPE[1,<<<format>>>,64] broadcast(bias), dimensions={3} add1 = TYPE[1,<<<format>>>,64] add(conv, broadcasted_bias) ROOT relu = TYPE[1,<<<format>>>,64] maximum(zeros, add1) })"; absl::flat_hash_map<absl::string_view, absl::string_view> replacements; replacements["<<<format>>>"] = "3,3"; TestMatchWithAllTypes(absl::StrReplaceAll(code, replacements)); replacements["<<<format>>>"] = "5,5"; TestMatchWithAllTypes(absl::StrReplaceAll(code, replacements)); replacements["<<<format>>>"] = "3,3"; TestMatchWithAllTypes(absl::StrReplaceAll(code, replacements)); } TEST_F(CudnnFusedConvRewriterTest, TestBiasNoRelu) { 
TestMatchWithAllTypes(R"( HloModule Test ENTRY Test { input = TYPE[1,3,3,64] parameter(0) filter = TYPE[3,3,64,64] parameter(1) bias = TYPE[64] parameter(2) conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1 broadcasted_bias = TYPE[1,3,3,64] broadcast(bias), dimensions={3} ROOT add1 = TYPE[1,3,3,64] add(conv, broadcasted_bias) })"); } TEST_F(CudnnFusedConvRewriterTest, DontFuseBiasWithDepthwiseConv) { TestNotMatchWithAllTypes(R"( HloModule Test ENTRY Test { zero = TYPE[] constant(0) zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={} input = TYPE[1,3,3,64] parameter(0) filter = TYPE[3,3,1,64] parameter(1) bias = TYPE[64] parameter(2) conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=64 broadcasted_bias = TYPE[1,3,3,64] broadcast(bias), dimensions={3} add1 = TYPE[1,3,3,64] add(conv, broadcasted_bias) ROOT relu = TYPE[1,3,3,64] maximum(zeros, add1) })"); } TEST_F(CudnnFusedConvRewriterTest, TestElu) { TestMatchWithAllTypes(R"( HloModule Test ENTRY Test { zero = TYPE[] constant(0) zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={} input = TYPE[1,3,3,64] parameter(0) filter = TYPE[3,3,64,64] parameter(1) bias = TYPE[64] parameter(2) conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1 broadcasted_bias = TYPE[1,3,3,64] broadcast(bias), dimensions={3} sum = TYPE[1,3,3,64] add(conv, broadcasted_bias) cmp = pred[1,3,3,64] compare(sum, zeros), direction=GT expm1 = TYPE[1,3,3,64] exponential-minus-one(sum) ROOT elu = TYPE[1,3,3,64] select(cmp, sum, expm1) })"); } TEST_F(CudnnFusedConvRewriterTest, DontFuseEluWithDepthwiseConv) { TestNotMatchWithAllTypes(R"( HloModule Test ENTRY Test { zero = TYPE[] constant(0) zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={} input = TYPE[1,3,3,64] parameter(0) filter = TYPE[3,3,1,64] parameter(1) bias = TYPE[64] parameter(2) conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=64 broadcasted_bias = TYPE[1,3,3,64] broadcast(bias), dimensions={3} sum = TYPE[1,3,3,64] add(conv, broadcasted_bias) cmp = pred[1,3,3,64] compare(sum, zeros), direction=GT expm1 = TYPE[1,3,3,64] exponential-minus-one(sum) ROOT elu = TYPE[1,3,3,64] select(cmp, sum, expm1) })"); } TEST_F(CudnnFusedConvRewriterTest, TestRelu6) { if (IsCuda() && !GetCudaComputeCapability().IsAtLeast( se::CudaComputeCapability::AMPERE)) { GTEST_SKIP() << "Conv-Bias-Relu6 fusion is supported and recommended with " "the Nvidia Ampere+ GPUs."; } TestMatchWithAllTypes(R"( HloModule Test ENTRY Test { zero = TYPE[] constant(0) zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={} six = TYPE[] constant(6) sixes = TYPE[1,3,3,64] broadcast(six), dimensions={} input = TYPE[1,3,3,64] parameter(0) filter = TYPE[3,3,64,64] parameter(1) bias = TYPE[64] parameter(2) conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1 broadcasted_bias = TYPE[1,3,3,64] broadcast(bias), dimensions={3} sum = TYPE[1,3,3,64] add(conv, broadcasted_bias) ROOT relu6 = TYPE[1,3,3,64] clamp(zeros, sum, sixes) })"); } TEST_F(CudnnFusedConvRewriterTest, TestRelu6OddChannels) { if (IsCuda() && !GetCudaComputeCapability().IsAtLeast( se::CudaComputeCapability::AMPERE)) { GTEST_SKIP() << "Conv-Bias-Relu6 fusion is supported and recommended with " "the Nvidia Ampere+ 
GPUs."; } TestMatchWithAllTypes(R"( HloModule Test ENTRY Test { zeros = TYPE[1,384,1024,32] broadcast(TYPE[] constant(0)), dimensions={} sixes = TYPE[1,384,1024,32] broadcast(TYPE[] constant(6)), dimensions={} input = TYPE[1,769,2049,3] parameter(0) filter = TYPE[32,3,3,3] parameter(1) bias = TYPE[32] parameter(2) conv = TYPE[1,384,1024,32] convolution(input, filter), window={size=3x3 stride=2x2}, dim_labels=b01f_o01i->b01f broadcasted_bias = TYPE[1,384,1024,32] broadcast(bias), dimensions={3} sum = add(conv, broadcasted_bias) ROOT relu6 = clamp(zeros, sum, sixes) })"); } TEST_F(CudnnFusedConvRewriterTest, TestLeakyRelu) { if (IsCuda() && !GetCudaComputeCapability().IsAtLeast( se::CudaComputeCapability::AMPERE)) { GTEST_SKIP() << "Conv-Bias-LeakyRelu fusion is supported and recommended with " "the Nvidia Ampere+ GPUs."; } TestMatchWithAllTypes(R"( HloModule Test ENTRY Test { zero = TYPE[] constant(0) zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={} alpha = TYPE[] constant(0.2) alphas = TYPE[1,3,3,64] broadcast(alpha), dimensions={} input = TYPE[1,3,3,64] parameter(0) filter = TYPE[3,3,64,64] parameter(1) bias = TYPE[64] parameter(2) conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1 broadcasted_bias = TYPE[1,3,3,64] broadcast(bias), dimensions={3} sum = TYPE[1,3,3,64] add(conv, broadcasted_bias) cmp = pred[1,3,3,64] compare(sum, zeros), direction=GT mul = TYPE[1,3,3,64] multiply(sum, alphas) ROOT elu = TYPE[1,3,3,64] select(cmp, sum, mul) })"); } TEST_F(CudnnFusedConvRewriterTest, TestSideInputOnly) { TestMatchWithAllTypes(R"( HloModule Test ENTRY Test { zero = TYPE[] constant(0) zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={} input = TYPE[1,3,3,64] parameter(0) filter = TYPE[3,3,64,64] parameter(1) side_input = TYPE[1,3,3,64] parameter(2) conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1 add1 = TYPE[1,3,3,64] add(conv, side_input) ROOT relu = TYPE[1,3,3,64] maximum(zeros, add1) })"); } TEST_F(CudnnFusedConvRewriterTest, DontFuseSideInputWithDepthwiseConv) { TestNotMatchWithAllTypes(R"( HloModule Test ENTRY Test { zero = TYPE[] constant(0) zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={} input = TYPE[1,3,3,64] parameter(0) filter = TYPE[3,3,1,64] parameter(1) side_input = TYPE[1,3,3,64] parameter(2) conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=64 add1 = TYPE[1,3,3,64] add(conv, side_input) ROOT relu = TYPE[1,3,3,64] maximum(zeros, add1) })"); } TEST_F(CudnnFusedConvRewriterTest, TestBiasAndSideInput) { TestMatchWithAllTypes(R"( HloModule Test ENTRY Test { zero = TYPE[] constant(0) zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={} input = TYPE[1,3,3,64] parameter(0) filter = TYPE[3,3,64,64] parameter(1) side_input = TYPE[1,3,3,64] parameter(2) bias = TYPE[64] parameter(3) conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1 broadcasted_bias = TYPE[1,3,3,64] broadcast(bias), dimensions={3} add1 = TYPE[1,3,3,64] add(conv, broadcasted_bias) add2 = TYPE[1,3,3,64] add(add1, side_input) ROOT relu = TYPE[1,3,3,64] maximum(zeros, add2) })"); } TEST_F(CudnnFusedConvRewriterTest, TestScaledConv) { TestMatchWithAllTypes(R"( HloModule Test ENTRY Test { zero = TYPE[] constant(0) zeros = TYPE[1,32,9,9] broadcast(zero), dimensions={} alpha_conv_scalar = TYPE[] 
constant(0.999994934) input = TYPE[1,17,9,9] parameter(0) filter = TYPE[3,3,17,32] parameter(1) conv = TYPE[1,32,9,9] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1 alpha_conv = TYPE[1,32,9,9] broadcast(alpha_conv_scalar), dimensions={} scaled_conv = TYPE[1,32,9,9] multiply(conv, alpha_conv) ROOT relu = TYPE[1,32,9,9] maximum(zeros, scaled_conv) })"); } TEST_F(CudnnFusedConvRewriterTest, DontFuseScaledDepthwiseConv) { TestNotMatchWithAllTypes(R"( HloModule Test ENTRY Test { zero = TYPE[] constant(0) zeros = TYPE[1,17,9,9] broadcast(zero), dimensions={} alpha_conv_scalar = TYPE[] constant(0.999994934) input = TYPE[1,17,9,9] parameter(0) filter = TYPE[3,3,1,17] parameter(1) conv = TYPE[1,17,9,9] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=17 alpha_conv = TYPE[1,17,9,9] broadcast(alpha_conv_scalar), dimensions={} scaled_conv = TYPE[1,17,9,9] multiply(conv, alpha_conv) ROOT relu = TYPE[1,17,9,9] maximum(zeros, scaled_conv) })"); } TEST_F(CudnnFusedConvRewriterTest, TestNoCrashOnInf) { EXPECT_TRUE(RunAndCompare(R"( HloModule Test ENTRY Test { zero = f32[] constant(inf) zeros = f32[1,32,9,9] broadcast(zero), dimensions={} alpha_conv_scalar = f32[] constant(0.999994934) input = f32[1,17,9,9] parameter(0) filter = f32[3,3,17,32] parameter(1) conv = f32[1,32,9,9] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1 alpha_conv = f32[1,32,9,9] broadcast(alpha_conv_scalar), dimensions={} scaled_conv = f32[1,32,9,9] multiply(conv, alpha_conv) ROOT relu = f32[1,32,9,9] maximum(zeros, scaled_conv) })", ErrorSpec{0.01})); } TEST_F(CudnnFusedConvRewriterTest, TestConvAndScaledSideInput) { TestMatchWithAllTypes(R"( HloModule Test ENTRY Test { zero = TYPE[] constant(0) zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={} alpha_side_input_scalar = TYPE[] constant(0.899994934) alpha_side_input = TYPE[1,3,3,64] broadcast(alpha_side_input_scalar), dimensions={} input = TYPE[1,3,3,64] parameter(0) filter = TYPE[3,3,64,64] parameter(1) side_input = TYPE[1,3,3,64] parameter(2) conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1 scaled_side_input = TYPE[1,3,3,64] multiply(side_input, alpha_side_input) add1 = TYPE[1,3,3,64] add(conv, scaled_side_input) ROOT relu = TYPE[1,3,3,64] maximum(zeros, add1) })"); } TEST_F(CudnnFusedConvRewriterTest, DontFuseDepthwiseConvWithScaledSideInput) { TestNotMatchWithAllTypes(R"( HloModule Test ENTRY Test { zero = TYPE[] constant(0) zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={} alpha_side_input_scalar = TYPE[] constant(0.899994934) alpha_side_input = TYPE[1,3,3,64] broadcast(alpha_side_input_scalar), dimensions={} input = TYPE[1,3,3,64] parameter(0) filter = TYPE[3,3,1,64] parameter(1) side_input = TYPE[1,3,3,64] parameter(2) conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=64 scaled_side_input = TYPE[1,3,3,64] multiply(side_input, alpha_side_input) add1 = TYPE[1,3,3,64] add(conv, scaled_side_input) ROOT relu = TYPE[1,3,3,64] maximum(zeros, add1) })"); } TEST_F(CudnnFusedConvRewriterTest, TestScaledConvAndScaledSideInput) { TestMatchWithAllTypes(R"( HloModule Test ENTRY Test { zero = TYPE[] constant(0) zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={} alpha_conv_scalar = TYPE[] constant(0.999994934) alpha_conv = TYPE[1,3,3,64] 
broadcast(alpha_conv_scalar), dimensions={} alpha_side_input_scalar = TYPE[] constant(0.899994934) alpha_side_input = TYPE[1,3,3,64] broadcast(alpha_side_input_scalar), dimensions={} input = TYPE[1,3,3,64] parameter(0) filter = TYPE[3,3,64,64] parameter(1) side_input = TYPE[1,3,3,64] parameter(2) conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1 scaled_conv = TYPE[1,3,3,64] multiply(conv, alpha_conv) scaled_side_input = TYPE[1,3,3,64] multiply(side_input, alpha_side_input) add1 = TYPE[1,3,3,64] add(scaled_conv, scaled_side_input) ROOT relu = TYPE[1,3,3,64] maximum(zeros, add1) })"); } TEST_F(CudnnFusedConvRewriterTest, TestScaledConvAndScaledSideInputWithBias) { TestMatchWithAllTypes(R"( HloModule Test ENTRY Test { zero = TYPE[] constant(0) zeros = TYPE[1,3,3,64] broadcast(zero), dimensions={} alpha_conv_scalar = TYPE[] constant(0.999994934) alpha_conv = TYPE[1,3,3,64] broadcast(alpha_conv_scalar), dimensions={} alpha_side_input_scalar = TYPE[] constant(0.899994934) alpha_side_input = TYPE[1,3,3,64] broadcast(alpha_side_input_scalar), dimensions={} input = TYPE[1,3,3,64] parameter(0) filter = TYPE[3,3,64,64] parameter(1) side_input = TYPE[1,3,3,64] parameter(2) bias = TYPE[64] parameter(3) conv = TYPE[1,3,3,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1 scaled_conv = TYPE[1,3,3,64] multiply(conv, alpha_conv) scaled_side_input = TYPE[1,3,3,64] multiply(side_input, alpha_side_input) broadcasted_bias = TYPE[1,3,3,64] broadcast(bias), dimensions={3} add1 = TYPE[1,3,3,64] add(scaled_conv, broadcasted_bias) add2 = TYPE[1,3,3,64] add(add1, scaled_side_input) ROOT relu = TYPE[1,3,3,64] maximum(zeros, add2) })"); } TEST_F(CudnnFusedConvRewriterTest, TestMatchMaxZeroOnly) { TestNotMatchWithAllTypes(R"( HloModule Test ENTRY Test { point_one = TYPE[] constant(0.1) point_ones = TYPE[1,32,9,9] broadcast(point_one), dimensions={} input = TYPE[1,17,9,9] parameter(0) filter = TYPE[3,3,17,32] parameter(1) conv = TYPE[1,32,9,9] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1 ROOT relu = TYPE[1,32,9,9] maximum(point_ones, conv) })"); } TEST_F(CudnnFusedConvRewriterTest, PreservesMetadata) { const char* kHloString = R"( HloModule Test ENTRY Test { zero = f32[] constant(0) zeros = f32[1,32,9,9] broadcast(zero), dimensions={} input = f32[1,17,9,9] parameter(0) filter = f32[3,3,17,32] parameter(1) conv = f32[1,32,9,9] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1, metadata={op_type="foo" op_name="bar"} ROOT relu = f32[1,32,9,9] maximum(zeros, conv) })"; const std::string optimized_hlo_string = backend() .compiler() ->RunHloPasses( ParseAndReturnVerifiedModule(kHloString, GetModuleConfigForTest()) .value(), backend().default_stream_executor(), backend().memory_allocator()) .value() ->ToString(); EXPECT_THAT(optimized_hlo_string, ::testing::ContainsRegex( R"(custom-call.*metadata=\{op_type="foo" op_name="bar"\})")); } TEST_F(CudnnFusedConvRewriterTest, TestPreservesFeatureGroupCount) { const char* kHloString = R"( HloModule jaxpr_computation__6.19 primitive_computation__1.4 { parameter.5 = f32[] parameter(0) parameter.6 = f32[] parameter(1) ROOT add.7 = f32[] add(parameter.5, parameter.6) } ENTRY jaxpr_computation__7.8 { parameter.11 = f32[2,64,64,53]{3,2,1,0} parameter(1) parameter.10 = f32[3,3,1,53]{3,2,1,0} parameter(0) convolution.12 = 
f32[2,64,64,53]{3,2,1,0} convolution(parameter.11, parameter.10), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=53 constant.13 = f32[] constant(0) broadcast.14 = f32[2,64,64,53]{3,2,1,0} broadcast(constant.13), dimensions={} maximum.15 = f32[2,64,64,53]{3,2,1,0} maximum(convolution.12, broadcast.14) ROOT reduce.17 = f32[] reduce(maximum.15, constant.13), dimensions={0,1,2,3}, to_apply=primitive_computation__1.4 } )"; EXPECT_TRUE(RunAndCompare(kHloString, ErrorSpec{0.01})); } TEST_F(CudnnFusedConvRewriterTest, TestConvF8) { MAYBE_SKIP_TEST("F8"); TestF8( R"( HloModule Test ENTRY Test { input = f8e4m3fn[1,128,6,6] parameter(0) filter = f8e4m3fn[3,3,128,16] parameter(1) ROOT conv_a = f8e4m3fn[1,16,6,6] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1 })", R"( )", R"( )"); } TEST_F(CudnnFusedConvRewriterTest, TestConvScaledOutputF8) { MAYBE_SKIP_TEST("F8"); TestF8( R"( HloModule Test ENTRY Test { input = f8e4m3fn[1,128,6,6] parameter(0) filter = f8e4m3fn[3,3,128,16] parameter(1) input_f32 = f32[1,128,6,6] convert(input) filter_f32 = f32[3,3,128,16] convert(filter) z_scale = f32[] parameter(2) z_scale_bcast = f32[1,16,6,6] broadcast(z_scale), dimensions={} conv_a = f32[1,16,6,6] convolution(input_f32, filter_f32), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1 conv_a_scaled = f32[1,16,6,6] multiply(conv_a, z_scale_bcast) c1 = f32[] constant(-448.) c1_bcast = f32[1,16,6,6] broadcast(c1), dimensions={} c2 = f32[] constant(448.) c2_bcast = f32[1,16,6,6] broadcast(c2), dimensions={} conv_a_clamped = f32[1,16,6,6] clamp(c1_bcast, conv_a_scaled, c2_bcast) ROOT conv_f8 = f8e4m3fn[1,16,6,6] convert(conv_a_clamped) })", R"( )", R"( )"); } TEST_F(CudnnFusedConvRewriterTest, TestConvInvscaledOutputF8) { MAYBE_SKIP_TEST("F8"); TestF8( R"( HloModule Test ENTRY Test { input = f8e4m3fn[1,128,6,6] parameter(0) filter = f8e4m3fn[3,3,128,16] parameter(1) input_f32 = f32[1,128,6,6] convert(input) filter_f32 = f32[3,3,128,16] convert(filter) z_scale = f32[] parameter(2) z_scale_bcast = f32[1,16,6,6] broadcast(z_scale), dimensions={} conv_a = f32[1,16,6,6] convolution(input_f32, filter_f32), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1 conv_a_scaled = f32[1,16,6,6] divide(conv_a, z_scale_bcast) c1 = f32[] constant(-448.) c1_bcast = f32[1,16,6,6] broadcast(c1), dimensions={} c2 = f32[] constant(448.) 
c2_bcast = f32[1,16,6,6] broadcast(c2), dimensions={} conv_a_clamped = f32[1,16,6,6] clamp(c1_bcast, conv_a_scaled, c2_bcast) ROOT conv_f8 = f8e4m3fn[1,16,6,6] convert(conv_a_clamped) })", R"( )", R"( )"); } TEST_F(CudnnFusedConvRewriterTest, TestConvScaledF8Parameterized) { MAYBE_SKIP_TEST("F8"); TestF8Parameterized( R"( HloModule Test ENTRY Test { input = <<InputType>>[1,128,6,6] parameter(0) filter = <<FilterType>>[3,3,128,16] parameter(1) input_scale = f32[] parameter(2) input_scale_bcast = f32[1,128,6,6] broadcast(input_scale), dimensions={} filter_scale = f32[] parameter(3) filter_scale_bcast = f32[3,3,128,16] broadcast(filter_scale), dimensions={} input_f32 = f32[1,128,6,6] convert(input) input_unscaled = f32[1,128,6,6] multiply(input_f32, input_scale_bcast) filter_f32 = f32[3,3,128,16] convert(filter) filter_unscaled = f32[3,3,128,16] multiply(filter_f32, filter_scale_bcast) z_scale = f32[] parameter(4) z_scale_bcast = f32[1,16,6,6] broadcast(z_scale), dimensions={} conv_a = f32[1,16,6,6] convolution(input_unscaled, filter_unscaled), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1 conv_a_scaled = f32[1,16,6,6] multiply(conv_a, z_scale_bcast) c1 = f32[] constant(<<ClampLower>>) c1_bcast = f32[1,16,6,6] broadcast(c1), dimensions={} c2 = f32[] constant(<<ClampUpper>>) c2_bcast = f32[1,16,6,6] broadcast(c2), dimensions={} conv_a_clamped = f32[1,16,6,6] clamp(c1_bcast, conv_a_scaled, c2_bcast) ROOT conv_f8 = <<OutputType>>[1,16,6,6] convert(conv_a_clamped) })", R"( )", R"( )"); } TEST_F(CudnnFusedConvRewriterTest, TestConvScaledBiasF8) { MAYBE_SKIP_TEST("F8"); TestF8( R"( HloModule Test ENTRY Test { input = f8e4m3fn[1,128,6,6] parameter(0) filter = f8e4m3fn[3,3,128,16] parameter(1) input_scale = f32[] parameter(2) input_scale_bcast = f32[1,128,6,6] broadcast(input_scale), dimensions={} filter_scale = f32[] parameter(3) filter_scale_bcast = f32[3,3,128,16] broadcast(filter_scale), dimensions={} input_f32 = f32[1,128,6,6] convert(input) input_unscaled = f32[1,128,6,6] multiply(input_f32, input_scale_bcast) filter_f32 = f32[3,3,128,16] convert(filter) filter_unscaled = f32[3,3,128,16] multiply(filter_f32, filter_scale_bcast) bias = f32[1,16,6,6] parameter(4) z_scale = f32[] parameter(5) z_scale_bcast = f32[1,16,6,6] broadcast(z_scale), dimensions={} conv_a = f32[1,16,6,6] convolution(input_unscaled, filter_unscaled), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1 conv_a_bias = f32[1,16,6,6] add(conv_a, bias) conv_a_scaled = f32[1,16,6,6] multiply(conv_a_bias, z_scale_bcast) c1 = f32[] constant(-448.) c1_bcast = f32[1,16,6,6] broadcast(c1), dimensions={} c2 = f32[] constant(448.) 
c2_bcast = f32[1,16,6,6] broadcast(c2), dimensions={} conv_a_clamped = f32[1,16,6,6] clamp(c1_bcast, conv_a_scaled, c2_bcast) ROOT conv_f8 = f8e4m3fn[1,16,6,6] convert(conv_a_clamped) })", R"( )", R"( )"); } TEST_F(CudnnFusedConvRewriterTest, TestConvScaledReluF8) { MAYBE_SKIP_TEST("F8"); TestF8( R"( HloModule Test ENTRY Test { input = f8e4m3fn[1,128,6,6] parameter(0) filter = f8e4m3fn[3,3,128,16] parameter(1) input_f32 = f32[1,128,6,6] convert(input) filter_f32 = f32[3,3,128,16] convert(filter) z_scale = f32[] parameter(2) z_scale_bcast = f32[1,16,6,6] broadcast(z_scale), dimensions={} c = f32[] constant(0) c_bcast = f32[1,16,6,6] broadcast(c), dimensions={} conv_a = f32[1,16,6,6] convolution(input_f32, filter_f32), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1 relu_a = f32[1,16,6,6] maximum(conv_a, c_bcast) relu_a_scaled = f32[1,16,6,6] multiply(relu_a, z_scale_bcast) c1 = f32[] constant(-448.) c1_bcast = f32[1,16,6,6] broadcast(c1), dimensions={} c2 = f32[] constant(448.) c2_bcast = f32[1,16,6,6] broadcast(c2), dimensions={} relu_a_clamped = f32[1,16,6,6] clamp(c1_bcast, relu_a_scaled, c2_bcast) ROOT conv_f8 = f8e4m3fn[1,16,6,6] convert(relu_a_clamped) })", R"( )", R"( )"); } TEST_F(CudnnFusedConvRewriterTest, TestConvAmaxF8) { MAYBE_SKIP_TEST("F8"); TestF8( R"( HloModule Test apply { a = f32[] parameter(0) b = f32[] parameter(1) ROOT c = f32[] maximum(a, b) } ENTRY Test { input = f8e4m3fn[1,128,6,6] parameter(0) filter = f8e4m3fn[3,3,128,16] parameter(1) input_scale = f32[] parameter(2) input_scale_bcast = f32[1,128,6,6] broadcast(input_scale), dimensions={} filter_scale = f32[] parameter(3) filter_scale_bcast = f32[3,3,128,16] broadcast(filter_scale), dimensions={} input_f32 = f32[1,128,6,6] convert(input) input_unscaled = f32[1,128,6,6] multiply(input_f32, input_scale_bcast) filter_f32 = f32[3,3,128,16] convert(filter) filter_unscaled = f32[3,3,128,16] multiply(filter_f32, filter_scale_bcast) conv_a = f32[1,16,6,6] convolution(input_unscaled, filter_unscaled), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1 z_scale = f32[] parameter(4) z_scale_bcast = f32[1,16,6,6] broadcast(z_scale), dimensions={} conv_a_scaled = f32[1,16,6,6] multiply(conv_a, z_scale_bcast) c1 = f32[] constant(-448.) c1_bcast = f32[1,16,6,6] broadcast(c1), dimensions={} c2 = f32[] constant(448.) 
c2_bcast = f32[1,16,6,6] broadcast(c2), dimensions={} conv_a_clamped = f32[1,16,6,6] clamp(c1_bcast, conv_a_scaled, c2_bcast) conv_a_clamped_f8 = f8e4m3fn[1,16,6,6] convert(conv_a_clamped) abs_conv_a = f32[1,16,6,6] abs(conv_a) c0 = f32[] constant(-inf) amax = f32[] reduce(abs_conv_a, c0), dimensions={0,1,2,3}, to_apply=apply ROOT conv_f8 = (f8e4m3fn[1,16,6,6], f32[]) tuple(conv_a_clamped_f8, amax) })", R"( )", R"( )"); } TEST_F(CudnnFusedConvRewriterTest, TestConvReluAmaxF8) { MAYBE_SKIP_TEST("F8"); TestF8( R"( HloModule Test apply { a = f32[] parameter(0) b = f32[] parameter(1) ROOT c = f32[] maximum(a, b) } ENTRY Test { input = f8e4m3fn[1,128,6,6] parameter(0) filter = f8e4m3fn[3,3,128,16] parameter(1) input_scale = f32[] parameter(2) input_scale_bcast = f32[1,128,6,6] broadcast(input_scale), dimensions={} filter_scale = f32[] parameter(3) filter_scale_bcast = f32[3,3,128,16] broadcast(filter_scale), dimensions={} input_f32 = f32[1,128,6,6] convert(input) input_unscaled = f32[1,128,6,6] multiply(input_f32, input_scale_bcast) filter_f32 = f32[3,3,128,16] convert(filter) filter_unscaled = f32[3,3,128,16] multiply(filter_f32, filter_scale_bcast) c = f32[] constant(0) c_bcast = f32[1,16,6,6] broadcast(c), dimensions={} conv_a = f32[1,16,6,6] convolution(input_unscaled, filter_unscaled), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1 relu_a = f32[1,16,6,6] maximum(conv_a, c_bcast) z_scale = f32[] parameter(4) z_scale_bcast = f32[1,16,6,6] broadcast(z_scale), dimensions={} relu_a_scaled = f32[1,16,6,6] multiply(relu_a, z_scale_bcast) c1 = f32[] constant(-448.) c1_bcast = f32[1,16,6,6] broadcast(c1), dimensions={} c2 = f32[] constant(448.) c2_bcast = f32[1,16,6,6] broadcast(c2), dimensions={} relu_a_clamped = f32[1,16,6,6] clamp(c1_bcast, relu_a_scaled, c2_bcast) relu_a_clamped_f8 = f8e4m3fn[1,16,6,6] convert(relu_a_clamped) abs_relu_a = f32[1,16,6,6] abs(relu_a) c0 = f32[] constant(-inf) amax = f32[] reduce(abs_relu_a, c0), dimensions={0,1,2,3}, to_apply=apply ROOT conv_f8 = (f8e4m3fn[1,16,6,6], f32[]) tuple(relu_a_clamped_f8, amax) })", R"( )", R"( )"); } TEST_F(CudnnFusedConvRewriterTest, TestConvScaledOutputMultipleUsersF8) { MAYBE_SKIP_TEST("F8"); TestF8( R"( HloModule Test ENTRY Test { input = f8e4m3fn[1,128,6,6] parameter(0) filter = f8e4m3fn[3,3,128,16] parameter(1) input_f32 = f32[1,128,6,6] convert(input) filter_f32 = f32[3,3,128,16] convert(filter) z_scale0 = f32[] parameter(2) z_scale0_bcast = f32[1,16,6,6] broadcast(z_scale0), dimensions={} z_scale1 = f32[] parameter(3) z_scale1_bcast = f32[1,16,6,6] broadcast(z_scale1), dimensions={} conv_a = f32[1,16,6,6] convolution(input_f32, filter_f32), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1 conv_a_scaled0 = f32[1,16,6,6] multiply(conv_a, z_scale0_bcast) conv_a_scaled1 = f32[1,16,6,6] multiply(conv_a, z_scale1_bcast) c1 = f32[] constant(-448.) c1_bcast = f32[1,16,6,6] broadcast(c1), dimensions={} c2 = f32[] constant(448.) 
c2_bcast = f32[1,16,6,6] broadcast(c2), dimensions={} conv_a_clamped0 = f32[1,16,6,6] clamp(c1_bcast, conv_a_scaled0, c2_bcast) conv_a_clamped1 = f32[1,16,6,6] clamp(c1_bcast, conv_a_scaled1, c2_bcast) conv_a_convert0 = f8e4m3fn[1,16,6,6] convert(conv_a_clamped0) conv_a_convert1 = f8e4m3fn[1,16,6,6] convert(conv_a_clamped1) ROOT conv_f8 = (f8e4m3fn[1,16,6,6], f8e4m3fn[1,16,6,6]) tuple(conv_a_convert0, conv_a_convert1) })", R"( )", R"( )"); } TEST_F(CudnnFusedConvRewriterTest, TestConvScaledOutputUnsupportedUserF8) { MAYBE_SKIP_TEST("F8"); TestF8( R"( HloModule Test ENTRY Test { input = f8e4m3fn[1,128,6,6] parameter(0) filter = f8e4m3fn[3,3,128,16] parameter(1) input_f32 = f32[1,128,6,6] convert(input) filter_f32 = f32[3,3,128,16] convert(filter) z_scale = f32[] parameter(2) z_scale_bcast = f32[1,16,6,6] broadcast(z_scale), dimensions={} conv_a = f32[1,16,6,6] convolution(input_f32, filter_f32), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1 conv_a_cos = f32[1,16,6,6] cosine(conv_a) conv_a_scaled = f32[1,16,6,6] multiply(conv_a, z_scale_bcast) c1 = f32[] constant(-448.) c1_bcast = f32[1,16,6,6] broadcast(c1), dimensions={} c2 = f32[] constant(448.) c2_bcast = f32[1,16,6,6] broadcast(c2), dimensions={} conv_a_clamped = f32[1,16,6,6] clamp(c1_bcast, conv_a_scaled, c2_bcast) conv_a_convert = f8e4m3fn[1,16,6,6] convert(conv_a_clamped) ROOT conv_f8 = (f8e4m3fn[1,16,6,6], f32[1,16,6,6]) tuple(conv_a_convert, conv_a_cos) })", R"( )", R"( )"); } TEST_F(CudnnFusedConvRewriterTest, TestConvInt8ToInt8) { MAYBE_SKIP_TEST("I8"); TestClamp( R"( HloModule Test ENTRY Test { zero = s8[] constant(0) zeros = s8[1,32,9,9] broadcast(zero), dimensions={} input = s8[1,17,9,9] parameter(0) filter = s8[3,3,17,32] parameter(1) inputs32 = s32[1,17,9,9] convert(input) filters32 = s32[3,3,17,32] convert(filter) conv = s32[1,32,9,9] convolution(inputs32, filters32), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1 lower = s32[] constant(-128) lowers = s32[1,32,9,9] broadcast(lower), dimensions={} upper = s32[] constant(127) uppers = s32[1,32,9,9] broadcast(upper), dimensions={} clamp = s32[1,32,9,9] clamp(lowers, conv, uppers) ROOT convert = s8[1,32,9,9] convert(clamp) })", R"( )"); } TEST_F(CudnnFusedConvRewriterHloTest, TestConvInt8ToFloat) { MAYBE_SKIP_TEST("I8"); const std::string module_str = R"( HloModule Test ENTRY Test { input = s8[1,17,9,9] parameter(0) filter = s8[3,3,17,32] parameter(1) inputs32 = s32[1,17,9,9] convert(input) filters32 = s32[3,3,17,32] convert(filter) conv = s32[1,32,9,9] convolution(inputs32, filters32), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01 ROOT convert = f32[1,32,9,9] convert(conv) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); ConvRewriter rewriter{GetCudaComputeCapability()}; TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status()); CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(), GetToolkitVersion()}; TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status()); SCOPED_TRACE(m->ToString()); EXPECT_THAT(m->entry_computation()->root_instruction(), GmockMatch(m::GetTupleElement( m::CustomCall({kCudnnConvForwardCallTarget}), 0) .WithShape(F32, {1, 32, 9, 9}))); } TEST_F(CudnnFusedConvRewriterHloTest, TestConvInt8ToInt8BiasSideInput) { MAYBE_SKIP_TEST("I8"); const std::string module_str = R"( HloModule Test ENTRY Test { input = s32[1,17,9,9] convert(s8[1,17,9,9] parameter(0)) filter = s32[3,3,17,32] convert(s8[3,3,17,32] parameter(1)) bias = f32[1,32,9,9] 
broadcast(f32[32] parameter(2)), dimensions={1} side_input = f32[1,32,9,9] convert(s8[1,32,9,9] parameter(3)) conv = s32[1,32,9,9] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01 conv_f32 = f32[1,32,9,9] convert(conv) ROOT root = s8[1,32,9,9] convert(clamp(f32[1,32,9,9] broadcast(f32[] constant(-128)), add(add(conv_f32, bias), side_input), f32[1,32,9,9] broadcast(f32[] constant(127)))) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); ConvRewriter rewriter{GetCudaComputeCapability()}; TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status()); CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(), GetToolkitVersion()}; TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status()); AlgebraicSimplifier algsimp(AlgebraicSimplifierOptions{}); TF_ASSERT_OK(RunHloPass(&algsimp, m.get()).status()); SCOPED_TRACE(m->ToString()); EXPECT_THAT( m->entry_computation()->root_instruction(), GmockMatch(m::GetTupleElement( m::CustomCall({kCudnnConvBiasActivationForwardCallTarget}, m::Parameter(0), m::Parameter(1), m::Parameter(2), m::Parameter(3)), 0) .WithShape(S8, {1, 32, 9, 9}))); } TEST_F(CudnnFusedConvRewriterHloTest, TestReluAfterConvert) { MAYBE_SKIP_TEST("I8"); const std::string module_str = R"( HloModule Test ENTRY Test { input = s32[1,17,9,9] convert(s8[1,17,9,9] parameter(0)) filter = s32[3,3,17,32] convert(s8[3,3,17,32] parameter(1)) conv = s32[1,32,9,9] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01 conv_s8 = s8[1,32,9,9] convert(clamp(s32[1,32,9,9] broadcast(s32[] constant(-128)), conv, s32[1,32,9,9] broadcast(s32[] constant(127)))) zeros = s8[1,32,9,9] broadcast(s8[] constant(0)), dimensions={} ROOT root = s8[1,32,9,9] maximum(conv_s8, zeros) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); ConvRewriter rewriter{GetCudaComputeCapability()}; TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status()); CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(), GetToolkitVersion()}; TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status()); AlgebraicSimplifier algsimp(AlgebraicSimplifierOptions{}); TF_ASSERT_OK(RunHloPass(&algsimp, m.get()).status()); SCOPED_TRACE(m->ToString()); const HloInstruction* conv; ASSERT_THAT( m->entry_computation()->root_instruction(), GmockMatch( m::GetTupleElement( m::CustomCall( &conv, {kCudnnConvBiasActivationForwardCallTarget}, m::Parameter(0), m::Parameter(1), m::Broadcast( m::ConstantEffectiveScalar(0).WithElementType(F32))), 0) .WithShape(S8, {1, 32, 9, 9}))); TF_ASSERT_OK_AND_ASSIGN(auto gpu_config, conv->backend_config<GpuBackendConfig>()); const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config(); EXPECT_EQ(config.activation_mode(), se::dnn::kRelu); } TEST_F(CudnnFusedConvRewriterHloTest, TestConvInt8ToFloatBiasSideInput) { MAYBE_SKIP_TEST("I8"); const std::string module_str = R"( HloModule Test ENTRY Test { input = s8[1,17,9,9] parameter(0) filter = s8[3,3,17,32] parameter(1) bias = f32[32] parameter(2) bias_broadcast = f32[1,32,9,9] broadcast(bias), dimensions={1} side_input_f32 = f32[1,32,9,9] parameter(3) inputs32 = s32[1,17,9,9] convert(input) filters32 = s32[3,3,17,32] convert(filter) conv = s32[1,32,9,9] convolution(inputs32, filters32), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01 conv_f32 = f32[1,32,9,9] convert(conv) sum1 = add(conv_f32, bias_broadcast) ROOT sum2 = add(sum1, side_input_f32) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); 
ConvRewriter rewriter{GetCudaComputeCapability()}; TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status()); CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(), GetToolkitVersion()}; TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status()); AlgebraicSimplifier algsimp(AlgebraicSimplifierOptions{}); TF_ASSERT_OK(RunHloPass(&algsimp, m.get()).status()); SCOPED_TRACE(m->ToString()); EXPECT_THAT( m->entry_computation()->root_instruction(), GmockMatch(m::GetTupleElement( m::CustomCall({kCudnnConvBiasActivationForwardCallTarget}, m::Parameter(0), m::Parameter(1), m::Parameter(2), m::Parameter(3)), 0) .WithShape(F32, {1, 32, 9, 9}))); } TEST_F(CudnnFusedConvRewriterHloTest, Int8SideInputWithScaleAndReshape) { MAYBE_SKIP_TEST("I8"); const std::string module_str = R"( HloModule Test ENTRY Test { input = s32[1,17,9,9] convert(s8[1,17,9,9] parameter(0)) filter = s32[3,3,17,32] convert(s8[3,3,17,32] parameter(1)) bias = f32[1,32,9,9] broadcast(f32[32] parameter(2)), dimensions={1} side_input_scale = f32[2592] broadcast(f32[] constant(0.25)), dimensions={} side_input = f32[1,32,9,9] reshape(multiply(f32[2592] convert(s8[2592] parameter(3)), side_input_scale)) conv = s32[1,32,9,9] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01 ROOT root = s8[1,32,9,9] convert(clamp(f32[1,32,9,9] broadcast(f32[] constant(-128)), add(add(f32[1,32,9,9] convert(conv), bias), side_input), f32[1,32,9,9] broadcast(f32[] constant(127)))) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); ConvRewriter rewriter{GetCudaComputeCapability()}; TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status()); CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(), GetToolkitVersion()}; TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status()); HloPassFix<HloPassPipeline> simplify("simplify"); simplify.AddPass<AlgebraicSimplifier>(AlgebraicSimplifierOptions{}); simplify.AddPass<ReshapeMover>(); simplify.AddPass<ConvertMover>(); TF_ASSERT_OK(RunHloPass(&simplify, m.get()).status()); SCOPED_TRACE(m->ToString()); const HloInstruction* conv = nullptr; ASSERT_THAT( m->entry_computation()->root_instruction(), GmockMatch( m::GetTupleElement( m::CustomCall( &conv, {kCudnnConvBiasActivationForwardCallTarget}, m::Parameter(0), m::Parameter(1), m::Parameter(2), m::Reshape(m::Parameter(3)).WithShape(S8, {1, 32, 9, 9})), 0) .WithShape(S8, {1, 32, 9, 9}))); TF_ASSERT_OK_AND_ASSIGN(auto gpu_config, conv->backend_config<GpuBackendConfig>()); const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config(); EXPECT_EQ(config.conv_result_scale(), 1); EXPECT_EQ(config.side_input_scale(), 0.25); } TEST_F(CudnnFusedConvRewriterHloTest, FuseAlpha) { MAYBE_SKIP_TEST("I8"); const std::string module_str = R"( HloModule Test ENTRY Test { input = s8[1,17,9,9] parameter(0) filter = s8[3,3,17,32] parameter(1) inputs32 = s32[1,17,9,9] convert(input) filters32 = s32[3,3,17,32] convert(filter) alpha = f32[] constant(42) alpha_broadcast = f32[1,32,9,9] broadcast(alpha), dimensions={} conv = s32[1,32,9,9] convolution(inputs32, filters32), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01 convert = f32[1,32,9,9] convert(conv) ROOT root = multiply(convert, alpha_broadcast) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); ConvRewriter rewriter{GetCudaComputeCapability()}; TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status()); CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(), GetToolkitVersion()}; 
TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status()); SCOPED_TRACE(m->ToString()); const HloInstruction* conv = nullptr; ASSERT_THAT( m->entry_computation()->root_instruction(), GmockMatch( m::GetTupleElement( m::CustomCall(&conv, {kCudnnConvBiasActivationForwardCallTarget}), 0) .WithShape(F32, {1, 32, 9, 9}))); TF_ASSERT_OK_AND_ASSIGN(auto gpu_config, conv->backend_config<GpuBackendConfig>()); const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config(); EXPECT_EQ(config.conv_result_scale(), 42); } TEST_F(CudnnFusedConvRewriterHloTest, FuseRelu) { const std::string module_str = R"( HloModule Test ENTRY Test { inputs = f32[1,17,9,9] parameter(0) filters = f32[3,3,17,32] parameter(1) bias = f32[32] parameter(2) bias_broadcast = f32[1,32,9,9] broadcast(bias), dimensions={1} zero = f32[] constant(0) zeros = f32[1,32,9,9] broadcast(zero), dimensions={} conv = f32[1,32,9,9] convolution(inputs, filters), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01 sum = add(conv, bias_broadcast) ROOT relu = maximum(sum, zeros) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); ConvRewriter rewriter{GetCudaComputeCapability()}; TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status()); CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(), GetToolkitVersion()}; TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status()); SCOPED_TRACE(m->ToString()); const HloInstruction* conv; ASSERT_THAT( m->entry_computation()->root_instruction(), GmockMatch( m::GetTupleElement( m::CustomCall(&conv, {kCudnnConvBiasActivationForwardCallTarget}, m::Parameter(0), m::Parameter(1), m::Parameter(2)), 0) .WithShape(F32, {1, 32, 9, 9}))); TF_ASSERT_OK_AND_ASSIGN(auto gpu_config, conv->backend_config<GpuBackendConfig>()); const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config(); EXPECT_EQ(config.activation_mode(), se::dnn::kRelu); } TEST_F(CudnnFusedConvRewriterHloTest, DontFuseReluIfMultipleUses) { const std::string module_str = R"( HloModule Test ENTRY Test { inputs = f32[1,17,9,9] parameter(0) filters = f32[3,3,17,32] parameter(1) bias = f32[1,32,9,9] broadcast(f32[32] parameter(2)), dimensions={1} zeros = f32[1,32,9,9] broadcast(f32[] constant(0)), dimensions={} conv = f32[1,32,9,9] convolution(inputs, filters), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01 sum = add(conv, bias) relu = maximum(sum, zeros) not_relu = minimum(sum, zeros) ROOT root = tuple(relu, not_relu) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); ConvRewriter rewriter{GetCudaComputeCapability()}; TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status()); CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(), GetToolkitVersion()}; TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status()); SCOPED_TRACE(m->ToString()); const HloInstruction* conv; ASSERT_THAT( m->entry_computation()->root_instruction(), GmockMatch(m::Tuple( m::MaximumAnyOrder( m::Broadcast(m::ConstantEffectiveScalar(0)), m::GetTupleElement( m::CustomCall( &conv, {kCudnnConvBiasActivationForwardCallTarget}, m::Parameter(0), m::Parameter(1), m::Parameter(2)), 0) .WithShape(F32, {1, 32, 9, 9})), m::Minimum()))); TF_ASSERT_OK_AND_ASSIGN(auto gpu_config, conv->backend_config<GpuBackendConfig>()); const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config(); EXPECT_EQ(config.activation_mode(), se::dnn::kNone); } TEST_F(CudnnFusedConvRewriterHloTest, FuseElu) { const std::string module_str = R"( HloModule Test ENTRY Test { inputs = f16[1,16,9,9] 
parameter(0) filters = f16[3,3,16,32] parameter(1) bias = f16[32] parameter(2) bias_broadcast = f16[1,32,9,9] broadcast(bias), dimensions={1} zero = f16[] constant(0) zeros = f16[1,32,9,9] broadcast(zero), dimensions={} conv = f16[1,32,9,9] convolution(inputs, filters), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01 sum = add(conv, bias_broadcast) cmp = compare(sum, zeros), direction=GT expm1 = exponential-minus-one(sum) ROOT elu = select(cmp, sum, expm1) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); DebugOptions debug_opts = m->config().debug_options(); debug_opts.set_xla_gpu_use_runtime_fusion(true); m->mutable_config().set_debug_options(debug_opts); ConvRewriter rewriter{GetCudaComputeCapability()}; TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status()); CudnnFusedConvRewriter fuser{se::CudaComputeCapability(8, 0), GetDnnVersion(), GetToolkitVersion()}; TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status()); SCOPED_TRACE(m->ToString()); const HloInstruction* conv; ASSERT_THAT( m->entry_computation()->root_instruction(), GmockMatch( m::GetTupleElement( m::CustomCall(&conv, {kCudnnConvBiasActivationForwardCallTarget}, m::Parameter(0), m::Parameter(1), m::Parameter(2)), 0) .WithShape(F16, {1, 32, 9, 9}))); TF_ASSERT_OK_AND_ASSIGN(auto gpu_config, conv->backend_config<GpuBackendConfig>()); const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config(); EXPECT_EQ(config.activation_mode(), se::dnn::kElu); } TEST_F(CudnnFusedConvRewriterHloTest, DontFuseEluIfMultipleUses) { const std::string module_str = R"( HloModule Test ENTRY Test { inputs = f16[1,16,9,9] parameter(0) filters = f16[3,3,16,32] parameter(1) bias = f16[32] parameter(2) bias_broadcast = f16[1,32,9,9] broadcast(bias), dimensions={1} zero = f16[] constant(0) zeros = f16[1,32,9,9] broadcast(zero), dimensions={} conv = f16[1,32,9,9] convolution(inputs, filters), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01 sum = add(conv, bias_broadcast) cmp = compare(sum, zeros), direction=GT expm1 = exponential-minus-one(sum) elu = select(cmp, sum, expm1) not_elu = minimum(sum, zeros) ROOT root = tuple(elu, not_elu) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); DebugOptions debug_opts = m->config().debug_options(); debug_opts.set_xla_gpu_use_runtime_fusion(true); m->mutable_config().set_debug_options(debug_opts); ConvRewriter rewriter{GetCudaComputeCapability()}; TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status()); CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(), GetToolkitVersion()}; TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status()); SCOPED_TRACE(m->ToString()); const HloInstruction* conv; auto gte_pattern = m::GetTupleElement( m::CustomCall(&conv, {kCudnnConvBiasActivationForwardCallTarget}, m::Parameter(0), m::Parameter(1), m::Parameter(2)), 0) .WithShape(F16, {1, 32, 9, 9}); ASSERT_THAT( m->entry_computation()->root_instruction(), GmockMatch(m::Tuple( m::Select(m::Compare(gte_pattern, m::Broadcast(m::ConstantEffectiveScalar(0))) .WithComparisonDirection(ComparisonDirection::kGt), gte_pattern, m::Op() .WithPredicate(HloPredicateIsOp<HloOpcode::kExpm1>) .WithOperand(0, gte_pattern)), m::Minimum()))); TF_ASSERT_OK_AND_ASSIGN(auto gpu_config, conv->backend_config<GpuBackendConfig>()); const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config(); EXPECT_EQ(config.activation_mode(), se::dnn::kNone); } TEST_F(CudnnFusedConvRewriterHloTest, FuseRelu6) { const std::string module_str = R"( HloModule Test 
ENTRY Test { inputs = f16[1,18,9,9] parameter(0) filters = f16[3,3,18,32] parameter(1) bias = f16[32] parameter(2) bias_broadcast = f16[1,32,9,9] broadcast(bias), dimensions={1} zero = f16[] constant(0) zeros = f16[1,32,9,9] broadcast(zero), dimensions={} sixes = f16[1,32,9,9] broadcast(f16[] constant(6)), dimensions={} conv = f16[1,32,9,9] convolution(inputs, filters), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01 sum = add(conv, bias_broadcast) ROOT relu = clamp(zeros, sum, sixes) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); DebugOptions debug_opts = m->config().debug_options(); debug_opts.set_xla_gpu_use_runtime_fusion(true); m->mutable_config().set_debug_options(debug_opts); ConvRewriter rewriter{GetCudaComputeCapability()}; TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status()); CudnnFusedConvRewriter fuser{se::CudaComputeCapability(8, 0), GetDnnVersion(), GetToolkitVersion()}; TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status()); SCOPED_TRACE(m->ToString()); const HloInstruction* conv; ASSERT_THAT( m->entry_computation()->root_instruction(), GmockMatch( m::GetTupleElement( m::CustomCall(&conv, {kCudnnConvBiasActivationForwardCallTarget}, m::Parameter(0), m::Parameter(1), m::Parameter(2)), 0) .WithShape(F16, {1, 32, 9, 9}))); TF_ASSERT_OK_AND_ASSIGN(auto gpu_config, conv->backend_config<GpuBackendConfig>()); const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config(); EXPECT_EQ(config.activation_mode(), se::dnn::kRelu6); } TEST_F(CudnnFusedConvRewriterHloTest, DontFuseRelu6IfMultipleUses) { const std::string module_str = R"( HloModule Test ENTRY Test { inputs = f16[1,18,9,9] parameter(0) filters = f16[3,3,18,32] parameter(1) bias = f16[1,32,9,9] broadcast(f16[32] parameter(2)), dimensions={1} zeros = f16[1,32,9,9] broadcast(f16[] constant(0)), dimensions={} sixes = f16[1,32,9,9] broadcast(f16[] constant(6)), dimensions={} conv = f16[1,32,9,9] convolution(inputs, filters), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01 sum = add(conv, bias) relu = clamp(zeros, sum, sixes) not_relu = minimum(sum, zeros) ROOT root = tuple(relu, not_relu) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); DebugOptions debug_opts = m->config().debug_options(); debug_opts.set_xla_gpu_use_runtime_fusion(true); m->mutable_config().set_debug_options(debug_opts); ConvRewriter rewriter{GetCudaComputeCapability()}; TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status()); CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(), GetToolkitVersion()}; TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status()); SCOPED_TRACE(m->ToString()); const HloInstruction* conv; ASSERT_THAT( m->entry_computation()->root_instruction(), GmockMatch(m::Tuple( m::Clamp(m::Broadcast(m::ConstantEffectiveScalar(0)), m::GetTupleElement( m::CustomCall( &conv, {kCudnnConvBiasActivationForwardCallTarget}, m::Parameter(0), m::Parameter(1), m::Parameter(2)), 0) .WithShape(F16, {1, 32, 9, 9}), m::Broadcast(m::ConstantEffectiveScalar(6))), m::Minimum()))); TF_ASSERT_OK_AND_ASSIGN(auto gpu_config, conv->backend_config<GpuBackendConfig>()); const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config(); EXPECT_EQ(config.activation_mode(), se::dnn::kNone); } TEST_F(CudnnFusedConvRewriterHloTest, FuseLeakyRelu) { const std::string module_str = R"( HloModule Test ENTRY Test { inputs = f16[1,16,9,9] parameter(0) filters = f16[3,3,16,32] parameter(1) bias = f16[1,32,9,9] broadcast(f16[32] parameter(2)), dimensions={1} zeros = 
f16[1,32,9,9] broadcast(f16[] constant(0)), dimensions={} alphas = f16[1,32,9,9] broadcast(f16[] constant(0.2)), dimensions={} conv = f16[1,32,9,9] convolution(inputs, filters), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01 sum = add(conv, bias) cmp = compare(sum, zeros), direction=GT mul = multiply(sum, alphas) ROOT leaky_relu = select(cmp, sum, mul) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); DebugOptions debug_opts = m->config().debug_options(); debug_opts.set_xla_gpu_use_runtime_fusion(true); m->mutable_config().set_debug_options(debug_opts); ConvRewriter rewriter{GetCudaComputeCapability()}; TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status()); CudnnFusedConvRewriter fuser{se::CudaComputeCapability(8, 0), GetDnnVersion(), GetToolkitVersion()}; TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status()); SCOPED_TRACE(m->ToString()); const HloInstruction* conv; ASSERT_THAT( m->entry_computation()->root_instruction(), GmockMatch( m::GetTupleElement( m::CustomCall(&conv, {kCudnnConvBiasActivationForwardCallTarget}, m::Parameter(0), m::Parameter(1), m::Parameter(2)), 0) .WithShape(F16, {1, 32, 9, 9}))); TF_ASSERT_OK_AND_ASSIGN(auto gpu_config, conv->backend_config<GpuBackendConfig>()); const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config(); EXPECT_EQ(config.activation_mode(), se::dnn::kLeakyRelu); } TEST_F(CudnnFusedConvRewriterHloTest, DontFuseLeakyReluIfMultipleUses) { const std::string module_str = R"( HloModule Test ENTRY Test { inputs = f16[1,16,9,9] parameter(0) filters = f16[3,3,16,32] parameter(1) bias = f16[1,32,9,9] broadcast(f16[32] parameter(2)), dimensions={1} zeros = f16[1,32,9,9] broadcast(f16[] constant(0)), dimensions={} alphas = f16[1,32,9,9] broadcast(f16[] constant(0.2)), dimensions={} conv = f16[1,32,9,9] convolution(inputs, filters), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01 sum = add(conv, bias) cmp = compare(sum, zeros), direction=GT mul = multiply(sum, alphas) leaky_relu = select(cmp, sum, mul) not_leaky_relu = minimum(sum, zeros) ROOT root = tuple(leaky_relu, not_leaky_relu) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); DebugOptions debug_opts = m->config().debug_options(); debug_opts.set_xla_gpu_use_runtime_fusion(true); m->mutable_config().set_debug_options(debug_opts); ConvRewriter rewriter{GetCudaComputeCapability()}; TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status()); CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(), GetToolkitVersion()}; TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status()); SCOPED_TRACE(m->ToString()); const HloInstruction* conv; auto gte_pattern = m::GetTupleElement( m::CustomCall(&conv, {kCudnnConvBiasActivationForwardCallTarget}, m::Parameter(0), m::Parameter(1), m::Parameter(2)), 0) .WithShape(F16, {1, 32, 9, 9}); ASSERT_THAT( m->entry_computation()->root_instruction(), GmockMatch(m::Tuple( m::Select(m::Compare(gte_pattern, m::Broadcast(m::ConstantEffectiveScalar(0))) .WithComparisonDirection(ComparisonDirection::kGt) .WithOneUse(), gte_pattern, m::Multiply(gte_pattern, m::Broadcast(m::ConstantEffectiveScalar()))), m::Minimum()))); TF_ASSERT_OK_AND_ASSIGN(auto gpu_config, conv->backend_config<GpuBackendConfig>()); const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config(); EXPECT_EQ(config.activation_mode(), se::dnn::kNone); } TEST_F(CudnnFusedConvRewriterHloTest, DontFuseAlphaIfMultipleUsers) { const std::string module_str = R"( HloModule Test ENTRY Test { inputs = f32[1,17,9,9] 
parameter(0) filters = f32[3,3,17,32] parameter(1) bias = f32[1,32,9,9] broadcast(f32[32] parameter(2)), dimensions={1} alpha = f32[1,32,9,9] broadcast(f32[] parameter(3)), dimensions={} conv = f32[1,32,9,9] convolution(inputs, filters), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01 sum = add(multiply(alpha, conv), bias) ROOT root = tuple(conv, sum) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); ConvRewriter rewriter{GetCudaComputeCapability()}; TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status()); CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(), GetToolkitVersion()}; TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status()); SCOPED_TRACE(m->ToString()); const HloInstruction* conv1; const HloInstruction* conv2; ASSERT_THAT( m->entry_computation()->root_instruction(), GmockMatch(m::Tuple( m::GetTupleElement(m::CustomCall(&conv1), 0), m::AddAnyOrder(m::Broadcast(m::Parameter(2)), m::MultiplyAnyOrder( m::Broadcast(m::Parameter(3)), m::GetTupleElement(m::CustomCall(&conv2), 0)))))); EXPECT_EQ(conv1, conv2); TF_ASSERT_OK_AND_ASSIGN(auto gpu_config, conv1->backend_config<GpuBackendConfig>()); const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config(); EXPECT_EQ(config.conv_result_scale(), 1); EXPECT_EQ(config.activation_mode(), se::dnn::kNone); } TEST_F(CudnnFusedConvRewriterHloTest, DontFuseBiasIfMultipleUsers) { const std::string module_str = R"( HloModule Test ENTRY Test { inputs = f32[1,17,9,9] parameter(0) filters = f32[3,3,17,32] parameter(1) bias = f32[1,32,9,9] broadcast(f32[32] parameter(2)), dimensions={1} conv = f32[1,32,9,9] convolution(inputs, filters), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01 ROOT root = tuple(conv, add(conv, bias)) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); ConvRewriter rewriter{GetCudaComputeCapability()}; TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status()); CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(), GetToolkitVersion()}; TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status()); SCOPED_TRACE(m->ToString()); const HloInstruction* conv1; const HloInstruction* conv2; ASSERT_THAT( m->entry_computation()->root_instruction(), GmockMatch(m::Tuple( m::GetTupleElement(m::CustomCall(&conv1), 0), m::AddAnyOrder(m::Broadcast(m::Parameter(2)), m::GetTupleElement(m::CustomCall(&conv2), 0))))); EXPECT_EQ(conv1, conv2); TF_ASSERT_OK_AND_ASSIGN(auto gpu_config, conv1->backend_config<GpuBackendConfig>()); const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config(); EXPECT_EQ(config.conv_result_scale(), 1); EXPECT_EQ(config.activation_mode(), se::dnn::kNone); } TEST_F(CudnnFusedConvRewriterHloTest, DontFuseSideInputThroughRelu) { const std::string module_str = R"( HloModule Test ENTRY Test { inputs = f32[1,17,9,9] parameter(0) filters = f32[3,3,17,32] parameter(1) side_input = f32[1,32,9,9] parameter(2) conv = f32[1,32,9,9] convolution(inputs, filters), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01 relu = maximum(conv, f32[1,32,9,9] broadcast(f32[] constant(0))) ROOT root = add(relu, side_input) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); ConvRewriter rewriter{GetCudaComputeCapability()}; TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status()); CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(), GetToolkitVersion()}; TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status()); SCOPED_TRACE(m->ToString()); const HloInstruction* conv; ASSERT_THAT( 
m->entry_computation()->root_instruction(), GmockMatch(m::AddAnyOrder( m::Parameter(2), m::GetTupleElement( m::CustomCall(&conv, m::Parameter(0), m::Parameter(1), m::Broadcast(m::ConstantEffectiveScalar(0))), 0)))); TF_ASSERT_OK_AND_ASSIGN(auto gpu_config, conv->backend_config<GpuBackendConfig>()); const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config(); EXPECT_EQ(config.conv_result_scale(), 1); EXPECT_EQ(config.activation_mode(), se::dnn::kRelu); } TEST_F(CudnnFusedConvRewriterHloTest, DontFuseBiasThroughRelu) { const std::string module_str = R"( HloModule Test ENTRY Test { inputs = f32[1,17,9,9] parameter(0) filters = f32[3,3,17,32] parameter(1) bias = f32[1,32,9,9] broadcast(f32[32] parameter(2)), dimensions={1} conv = f32[1,32,9,9] convolution(inputs, filters), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01 relu = maximum(conv, f32[1,32,9,9] broadcast(f32[] constant(0))) ROOT root = add(relu, bias) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); ConvRewriter rewriter{GetCudaComputeCapability()}; TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status()); CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(), GetToolkitVersion()}; TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status()); SCOPED_TRACE(m->ToString()); const HloInstruction* conv; ASSERT_THAT(m->entry_computation()->root_instruction(), GmockMatch(m::AddAnyOrder( m::Broadcast(m::Parameter(2)), m::GetTupleElement(m::CustomCall( &conv, m::Parameter(0), m::Parameter(1), m::Broadcast(m::ConstantEffectiveScalar(0))))))); TF_ASSERT_OK_AND_ASSIGN(auto gpu_config, conv->backend_config<GpuBackendConfig>()); const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config(); EXPECT_EQ(config.conv_result_scale(), 1); EXPECT_EQ(config.activation_mode(), se::dnn::kRelu); } TEST_F(CudnnFusedConvRewriterHloTest, DontFuseSideInputIfMultipleUsers) { const std::string module_str = R"( HloModule Test ENTRY Test { inputs = f32[1,17,9,9] parameter(0) filters = f32[3,3,17,32] parameter(1) side_input = f32[1,32,9,9] parameter(2) conv = f32[1,32,9,9] convolution(inputs, filters), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01 ROOT root = tuple(conv, add(conv, side_input)) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); ConvRewriter rewriter{GetCudaComputeCapability()}; TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status()); CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(), GetToolkitVersion()}; TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status()); SCOPED_TRACE(m->ToString()); const HloInstruction* conv1; const HloInstruction* conv2; ASSERT_THAT( m->entry_computation()->root_instruction(), GmockMatch(m::Tuple( m::GetTupleElement(m::CustomCall(&conv1), 0), m::AddAnyOrder(m::Parameter(2), m::GetTupleElement(m::CustomCall(&conv2), 0))))); EXPECT_EQ(conv1, conv2); TF_ASSERT_OK_AND_ASSIGN(auto gpu_config, conv1->backend_config<GpuBackendConfig>()); const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config(); EXPECT_EQ(config.conv_result_scale(), 1); EXPECT_EQ(config.activation_mode(), se::dnn::kNone); } TEST_F(CudnnFusedConvRewriterHloTest, DontFuseConvertToF16IfMultipleUsers) { const std::string module_str = R"( HloModule Test ENTRY Test { inputs = f32[1,17,9,9] convert(f16[1,17,9,9] parameter(0)) filters = f32[3,3,17,32] convert(f16[3,3,17,32] parameter(1)) conv = f32[1,32,9,9] convolution(inputs, filters), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01 ROOT root = tuple(conv, 
f16[1,32,9,9] convert(conv)) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); ConvRewriter rewriter{GetCudaComputeCapability()}; TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status()); CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(), GetToolkitVersion()}; TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status()); SCOPED_TRACE(m->ToString()); const HloInstruction* conv1; const HloInstruction* conv2; ASSERT_THAT(m->entry_computation()->root_instruction(), GmockMatch(m::Tuple( m::GetTupleElement(m::CustomCall(&conv1), 0), m::Convert(m::GetTupleElement(m::CustomCall(&conv2), 0))))); EXPECT_EQ(conv1, conv2); } TEST_F(CudnnFusedConvRewriterHloTest, DontFuseToS8IfMultipleUsers) { const std::string module_str = R"( HloModule Test ENTRY Test { inputs = f32[1,17,9,9] convert(s8[1,17,9,9] parameter(0)) filters = f32[3,3,17,32] convert(s8[3,3,17,32] parameter(1)) conv = f32[1,32,9,9] convolution(inputs, filters), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01 conv_s8 = s8[1,32,9,9] convert(clamp( f32[1,32,9,9] broadcast(f32[] constant(-128)), conv, f32[1,32,9,9] broadcast(f32[] constant(127)))) ROOT root = tuple(conv, conv_s8) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); ConvRewriter rewriter{GetCudaComputeCapability()}; TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status()); CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(), GetToolkitVersion()}; TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status()); SCOPED_TRACE(m->ToString()); const HloInstruction* conv1; const HloInstruction* conv2; ASSERT_THAT( m->entry_computation()->root_instruction(), GmockMatch(m::Tuple( m::GetTupleElement(m::CustomCall(&conv1), 0), m::Convert(m::Clamp(m::Op(), m::GetTupleElement(m::CustomCall(&conv2), 0), m::Op()))))); EXPECT_EQ(conv1, conv2); } TEST_F(CudnnFusedConvRewriterHloTest, RemoveConvertByFusingS32ToF32) { MAYBE_SKIP_TEST("I8"); const std::string_view module_str = R"( HloModule Test ENTRY test_entry { inputs = s8[1, 17, 9, 9] parameter(0) filters = s8[3, 3, 17, 32] parameter(1) mult_op = f32[1, 32, 9, 9] parameter(2) conv = s32[1, 32, 9, 9] convolution(inputs, filters), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01 ROOT ret = multiply(f32[1, 32, 9, 9] convert(conv), mult_op) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); ConvRewriter rewriter{GetCudaComputeCapability()}; TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status()); CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(), GetToolkitVersion()}; TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status()); SCOPED_TRACE(m->ToString()); HloInstruction* conv1 = nullptr; ASSERT_THAT(m->entry_computation()->root_instruction(), GmockMatch(m::Multiply(m::GetTupleElement(m::CustomCall(&conv1)), m::Parameter(2)))); } TEST_F(CudnnFusedConvRewriterHloTest, RemoveConvertByFusingS8ToF32) { MAYBE_SKIP_TEST("I8"); const std::string_view module_str = R"( HloModule Test ENTRY test_entry { inputs = s8[1, 17, 9, 9] parameter(0) filters = s8[3, 3, 17, 32] parameter(1) mult_op = f32[1, 32, 9, 9] parameter(2) conv = convolution(inputs, filters), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01 ROOT ret = multiply(f32[1, 32, 9, 9] convert(conv), mult_op) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); ConvRewriter rewriter{GetCudaComputeCapability()}; TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status()); CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(), 
GetToolkitVersion()}; TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status()); SCOPED_TRACE(m->ToString()); HloInstruction* conv1 = nullptr; ASSERT_THAT(m->entry_computation()->root_instruction(), GmockMatch(m::Multiply(m::GetTupleElement(m::CustomCall(&conv1)), m::Parameter(2)))); } TEST_F(CudnnFusedConvRewriterHloTest, RemoveConvertByFusingF32ToS8) { MAYBE_SKIP_TEST("I8"); const std::string_view module_str = R"( HloModule Test ENTRY test_entry { inputs = f32[1, 17, 9, 9] parameter(0) filters = f32[3, 3, 17, 32] parameter(1) mult_op = s8[1, 32, 9, 9] parameter(2) conv = convolution(inputs, filters), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01 ROOT ret = multiply(s8[1, 32, 9, 9] convert(conv), mult_op) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); ConvRewriter rewriter{GetCudaComputeCapability()}; TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status()); CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(), GetToolkitVersion()}; TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status()); SCOPED_TRACE(m->ToString()); HloInstruction* conv1 = nullptr; ASSERT_THAT(m->entry_computation()->root_instruction(), GmockMatch(m::Multiply(m::GetTupleElement(m::CustomCall(&conv1)), m::Parameter(2)))); } TEST_F(CudnnFusedConvRewriterHloTest, DontRemoveConvertDuetoMultpleUser) { const std::string_view module_str = R"( HloModule Test ENTRY test_entry { inputs = f32[1, 17, 9, 9] parameter(0) filters = f32[3, 3, 17, 32] parameter(1) mult_op = s8[1, 32, 9, 9] parameter(2) sub_op = s8[1, 32, 9, 9] parameter(3) conv = convolution(inputs, filters), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01 another = subtract(s8[1, 32, 9, 9] convert(conv), sub_op) ROOT ret = multiply(s8[1, 32, 9, 9] convert(conv), mult_op) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); ConvRewriter rewriter{GetCudaComputeCapability()}; TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status()); CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(), GetToolkitVersion()}; TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status()); SCOPED_TRACE(m->ToString()); HloInstruction* conv1 = nullptr; ASSERT_THAT(m->entry_computation()->root_instruction(), GmockMatch(m::Multiply( m::Convert(m::GetTupleElement(m::CustomCall(&conv1))), m::Parameter(2)))); } TEST_F(CudnnFusedConvRewriterHloTest, FuseBias) { const std::string module_str = R"( HloModule Test ENTRY Test { inputs = f32[1,17,9,9] parameter(0) filters = f32[3,3,17,32] parameter(1) bias = f32[32] parameter(2) bias_broadcast = f32[1,32,9,9] broadcast(bias), dimensions={1} conv = f32[1,32,9,9] convolution(inputs, filters), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01 ROOT root = add(conv, bias_broadcast) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); ConvRewriter rewriter{GetCudaComputeCapability()}; TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status()); CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(), GetToolkitVersion()}; TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status()); SCOPED_TRACE(m->ToString()); ASSERT_THAT( m->entry_computation()->root_instruction(), GmockMatch( m::GetTupleElement( m::CustomCall({kCudnnConvBiasActivationForwardCallTarget}, m::Parameter(0), m::Parameter(1), m::Parameter(2)), 0) .WithShape(F32, {1, 32, 9, 9}))); } TEST_F(CudnnFusedConvRewriterHloTest, FuseSideInput) { const std::string module_str = R"( HloModule Test ENTRY Test { inputs = f32[1,17,9,9] parameter(0) filters = f32[3,3,17,32] parameter(1) 
side_input = f32[1,32,9,9] parameter(2) conv = f32[1,32,9,9] convolution(inputs, filters), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01 ROOT root = add(conv, side_input) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); ConvRewriter rewriter{GetCudaComputeCapability()}; TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status()); CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(), GetToolkitVersion()}; TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status()); SCOPED_TRACE(m->ToString()); const HloInstruction* conv; ASSERT_THAT( m->entry_computation()->root_instruction(), GmockMatch( m::GetTupleElement( m::CustomCall(&conv, {kCudnnConvBiasActivationForwardCallTarget}, m::Parameter(0), m::Parameter(1), m::Broadcast(m::ConstantEffectiveScalar(0)) .WithShape(F32, {32}), m::Parameter(2)), 0) .WithShape(F32, {1, 32, 9, 9}))); TF_ASSERT_OK_AND_ASSIGN(auto gpu_config, conv->backend_config<GpuBackendConfig>()); const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config(); EXPECT_EQ(config.side_input_scale(), 1); } TEST_F(CudnnFusedConvRewriterHloTest, FuseScaledSideInput) { const std::string module_str = R"( HloModule Test ENTRY Test { inputs = f32[1,17,9,9] parameter(0) filters = f32[3,3,17,32] parameter(1) side_input = f32[1,32,9,9] parameter(2) side_input_scale = f32[] constant(42) side_input_scale_broadcast = f32[1,32,9,9] broadcast(side_input_scale), dimensions={} side_input_product = multiply(side_input, side_input_scale_broadcast) conv = f32[1,32,9,9] convolution(inputs, filters), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01 ROOT root = add(conv, side_input_product) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); ConvRewriter rewriter{GetCudaComputeCapability()}; TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status()); CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(), GetToolkitVersion()}; TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status()); SCOPED_TRACE(m->ToString()); const HloInstruction* conv; ASSERT_THAT( m->entry_computation()->root_instruction(), GmockMatch( m::GetTupleElement( m::CustomCall(&conv, {kCudnnConvBiasActivationForwardCallTarget}, m::Parameter(0), m::Parameter(1), m::Broadcast(m::ConstantEffectiveScalar(0)) .WithShape(F32, {32}), m::Parameter(2)), 0) .WithShape(F32, {1, 32, 9, 9}))); TF_ASSERT_OK_AND_ASSIGN(auto gpu_config, conv->backend_config<GpuBackendConfig>()); const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config(); EXPECT_EQ(config.side_input_scale(), 42); } TEST_F(CudnnFusedConvRewriterHloTest, FuseBiasAndSideInput) { const std::string module_str = R"( HloModule Test ENTRY Test { inputs = f32[1,17,9,9] parameter(0) filters = f32[3,3,17,32] parameter(1) bias = f32[32] parameter(2) side_input = f32[1,32,9,9] parameter(3) bias_broadcast = f32[1,32,9,9] broadcast(bias), dimensions={1} conv = f32[1,32,9,9] convolution(inputs, filters), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01 sum = add(conv, side_input) ROOT sum2 = add(sum, bias_broadcast) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); ConvRewriter rewriter{GetCudaComputeCapability()}; TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status()); CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(), GetToolkitVersion()}; TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status()); SCOPED_TRACE(m->ToString()); const HloInstruction* conv; ASSERT_THAT( m->entry_computation()->root_instruction(), GmockMatch( 
m::GetTupleElement( m::CustomCall(&conv, {kCudnnConvBiasActivationForwardCallTarget}, m::Parameter(0), m::Parameter(1), m::Parameter(2), m::Parameter(3)), 0) .WithShape(F32, {1, 32, 9, 9}))); TF_ASSERT_OK_AND_ASSIGN(auto gpu_config, conv->backend_config<GpuBackendConfig>()); const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config(); EXPECT_EQ(config.side_input_scale(), 1); } TEST_F(CudnnFusedConvRewriterHloTest, EffectiveScalarBias) { const std::string module_str = R"( HloModule Test ENTRY Test { inputs = f32[1,17,9,9] parameter(0) filters = f32[3,3,17,32] parameter(1) bias = f32[1,32,9,9] broadcast(f32[] parameter(2)), dimensions={} conv = f32[1,32,9,9] convolution(inputs, filters), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01 ROOT root = add(conv, bias) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); ConvRewriter rewriter{GetCudaComputeCapability()}; TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status()); CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(), GetToolkitVersion()}; TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status()); SCOPED_TRACE(m->ToString()); const HloInstruction* conv; ASSERT_THAT( m->entry_computation()->root_instruction(), GmockMatch( m::GetTupleElement( m::CustomCall(&conv, {kCudnnConvBiasActivationForwardCallTarget}, m::Parameter(0), m::Parameter(1), m::Broadcast(m::Parameter(2)).WithShape(F32, {32})), 0) .WithShape(F32, {1, 32, 9, 9}))); } TEST_F(CudnnFusedConvRewriterHloTest, StrengthReduceF32ToF16) { const std::string module_str = R"( HloModule Test ENTRY Test { inputs = f16[1,17,9,9] parameter(0) filters = f16[3,3,17,32] parameter(1) bias = f16[32] parameter(2) side_input = f16[1,32,9,9] parameter(3) inputs_f32 = f32[1,17,9,9] convert(inputs) filters_f32 = f32[3,3,17,32] convert(filters) bias_f32 = f32[32] convert(bias) bias_broadcast = f32[1,32,9,9] broadcast(bias_f32), dimensions={1} side_input_f32 = f32[1,32,9,9] convert(side_input) conv = f32[1,32,9,9] convolution(inputs_f32, filters_f32), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01 sum = add(conv, side_input_f32) sum2 = add(sum, bias_broadcast) ROOT conv_f16 = f16[1,32,9,9] convert(sum2) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); ConvRewriter rewriter{GetCudaComputeCapability()}; TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status()); CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(), GetToolkitVersion()}; TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status()); AlgebraicSimplifier algsimp(AlgebraicSimplifierOptions{}); TF_ASSERT_OK(RunHloPass(&algsimp, m.get()).status()); SCOPED_TRACE(m->ToString()); const HloInstruction* conv; ASSERT_THAT( m->entry_computation()->root_instruction(), GmockMatch( m::GetTupleElement( m::CustomCall(&conv, {kCudnnConvBiasActivationForwardCallTarget}, m::Parameter(0), m::Parameter(1), m::Parameter(2), m::Parameter(3)), 0) .WithShape(F16, {1, 32, 9, 9}))); TF_ASSERT_OK_AND_ASSIGN(auto gpu_config, conv->backend_config<GpuBackendConfig>()); const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config(); EXPECT_EQ(config.side_input_scale(), 1); } TEST_F(CudnnFusedConvRewriterHloTest, BroadcastReshapeTransposeAfterConvert) { const std::string module_str = R"( HloModule Test ENTRY Test { inputs = f32[1,17,9,9] reshape(f32[1377] convert(f16[1377] parameter(0))) filters = f32[3,3,17,32] transpose(f32[17,32,3,3] convert(f16[17,32,3,3] parameter(1))), dimensions={2,3,0,1} bias = f16[1,32,9,9] broadcast(f16[32] 
parameter(2)), dimensions={1} side_input = f16[1,32,9,9] reshape(f16[2592] parameter(3)) conv_f32 = f32[1,32,9,9] convolution(inputs, filters), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01 conv_f16 = f16[1,32,9,9] convert(conv_f32) ROOT root = f16[1,32,9,9] add(add(conv_f16, side_input), bias) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); ConvRewriter rewriter{GetCudaComputeCapability()}; TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status()); CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(), GetToolkitVersion()}; TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status()); AlgebraicSimplifier algsimp(AlgebraicSimplifierOptions{}); TF_ASSERT_OK(RunHloPass(&algsimp, m.get()).status()); SCOPED_TRACE(m->ToString()); const HloInstruction* conv; ASSERT_THAT( m->entry_computation()->root_instruction(), GmockMatch(m::GetTupleElement( m::CustomCall( &conv, {kCudnnConvBiasActivationForwardCallTarget}, m::Convert(m::Reshape(m::Convert(m::Parameter(0)))) .WithElementType(F16), m::Convert(m::Transpose(m::Convert(m::Parameter(1)))) .WithElementType(F16), m::Parameter(2), m::Reshape(m::Parameter(3))), 0) .WithShape(F16, {1, 32, 9, 9}))); TF_ASSERT_OK_AND_ASSIGN(auto gpu_config, conv->backend_config<GpuBackendConfig>()); const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config(); EXPECT_EQ(config.side_input_scale(), 1); } TEST_F(CudnnFusedConvRewriterHloTest, NoStrengthReduceF32ToF16IfBiasIsF32) { const std::string module_str = R"( HloModule Test ENTRY Test { inputs = f16[1,17,9,9] parameter(0) filters = f16[3,3,17,32] parameter(1) bias = f32[32] parameter(2) side_input = f16[1,32,9,9] parameter(3) inputs_f32 = f32[1,17,9,9] convert(inputs) filters_f32 = f32[3,3,17,32] convert(filters) bias_broadcast = f32[1,32,9,9] broadcast(bias), dimensions={1} side_input_f32 = f32[1,32,9,9] convert(side_input) conv = f32[1,32,9,9] convolution(inputs_f32, filters_f32), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01 sum = add(conv, side_input_f32) sum2 = add(sum, bias_broadcast) ROOT conv_f16 = f16[1,32,9,9] convert(sum2) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); ConvRewriter rewriter{GetCudaComputeCapability()}; TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status()); CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(), GetToolkitVersion()}; TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status()); AlgebraicSimplifier algsimp(AlgebraicSimplifierOptions{}); TF_ASSERT_OK(RunHloPass(&algsimp, m.get()).status()); SCOPED_TRACE(m->ToString()); const HloInstruction* conv; ASSERT_THAT( m->entry_computation()->root_instruction(), GmockMatch( m::Convert(m::GetTupleElement( m::CustomCall( &conv, {kCudnnConvBiasActivationForwardCallTarget}, m::Convert(m::Parameter(0)).WithElementType(F32), m::Convert(m::Parameter(1)).WithElementType(F32), m::Parameter(2), m::Convert(m::Parameter(3)).WithElementType(F32)), 0)) .WithShape(F16, {1, 32, 9, 9}))); TF_ASSERT_OK_AND_ASSIGN(auto gpu_config, conv->backend_config<GpuBackendConfig>()); const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config(); EXPECT_EQ(config.side_input_scale(), 1); } TEST_F(CudnnFusedConvRewriterHloTest, F32Constants) { const std::string module_str = R"( HloModule Test ENTRY Test { inputs = f16[1,2,2,2] parameter(0) filters_f32 = f32[1,1,2,2] constant({{{{1, 2},{3, 4}}}}) bias = f16[2] parameter(1) bias_f32 = f32[2] convert(bias) side_input_f32 = f32[1,2,2,2] constant({{ {{0.5, 0.25}, {0.125, 0.0625}}, {{0.5, 
0.25}, {0.125, 0.0625}} }}) inputs_f32 = f32[1,2,2,2] convert(inputs) bias_broadcast = f32[1,2,2,2] broadcast(bias_f32), dimensions={1} conv = f32[1,2,2,2] convolution(inputs_f32, filters_f32), window={size=1x1}, dim_labels=bf01_01io->bf01 sum = add(conv, side_input_f32) sum2 = add(sum, bias_broadcast) ROOT conv_f16 = f16[1,2,2,2] convert(sum2) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); ConvRewriter rewriter{GetCudaComputeCapability()}; TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status()); CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(), GetToolkitVersion()}; TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status()); AlgebraicSimplifier algsimp(AlgebraicSimplifierOptions{}); TF_ASSERT_OK(RunHloPass(&algsimp, m.get()).status()); HloConstantFolding constant_folding; TF_ASSERT_OK(RunHloPass(&constant_folding, m.get()).status()); SCOPED_TRACE(m->ToString()); const HloInstruction* conv; ASSERT_THAT( m->entry_computation()->root_instruction(), GmockMatch(m::GetTupleElement( m::CustomCall( &conv, {kCudnnConvBiasActivationForwardCallTarget}, m::Parameter(0), m::Constant().WithElementType(F16), m::Parameter(1), m::Constant().WithElementType(F16)), 0) .WithShape(F16, {1, 2, 2, 2}))); TF_ASSERT_OK_AND_ASSIGN(auto gpu_config, conv->backend_config<GpuBackendConfig>()); const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config(); EXPECT_EQ(config.side_input_scale(), 1); } TEST_F(CudnnFusedConvRewriterHloTest, F32ConstantsNotLosslesslyConvertible) { const std::string module_str = R"( HloModule Test ENTRY Test { inputs = f16[1,2,2,2] parameter(0) filters_f32 = f32[1,1,2,2] constant({{{{1, 2.123456789},{3, 4}}}}) bias = f16[2] parameter(1) bias_f32 = f32[2] convert(bias) side_input_f32 = f32[1,2,2,2] constant({{ {{0.1, 0.2}, {0.3, 0.4}}, {{0.5, 0.6}, {0.7, 0.8}} }}) inputs_f32 = f32[1,2,2,2] convert(inputs) bias_broadcast = f32[1,2,2,2] broadcast(bias_f32), dimensions={1} conv = f32[1,2,2,2] convolution(inputs_f32, filters_f32), window={size=1x1}, dim_labels=bf01_01io->bf01 sum = add(conv, side_input_f32) sum2 = add(sum, bias_broadcast) ROOT conv_f16 = f16[1,2,2,2] convert(sum2) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); ConvRewriter rewriter{GetCudaComputeCapability()}; TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status()); CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(), GetToolkitVersion()}; TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status()); AlgebraicSimplifier algsimp(AlgebraicSimplifierOptions{}); TF_ASSERT_OK(RunHloPass(&algsimp, m.get()).status()); HloConstantFolding constant_folding; TF_ASSERT_OK(RunHloPass(&constant_folding, m.get()).status()); SCOPED_TRACE(m->ToString()); const HloInstruction* conv; ASSERT_THAT( m->entry_computation()->root_instruction(), GmockMatch( m::Convert(m::GetTupleElement( m::CustomCall( &conv, {kCudnnConvBiasActivationForwardCallTarget}, m::Convert(m::Parameter(0)).WithElementType(F32), m::Constant().WithElementType(F32), m::Convert(m::Parameter(1)).WithElementType(F32), m::Constant().WithElementType(F32)), 0) .WithShape(F32, {1, 2, 2, 2})) .WithElementType(F16))); TF_ASSERT_OK_AND_ASSIGN(auto gpu_config, conv->backend_config<GpuBackendConfig>()); const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config(); EXPECT_EQ(config.side_input_scale(), 1); } TEST_F(CudnnFusedConvRewriterHloTest, FuseReluBeforeConvert) { MAYBE_SKIP_TEST("I8"); const std::string module_str = R"( HloModule Test ENTRY Test { input = s8[1,17,9,9] 
parameter(0) filter = s8[3,3,17,32] parameter(1) inputs32 = s32[1,17,9,9] convert(input) filters32 = s32[3,3,17,32] convert(filter) conv = s32[1,32,9,9] convolution(inputs32, filters32), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1 zero = s32[] constant(0) zeros = s32[1,32,9,9] broadcast(zero), dimensions={} relu = maximum(conv, zeros) lower = s32[] constant(-128) lowers = s32[1,32,9,9] broadcast(lower), dimensions={} upper = s32[] constant(127) uppers = s32[1,32,9,9] broadcast(upper), dimensions={} clamp = s32[1,32,9,9] clamp(lowers, relu, uppers) ROOT convert = s8[1,32,9,9] convert(clamp) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); ConvRewriter rewriter{GetCudaComputeCapability()}; TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status()); CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(), GetToolkitVersion()}; TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status()); AlgebraicSimplifier algsimp(AlgebraicSimplifierOptions{}); TF_ASSERT_OK(RunHloPass(&algsimp, m.get()).status()); SCOPED_TRACE(m->ToString()); const HloInstruction* conv; ASSERT_THAT( m->entry_computation()->root_instruction(), GmockMatch( m::GetTupleElement( m::CustomCall(&conv, {kCudnnConvBiasActivationForwardCallTarget}, m::Parameter(0), m::Parameter(1), m::Broadcast(m::ConstantEffectiveScalar(0)) .WithShape(F32, {32})), 0) .WithShape(S8, {1, 32, 9, 9}))); TF_ASSERT_OK_AND_ASSIGN(auto gpu_config, conv->backend_config<GpuBackendConfig>()); const CudnnConvBackendConfig& config = gpu_config.cudnn_conv_backend_config(); EXPECT_EQ(config.activation_mode(), se::dnn::kRelu); } TEST_F(CudnnFusedConvRewriterHloTest, BiasTypeMatchesConvTypeIfFp) { MAYBE_SKIP_TEST("F64"); const std::string module_str = R"( HloModule Test ENTRY Test { input = f64[1,17,9,9] parameter(0) filter = f64[3,3,17,32] parameter(1) bias = f64[1,32,9,9] broadcast(f64[32] convert(f32[32] parameter(2))), dimensions={1} conv = f64[1,32,9,9] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1 ROOT root = f64[1,32,9,9] add(conv, bias) })"; TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); ConvRewriter rewriter{GetCudaComputeCapability()}; TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status()); CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(), GetToolkitVersion()}; TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status()); AlgebraicSimplifier algsimp(AlgebraicSimplifierOptions{}); TF_ASSERT_OK(RunHloPass(&algsimp, m.get()).status()); SCOPED_TRACE(m->ToString()); const HloInstruction* conv; ASSERT_THAT( m->entry_computation()->root_instruction(), GmockMatch( m::GetTupleElement( m::CustomCall(&conv, {kCudnnConvBiasActivationForwardCallTarget}, m::Parameter(0), m::Parameter(1), m::Convert(m::Parameter(2)).WithShape(F64, {32})), 0) .WithShape(F64, {1, 32, 9, 9}))); } TEST_F(CudnnFusedConvRewriterTest, TestFusedConvInt8ToInt8) { MAYBE_SKIP_TEST("I8"); TestClamp( R"( HloModule Test ENTRY Test { zero = f32[] constant(0) zeros = f32[1,3,3,64] broadcast(zero), dimensions={} input = s8[1,3,3,64] parameter(0) filter = s8[3,3,64,64] parameter(1) bias = f32[64] parameter(2) inputs32 = s32[1,3,3,64] convert(input) filters32 = s32[3,3,64,64] convert(filter) conv = s32[1,3,3,64] convolution(inputs32, filters32), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1 convfloat = f32[1,3,3,64] convert(conv) broadcasted_bias = f32[1,3,3,64] broadcast(bias), dimensions={3} add1 = 
f32[1,3,3,64] add(convfloat, broadcasted_bias) relu = f32[1,3,3,64] maximum(zeros, add1) lower = f32[] constant(-128) lowers = f32[1,3,3,64] broadcast(lower), dimensions={} upper = f32[] constant(127) uppers = f32[1,3,3,64] broadcast(upper), dimensions={} clamp = f32[1,3,3,64] clamp(lowers, relu, uppers) ROOT convert = s8[1,3,3,64] convert(clamp) })", R"( )"); } TEST_F(CudnnFusedConvRewriterTest, DISABLED_TestFusedConvInt8ToFloat) { MAYBE_SKIP_TEST("I8"); TestClamp( R"( HloModule Test ENTRY Test { zero = f32[] constant(0) zeros = f32[1,3,3,64] broadcast(zero), dimensions={} input = s8[1,3,3,64] parameter(0) filter = s8[3,3,64,64] parameter(1) bias = f32[64] parameter(2) inputs32 = s32[1,3,3,64] convert(input) filters32 = s32[3,3,64,64] convert(filter) conv = s32[1,3,3,64] convolution(inputs32, filters32), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1 convfloat = f32[1,3,3,64] convert(conv) broadcasted_bias = f32[1,3,3,64] broadcast(bias), dimensions={3} add1 = f32[1,3,3,64] add(convfloat, broadcasted_bias) ROOT relu = f32[1,3,3,64] maximum(zeros, add1) })", R"( ; CHECK-LABEL: ENTRY %Test (input: s8[1,3,3,64], filter: s8[3,3,64,64], bias: f32[64]) -> f32[1,3,3,64] { ; CHECK: [[custom_call_0:%[^ ]+]]{{(\.[0-9])?}} = (f32[1,3,3,64]{3,2,1,0}, u8[{{[0-9]*}}]{0}) custom-call([[input_1:%[^ ]+]], [[copy_2:%[^ ]+]]{{(\.[0-9])?}}, [[bias_3:%[^ ]+]]), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, custom_call_target="__cudnn$convBiasActivationForward", backend_config= ; CHECK-NEXT: ROOT [[get_tuple_element_4:%[^ ]+]]{{(\.[0-9])?}} = f32[1,3,3,64]{3,2,1,0} get-tuple-element([[custom_call_0]]{{(\.[0-9])?}}), index=0 )"); } TEST_F(CudnnFusedConvRewriterTest, TestFusedConvWithScaledInt8SideInputBiasInt8ToInt8) { MAYBE_SKIP_TEST("I8"); TestClamp( R"( HloModule Test ENTRY Test { zero = f32[] constant(0) zeros = f32[1,3,3,64] broadcast(zero), dimensions={} alpha_conv_scalar = f32[] constant(0.999994934) alpha_conv = f32[1,3,3,64] broadcast(alpha_conv_scalar), dimensions={} alpha_side_input_scalar = f32[] constant(0.899994934) alpha_side_input = f32[1,3,3,64] broadcast(alpha_side_input_scalar), dimensions={} input = s8[1,3,3,64] parameter(0) filter = s8[3,3,64,64] parameter(1) side_input = s8[1,3,3,64] parameter(2) bias = f32[64] parameter(3) inputs32 = s32[1,3,3,64] convert(input) filters32 = s32[3,3,64,64] convert(filter) side_input_f32 = f32[1,3,3,64] convert(side_input) conv = s32[1,3,3,64] convolution(inputs32, filters32), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1 convfloat = f32[1,3,3,64] convert(conv) scaled_conv = f32[1,3,3,64] multiply(convfloat, alpha_conv) scaled_side_input = f32[1,3,3,64] multiply(side_input_f32, alpha_side_input) broadcasted_bias = f32[1,3,3,64] broadcast(bias), dimensions={3} add1 = f32[1,3,3,64] add(scaled_conv, broadcasted_bias) add2 = f32[1,3,3,64] add(add1, scaled_side_input) relu = f32[1,3,3,64] maximum(zeros, add2) lower = f32[] constant(-128) lowers = f32[1,3,3,64] broadcast(lower), dimensions={} upper = f32[] constant(127) uppers = f32[1,3,3,64] broadcast(upper), dimensions={} clamp = f32[1,3,3,64] clamp(lowers, relu, uppers) ROOT convert = s8[1,3,3,64] convert(clamp) })", R"( )"); } TEST_F(CudnnFusedConvRewriterTest, TestFusedConvWithScaledFloatSideInputBiasInt8ToInt8) { MAYBE_SKIP_TEST("I8"); TestClamp( R"( HloModule Test ENTRY Test { zero = f32[] constant(0) zeros = f32[1,3,3,64] broadcast(zero), dimensions={} alpha_conv_scalar = f32[] constant(0.999994934) alpha_conv = 
f32[1,3,3,64] broadcast(alpha_conv_scalar), dimensions={} alpha_side_input_scalar = f32[] constant(0.899994934) alpha_side_input = f32[1,3,3,64] broadcast(alpha_side_input_scalar), dimensions={} input = s8[1,3,3,64] parameter(0) filter = s8[3,3,64,64] parameter(1) side_input = f32[1,3,3,64] parameter(2) bias = f32[64] parameter(3) inputs32 = s32[1,3,3,64] convert(input) filters32 = s32[3,3,64,64] convert(filter) conv = s32[1,3,3,64] convolution(inputs32, filters32), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1 convfloat = f32[1,3,3,64] convert(conv) scaled_conv = f32[1,3,3,64] multiply(convfloat, alpha_conv) scaled_side_input = f32[1,3,3,64] multiply(side_input, alpha_side_input) broadcasted_bias = f32[1,3,3,64] broadcast(bias), dimensions={3} add1 = f32[1,3,3,64] add(scaled_conv, broadcasted_bias) add2 = f32[1,3,3,64] add(add1, scaled_side_input) relu = f32[1,3,3,64] maximum(zeros, add2) lower = f32[] constant(-128) lowers = f32[1,3,3,64] broadcast(lower), dimensions={} upper = f32[] constant(127) uppers = f32[1,3,3,64] broadcast(upper), dimensions={} clamp = f32[1,3,3,64] clamp(lowers, relu, uppers) ROOT convert = s8[1,3,3,64] convert(clamp) })", R"( )"); } TEST_F(CudnnFusedConvRewriterTest, TestFusedConvWithScaledInt8SideInputBiasInt8ToFloat) { MAYBE_SKIP_TEST("I8"); TestClamp( R"( HloModule Test ENTRY Test { zero = f32[] constant(0) zeros = f32[1,3,3,64] broadcast(zero), dimensions={} alpha_conv_scalar = f32[] constant(0.999994934) alpha_conv = f32[1,3,3,64] broadcast(alpha_conv_scalar), dimensions={} alpha_side_input_scalar = f32[] constant(0.899994934) alpha_side_input = f32[1,3,3,64] broadcast(alpha_side_input_scalar), dimensions={} input = s8[1,3,3,64] parameter(0) filter = s8[3,3,64,64] parameter(1) side_input = s8[1,3,3,64] parameter(2) bias = f32[64] parameter(3) inputs32 = s32[1,3,3,64] convert(input) filters32 = s32[3,3,64,64] convert(filter) side_input_f32 = f32[1,3,3,64] convert(side_input) conv = s32[1,3,3,64] convolution(inputs32, filters32), window={size=3x3 pad=1_1x1_1}, dim_labels=b01f_01io->b01f, feature_group_count=1 convfloat = f32[1,3,3,64] convert(conv) scaled_conv = f32[1,3,3,64] multiply(convfloat, alpha_conv) scaled_side_input = f32[1,3,3,64] multiply(side_input_f32, alpha_side_input) broadcasted_bias = f32[1,3,3,64] broadcast(bias), dimensions={3} add1 = f32[1,3,3,64] add(scaled_conv, broadcasted_bias) add2 = f32[1,3,3,64] add(add1, scaled_side_input) relu = f32[1,3,3,64] maximum(zeros, add2) lower = f32[] constant(-128) lowers = f32[1,3,3,64] broadcast(lower), dimensions={} upper = f32[] constant(127) uppers = f32[1,3,3,64] broadcast(upper), dimensions={} ROOT clamp = f32[1,3,3,64] clamp(lowers, relu, uppers) })", R"( )"); } TEST_F(CudnnFusedConvRewriterTest, TestConvInt8ToInt8NoClamp) { MAYBE_SKIP_TEST("I8"); const std::string module_str = absl::StrFormat(R"( HloModule Test ENTRY Test (input: s8[1,17,9,9], filter: s8[3,3,17,32]) -> s8[1,32,9,9] { zero = s8[] constant(0) zeros = s8[1,32,9,9]{3,2,1,0} broadcast(s8[] zero), dimensions={} input = s8[1,17,9,9]{3,2,1,0} parameter(0) filter = s8[3,3,17,32]{3,2,1,0} parameter(1) custom-call = (s32[1,32,9,9]{3,2,1,0}, u8[0]{0}) custom-call(s8[1,17,9,9]{3,2,1,0} input, s8[3,3,17,32]{3,2,1,0} filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, custom_call_target="__cudnn$convForward", backend_config="{\"convResultScale\":1}" get-tuple-element = s32[1,32,9,9]{3,2,1,0} get-tuple-element((s32[1,32,9,9]{3,2,1,0}, u8[0]{0}) custom-call), index=0 convert = 
s8[1,32,9,9]{3,2,1,0} convert(s32[1,32,9,9]{3,2,1,0} get-tuple-element) ROOT relu = s8[1,32,9,9]{3,2,1,0} maximum(s8[1,32,9,9]{3,2,1,0} zeros, s8[1,32,9,9]{3,2,1,0} convert) })"); TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); ASSERT_FALSE(CudnnFusedConvRewriter(GetCudaComputeCapability(), GetDnnVersion(), GetToolkitVersion()) .Run(m.get()) .ok()); } TEST_F(CudnnFusedConvRewriterTest, TestFusedConvInt8ToInt8NoClamp) { MAYBE_SKIP_TEST("I8"); const std::string module_str = absl::StrFormat(R"( HloModule Test ENTRY Test (input: s8[1,17,9,9], filter: s8[3,3,17,32]) -> s8[1,32,9,9] { zero = s8[] constant(0) zeros = s8[1,32,9,9]{3,2,1,0} broadcast(s8[] zero), dimensions={} input = s8[1,17,9,9]{3,2,1,0} parameter(0) filter = s8[3,3,17,32]{3,2,1,0} parameter(1) custom-call = (s32[1,32,9,9]{3,2,1,0}, u8[0]{0}) custom-call(s8[1,17,9,9]{3,2,1,0} input, s8[3,3,17,32]{3,2,1,0} filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, custom_call_target="__cudnn$convForward", backend_config="{\"convResultScale\":1}" get-tuple-element = s32[1,32,9,9]{3,2,1,0} get-tuple-element((s32[1,32,9,9]{3,2,1,0}, u8[0]{0}) custom-call), index=0 convert = s8[1,32,9,9]{3,2,1,0} convert(s32[1,32,9,9]{3,2,1,0} get-tuple-element) ROOT relu = s8[1,32,9,9]{3,2,1,0} maximum(s8[1,32,9,9]{3,2,1,0} zeros, s8[1,32,9,9]{3,2,1,0} convert) })"); TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str)); ASSERT_FALSE(CudnnFusedConvRewriter(GetCudaComputeCapability(), GetDnnVersion(), GetToolkitVersion()) .Run(m.get()) .ok()); } } } }
Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/cudnn_fused_conv_rewriter.cc
Test Code Url: https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/cudnn_fused_conv_rewriter_test.cc
Commit Hash: 4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
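Illustrative addition, not part of the dataset row above: every test in this row's unit-test field drives the same two-pass pipeline before pattern-matching the rewritten graph. The skeleton below is a sketch of that shared structure, lifted directly from the tests themselves; the test name is hypothetical, the input HLO reuses the FuseBias module shown above, and the snippet assumes it lives in the same test file, inside the CudnnFusedConvRewriterHloTest fixture that provides ParseAndReturnVerifiedModule, RunHloPass, GetCudaComputeCapability, GetDnnVersion and GetToolkitVersion.

TEST_F(CudnnFusedConvRewriterHloTest, PipelineSkeletonSketch) {
  // Input HLO: the FuseBias example from above (conv plus broadcasted bias).
  const std::string module_str = R"(
    HloModule Test
    ENTRY Test {
      inputs = f32[1,17,9,9] parameter(0)
      filters = f32[3,3,17,32] parameter(1)
      bias = f32[32] parameter(2)
      bias_broadcast = f32[1,32,9,9] broadcast(bias), dimensions={1}
      conv = f32[1,32,9,9] convolution(inputs, filters),
             window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01
      ROOT root = add(conv, bias_broadcast)
    })";
  TF_ASSERT_OK_AND_ASSIGN(auto m, ParseAndReturnVerifiedModule(module_str));
  // Pass 1: rewrite the plain HLO convolution into a cuDNN custom call.
  ConvRewriter rewriter{GetCudaComputeCapability()};
  TF_ASSERT_OK(RunHloPass(&rewriter, m.get()).status());
  // Pass 2: fuse the bias (and, in other tests, side inputs and activations)
  // into the __cudnn$convBiasActivationForward custom call.
  CudnnFusedConvRewriter fuser{GetCudaComputeCapability(), GetDnnVersion(),
                               GetToolkitVersion()};
  TF_ASSERT_OK(RunHloPass(&fuser, m.get()).status());
  // Check the rewritten graph with the same pattern matchers the tests use.
  ASSERT_THAT(
      m->entry_computation()->root_instruction(),
      GmockMatch(
          m::GetTupleElement(
              m::CustomCall({kCudnnConvBiasActivationForwardCallTarget},
                            m::Parameter(0), m::Parameter(1), m::Parameter(2)),
              0)
              .WithShape(F32, {1, 32, 9, 9})));
}

The individual tests in this row differ only in the HLO they feed in and in which matcher and backend-config expectations they check afterwards (activation_mode, conv_result_scale, side_input_scale).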
ID: 7ab47ab0-9fed-413f-9b03-23b0997a2c1f
Language: cpp
Repository Name: tensorflow/tensorflow
File Name: nest_gemm_fusion
File Path in Repository: third_party/xla/xla/service/gpu/transforms/nest_gemm_fusion.cc
File Path for Unit Test: third_party/xla/xla/service/gpu/transforms/nest_gemm_fusion_test.cc
#include "xla/service/gpu/transforms/nest_gemm_fusion.h" #include <algorithm> #include <cstddef> #include <cstdint> #include <string> #include <utility> #include <variant> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "absl/types/span.h" #include "llvm/ADT/SmallVector.h" #include "mlir/IR/MLIRContext.h" #include "xla/hlo/ir/dfs_hlo_visitor_with_default.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_query.h" #include "xla/service/gpu/ir_emission_utils.h" #include "xla/service/gpu/matmul_utils.h" #include "xla/service/gpu/model/symbolic_tile_analysis.h" #include "xla/service/gpu/model/symbolic_tiled_hlo_instruction.h" #include "xla/service/gpu/model/tiled_hlo_computation.h" #include "xla/service/hlo_dce.h" #include "xla/service/instruction_fusion.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla::gpu { namespace { absl::Status FuseInstructionsForConsumer( const std::vector<HloInstruction*>& instructions, HloInstruction& consumer) { HloComputation::Builder builder(instructions.back()->name()); absl::flat_hash_map<const HloInstruction*, HloInstruction*> old_to_new_mapping; std::vector<HloInstruction*> parameters; auto add_parameter = [&](HloInstruction* instruction) -> void { int param_index = parameters.size(); old_to_new_mapping[instruction] = builder.AddInstruction(HloInstruction::CreateParameter( param_index, instruction->shape(), absl::StrCat("parameter_", param_index))); parameters.push_back(instruction); }; for (HloInstruction* instruction : instructions) { if (old_to_new_mapping.contains(instruction)) { continue; } if (instruction->opcode() == HloOpcode::kParameter) { add_parameter(instruction); continue; } std::vector<HloInstruction*> new_operands; for (HloInstruction* operand : instruction->mutable_operands()) { if (!old_to_new_mapping.contains(operand)) { add_parameter(operand); } new_operands.push_back(old_to_new_mapping[operand]); } old_to_new_mapping[instruction] = builder.AddInstruction( instruction->CloneWithNewOperands(instruction->shape(), new_operands)); } HloInstruction* old_root = instructions.back(); old_to_new_mapping[old_root]->MarkAsRoot(); HloComputation* computation = old_root->GetModule()->AddComputationAndUnifyNamesAndIds( builder.Build(), false); HloInstruction* fusion = old_root->parent()->AddInstruction(HloInstruction::CreateFusion( old_root->shape(), HloInstruction::FusionKind::kCustom, parameters, computation)); fusion->GetModule()->SetAndUniquifyInstrName(fusion, "block_fusion"); TF_ASSIGN_OR_RETURN(auto gpu_config, fusion->backend_config<GpuBackendConfig>()); FusionBackendConfig& backend_config = *gpu_config.mutable_fusion_backend_config(); backend_config.set_kind(std::string(kTritonFusionKind)); TF_RETURN_IF_ERROR(fusion->set_backend_config(gpu_config)); for (int64_t operand_index : consumer.OperandIndices(old_root)) { TF_RETURN_IF_ERROR(consumer.ReplaceOperandWith(operand_index, fusion)); } return absl::OkStatus(); } absl::Status AnnotateDotOperandNestedFusionImpl( HloFusionInstruction& nested_fusion, const HloDotInstruction& dot, const TritonGemmConfig& config, absl::Span<const int64_t> contracting_dimensions, 
absl::Span<const int64_t> batch_dimensions, int64_t contracting_dim_size, int64_t non_contracting_dim_size) { if (contracting_dimensions.size() != 1) { return absl::InternalError( absl::StrCat("Expected a single lhs contracting dimension but got ", contracting_dimensions.size())); } TF_ASSIGN_OR_RETURN( std::vector<int64_t> non_contracting_dimensions, GetNonContractingDims(dot.operand(0)->shape(), batch_dimensions, contracting_dimensions)); if (non_contracting_dimensions.size() != 1) { return absl::InternalError( absl::StrCat("Expected a single non-contracting dimension but got ", non_contracting_dimensions.size())); } std::vector<int64_t> output_tile_sizes(dot.operand(0)->shape().rank(), 1); output_tile_sizes[contracting_dimensions[0]] = contracting_dim_size; output_tile_sizes[non_contracting_dimensions[0]] = non_contracting_dim_size; BlockLevelParameters block_level_parameters; block_level_parameters.output_tile_sizes = std::move(output_tile_sizes); TF_ASSIGN_OR_RETURN(auto backend_config, nested_fusion.backend_config<GpuBackendConfig>()); *backend_config.mutable_fusion_backend_config() ->mutable_block_level_fusion_config() = block_level_parameters.ToBlockLevelFusionConfig(); TF_RETURN_IF_ERROR(nested_fusion.set_backend_config(backend_config)); return absl::OkStatus(); } absl::Status AnnotateDotLhsNestedFusion(HloFusionInstruction& nested_fusion, const HloDotInstruction& dot, const TritonGemmConfig& config) { const DotDimensionNumbers& dimension_numbers = dot.dot_dimension_numbers(); return AnnotateDotOperandNestedFusionImpl( nested_fusion, dot, config, dimension_numbers.lhs_contracting_dimensions(), dimension_numbers.lhs_batch_dimensions(), config.block_k, config.block_m); } absl::Status AnnotateDotRhsNestedFusion(HloFusionInstruction& nested_fusion, const HloDotInstruction& dot, const TritonGemmConfig& config) { const DotDimensionNumbers& dimension_numbers = dot.dot_dimension_numbers(); return AnnotateDotOperandNestedFusionImpl( nested_fusion, dot, config, dimension_numbers.rhs_contracting_dimensions(), dimension_numbers.rhs_batch_dimensions(), config.block_k, config.block_n); } absl::StatusOr<llvm::SmallVector<int64_t>> FindOutputTileSizesForEpilogue( const SymbolicTiledHloInstruction& tiled_dot, const SymbolicTileAnalysis& analysis, const TritonGemmConfig& config) { int64_t dot_rank = tiled_dot.symbolic_tile().tile_map().GetDimensionCount(); llvm::SmallVector<int64_t> expected_dot_tile_sizes(dot_rank, 1); expected_dot_tile_sizes[dot_rank - 2] = config.block_m; expected_dot_tile_sizes[dot_rank - 1] = config.block_n; llvm::SmallVector<int64_t> output_tile_sizes = expected_dot_tile_sizes; std::sort(output_tile_sizes.begin(), output_tile_sizes.end()); do { TF_ASSIGN_OR_RETURN( bool parameters_satisfy_constraints, analysis.ParametersSatisfyConstraints(output_tile_sizes)); if (!parameters_satisfy_constraints) { continue; } auto mapped_dot_tile_sizes = tiled_dot.TileSizes(output_tile_sizes); if (mapped_dot_tile_sizes == expected_dot_tile_sizes) { return output_tile_sizes; } } while (std::next_permutation(output_tile_sizes.begin(), output_tile_sizes.end())); return absl::InternalError(absl::StrCat( "Couldn't find output tile sizes that satisfy ", tiled_dot.ToString())); } absl::StatusOr<TritonGemmConfig> GetTritonGemmConfig( const HloFusionInstruction& fusion) { TF_ASSIGN_OR_RETURN(auto gpu_config, fusion.backend_config<GpuBackendConfig>()); const FusionBackendConfig& backend_config = gpu_config.fusion_backend_config(); if (!backend_config.has_triton_gemm_config()) { return 
absl::InternalError( "The fusion's backend config doesn't have a triton_gemm_config."); } return TritonGemmConfig::FromProto(backend_config.triton_gemm_config()); } absl::Status MakeNestedFusionFromGemmFusion( HloFusionInstruction* fusion, const TritonGemmConfig& config, const SymbolicTileAnalysis& analysis, const SymbolicTiledHloInstruction& tiled_dot, HloDotInstruction* dot) { DCHECK(GetTritonGemmConfig(*fusion).value() == config); DCHECK_EQ(tiled_dot.hlo(), dot); HloComputation* computation = fusion->called_computation(); TF_RETURN_IF_ERROR(FuseInstructionsForConsumer( computation->MakeInstructionPostOrderFrom(*dot->mutable_operand(0)), *dot)); TF_RETURN_IF_ERROR(AnnotateDotLhsNestedFusion( *::xla::Cast<HloFusionInstruction>(dot->mutable_operand(0)), *dot, config)); TF_RETURN_IF_ERROR(FuseInstructionsForConsumer( computation->MakeInstructionPostOrderFrom(*dot->mutable_operand(1)), *dot)); TF_RETURN_IF_ERROR(AnnotateDotRhsNestedFusion( *::xla::Cast<HloFusionInstruction>(dot->mutable_operand(1)), *dot, config)); TF_ASSIGN_OR_RETURN([[maybe_unused]] bool changed, HloDCE::RunOnComputation( computation, false)); TF_ASSIGN_OR_RETURN( llvm::SmallVector<int64_t> output_tile_sizes, FindOutputTileSizesForEpilogue(tiled_dot, analysis, config)); TF_ASSIGN_OR_RETURN(auto gpu_config, fusion->backend_config<GpuBackendConfig>()); FusionBackendConfig& backend_config = *gpu_config.mutable_fusion_backend_config(); backend_config.set_kind(std::string(kTritonFusionKind)); BlockLevelParameters block_level_parameters; block_level_parameters.output_tile_sizes.assign(output_tile_sizes.begin(), output_tile_sizes.end()); *backend_config.mutable_block_level_fusion_config() = block_level_parameters.ToBlockLevelFusionConfig(); TF_RETURN_IF_ERROR(fusion->set_backend_config(gpu_config)); return absl::OkStatus(); } size_t GetDotCount(HloComputation* computation) { return absl::c_count_if(computation->instructions(), [](HloInstruction* hlo) { return hlo->opcode() == HloOpcode::kDot; }); } class NestGemmFusionVisitor : public DfsHloRewriteVisitor { public: explicit NestGemmFusionVisitor(mlir::MLIRContext* ctx) : ctx_(ctx) {} absl::Status HandleFusion(HloInstruction* instruction) override { HloFusionInstruction* fusion = Cast<HloFusionInstruction>(instruction); absl::StatusOr<TritonGemmConfig> config = GetTritonGemmConfig(*fusion); if (!config.ok()) { return absl::OkStatus(); } HloComputation* computation = fusion->called_computation(); HloInstruction* dot = hlo_query::GetFirstInstructionWithOpcode(*computation, HloOpcode::kDot); if (dot == nullptr) { return absl::OkStatus(); } DCHECK_EQ(GetDotCount(computation), 1) << "Fusion has more than one dot."; SymbolicTileAnalysisOrError analysis_or = SymbolicTileAnalysis::AnalyzeComputation( *fusion->called_computations()[0], ctx_); if (std::holds_alternative<FusionDecision>(analysis_or)) { return absl::InternalError( absl::StrCat("Failed to analyze the computation (", std::get<FusionDecision>(analysis_or).Explain(), "): ", fusion->called_computation()->ToString())); } auto& analysis = std::get<SymbolicTileAnalysis>(analysis_or); auto tiled_dot_it = absl::c_find_if( analysis.GetSymbolicTiledHloComputation(), [&](const auto& tiled_hlo) { return tiled_hlo->hlo() == dot; }); if (tiled_dot_it == analysis.GetSymbolicTiledHloComputation().end()) { return absl::InternalError(absl::StrCat( "Couldn't find a symbolic tiled instruction for ", dot->ToString())); } TF_RETURN_IF_ERROR(MakeNestedFusionFromGemmFusion( fusion, config.value(), analysis, **tiled_dot_it, 
Cast<HloDotInstruction>(dot))); this->MarkAsChanged(); return absl::OkStatus(); } private: mlir::MLIRContext* ctx_; }; } absl::StatusOr<bool> NestGemmFusion::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { bool changed = false; mlir::MLIRContext ctx; for (HloComputation* computation : module->MakeNonfusionComputations(execution_threads)) { NestGemmFusionVisitor visitor(&ctx); TF_RETURN_IF_ERROR(computation->Accept(&visitor)); changed |= visitor.changed(); } return changed; } }
#include "xla/service/gpu/transforms/nest_gemm_fusion.h" #include <ostream> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "xla/hlo/ir/hlo_instruction.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/pattern_matcher.h" #include "xla/service/pattern_matcher_gmock.h" #include "xla/tests/hlo_test_base.h" #include "xla/tsl/lib/core/status_test_util.h" #include "tsl/platform/statusor.h" using ::testing::ElementsAre; namespace xla { static void PrintTo(const HloInstruction& hlo, std::ostream* os) { *os << hlo.ToString(); } namespace gpu { namespace { MATCHER_P(OutputTileSizesIs, matcher, "") { auto backend_config = arg.template backend_config<GpuBackendConfig>(); if (!backend_config.ok()) { *result_listener << "failed to get backend config: " << backend_config.status(); return false; } FusionBackendConfig fusion_backend_config = backend_config->fusion_backend_config(); if (!fusion_backend_config.has_block_level_fusion_config()) { *result_listener << "has no block level fusion config"; return false; } auto output_tile_sizes = fusion_backend_config.block_level_fusion_config().output_tile_sizes(); return ExplainMatchResult(matcher, output_tile_sizes, result_listener); } class NestGemmFusionTest : public HloTestBase {}; TEST_F(NestGemmFusionTest, BasicTest) { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"( HloModule module dot { lhs = bf16[8192,512] parameter(0) rhs = bf16[512,512] parameter(1) ROOT %dot = bf16[8192,512] dot(lhs, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0} } ENTRY entry { p0 = bf16[8192,512] parameter(0) p1 = bf16[512,512] parameter(1) ROOT fusion = bf16[8192,512] fusion(p0, p1), kind=kCustom, calls=dot, backend_config={ "fusion_backend_config": { "kind":"__triton_gemm", "triton_gemm_config": { "block_m":"64", "block_n":"256", "block_k":"32", "split_k":"1", "num_stages":"1", "num_warps":"1", "num_ctas":"1" } } } } )")); TF_ASSERT_OK_AND_ASSIGN(bool changed, NestGemmFusion().Run(module.get())) EXPECT_TRUE(changed); TF_ASSERT_OK(verifier().Run(module.get()).status()); const HloInstruction* fusion = nullptr; ASSERT_THAT(module->entry_computation()->root_instruction(), GmockMatch(match::Fusion(&fusion))); EXPECT_THAT(*fusion, OutputTileSizesIs(ElementsAre(64, 256))); const HloInstruction* lhs = nullptr; const HloInstruction* rhs = nullptr; EXPECT_THAT(fusion->fused_expression_root(), GmockMatch(match::Dot(match::Fusion(&lhs), match::Fusion(&rhs)))); EXPECT_THAT(*lhs, OutputTileSizesIs(ElementsAre(64, 32))); EXPECT_THAT(*rhs, OutputTileSizesIs(ElementsAre(32, 256))); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/nest_gemm_fusion.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/nest_gemm_fusion_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
f429a37e-0b2d-4952-9c22-0211ba343576
cpp
tensorflow/tensorflow
scatter_slice_simplifier
third_party/xla/xla/service/gpu/transforms/scatter_slice_simplifier.cc
third_party/xla/xla/service/gpu/transforms/scatter_slice_simplifier_test.cc
#include "xla/service/gpu/transforms/scatter_slice_simplifier.h" #include <cstdint> #include <iterator> #include <optional> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/hlo/ir/dfs_hlo_visitor_with_default.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/hlo_creation_utils.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/util.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { namespace { bool IsValidIntermediaryUser(const HloInstruction* instruction) { return instruction->IsElementwise() || instruction->opcode() == HloOpcode::kGetTupleElement; } class ScatterSliceMatcher { public: explicit ScatterSliceMatcher(const HloScatterInstruction* scatter) : scatter_(scatter), operand_dimensions_( scatter->scatter_operands()[0]->shape().dimensions()), result_dimensions_(operand_dimensions_.begin(), operand_dimensions_.end()) {} std::optional<Shape> InferShape() { VLOG(10) << "Evaluating scatter " << scatter_->name(); if (!AreAllUsersValid(scatter_)) { return std::nullopt; } std::vector<Shape> result_shapes; absl::c_transform(scatter_->scatter_operands(), std::back_inserter(result_shapes), [&](const HloInstruction* op) { return ShapeUtil::MakeShape(op->shape().element_type(), result_dimensions_); }); return ShapeUtil::MakeMaybeTupleShape(result_shapes); } private: bool UpdateDimensions(const HloSliceInstruction* slice) { int64_t rank = slice->shape().rank(); for (int64_t i = 0; i < rank; ++i) { if (slice->slice_starts(i) != 0 || slice->slice_strides(i) != 1) { return false; } if (slice->slice_limits(i) != result_dimensions_[i]) { if (result_dimensions_[i] != operand_dimensions_[i]) { return false; } auto& update_window_dims = scatter_->scatter_dimension_numbers().update_window_dims(); if (absl::c_binary_search(update_window_dims, i)) { return false; } result_dimensions_[i] = slice->slice_limits(i); VLOG(10) << "Dimension " << i << " truncated to size " << result_dimensions_[i]; } } return true; } bool IsUserValid(const HloInstruction* op) { VLOG(10) << "Visiting user " << op->name(); if (auto* slice = DynCast<HloSliceInstruction>(op)) { return UpdateDimensions(slice); } bool is_valid = visited_set_.contains(op) || (IsValidIntermediaryUser(op) && AreAllUsersValid(op)); if (is_valid) { visited_set_.emplace(op); } return is_valid; } bool AreAllUsersValid(const HloInstruction* op) { if (op->user_count() == 0) { return !op->IsRoot(); } return absl::c_all_of(op->users(), [this](const HloInstruction* user) { return IsUserValid(user); }); } const HloScatterInstruction* scatter_; absl::flat_hash_set<const HloInstruction*> visited_set_; absl::Span<const int64_t> operand_dimensions_; DimensionVector result_dimensions_; }; HloInstruction* CreateSliceFrom(HloInstruction* operand, const Shape& shape) { std::vector<int64_t> start_indices(shape.rank(), 0); std::vector<int64_t> limit_indices(shape.rank()); std::vector<int64_t> strides(shape.rank(), 1); for (int64_t i = 0; i < shape.rank(); ++i) { limit_indices[i] = shape.dimensions(i); } return operand->AddInstruction(HloInstruction::CreateSlice( shape, operand, start_indices, limit_indices, strides)); } HloInstruction* 
CreateScatterFrom(HloScatterInstruction* scatter, const Shape& shape) { std::vector<HloInstruction*> operands(scatter->scatter_operand_count()); for (int64_t i = 0; i < operands.size(); ++i) { operands[i] = CreateSliceFrom(scatter->scatter_operands()[i], shape.IsTuple() ? shape.tuple_shapes(i) : shape); } return scatter->AddInstruction(HloInstruction::CreateScatter( shape, absl::MakeSpan(operands), scatter->scatter_indices(), scatter->scatter_updates(), scatter->called_computations()[0], scatter->scatter_dimension_numbers(), scatter->indices_are_sorted(), scatter->unique_indices())); } class ScatterSliceSimplifierVisitor : public DfsHloRewriteVisitor { public: absl::Status HandleScatter(HloInstruction* instruction) override { auto* scatter = Cast<HloScatterInstruction>(instruction); std::optional<Shape> result_shape = ScatterSliceMatcher(scatter).InferShape(); if (!result_shape.has_value()) { return absl::OkStatus(); } VLOG(2) << "Matched scatter " << scatter->name() << " with shape " << scatter->shape().ToString() << ", inferred result shape " << result_shape->ToString() << " (from the slice users)"; HloInstruction* new_scatter = CreateScatterFrom(scatter, *result_shape); return ReplaceAllUsersRecursive(scatter, new_scatter); } private: absl::Status ReplaceAllUsersRecursive(HloInstruction* old_instruction, HloInstruction* new_instruction) { replacements_[old_instruction] = new_instruction; std::vector<HloInstruction*> users = old_instruction->users(); for (HloInstruction* user : users) { if (user->parent() == nullptr) { VLOG(3) << "Skipping user " << user->name() << " (already replaced)"; continue; } TF_RETURN_IF_ERROR(ReplaceUserRecursive(user, new_instruction)); } return absl::OkStatus(); } absl::Status ReplaceUserRecursive(HloInstruction* user, HloInstruction* operand) { VLOG(3) << "Replacing scatter user " << user->name(); if (user->opcode() == HloOpcode::kSlice) { return ReplaceInstruction(user, operand); } HloInstruction* new_user = nullptr; if (user->IsElementwise()) { auto new_shape = [operand](HloInstruction* from) { return ShapeUtil::MakeShape(from->shape().element_type(), operand->shape().dimensions()); }; std::vector<HloInstruction*> new_operands; absl::c_transform(user->operands(), std::back_inserter(new_operands), [&](HloInstruction* op) { auto it = replacements_.find(op); return it != replacements_.end() ? it->second : CreateSliceFrom(op, new_shape(op)); }); new_user = user->AddInstruction( user->CloneWithNewOperands(new_shape(user), new_operands)); } else { auto* gte = Cast<HloGetTupleElementInstruction>(user); TF_ASSIGN_OR_RETURN(new_user, MakeGetTupleElementHlo(operand, gte->tuple_index(), &user->metadata())); } return ReplaceAllUsersRecursive(user, new_user); } absl::flat_hash_map<HloInstruction*, HloInstruction*> replacements_; }; } absl::StatusOr<bool> ScatterSliceSimplifier::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { return ScatterSliceSimplifierVisitor{}.RunOnModule(module, execution_threads); } }
#include "xla/service/gpu/transforms/scatter_slice_simplifier.h" #include <gmock/gmock.h> #include <gtest/gtest.h> #include "xla/service/pattern_matcher.h" #include "xla/service/pattern_matcher_gmock.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/tests/hlo_test_base.h" namespace xla { namespace { namespace m = ::xla::match; using ScatterSliceSimplifierTest = HloTestBase; TEST_F(ScatterSliceSimplifierTest, Scatter1D) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module %add_F32 { %lhs = f32[] parameter(0) %rhs = f32[] parameter(1) ROOT %add = f32[] add(%lhs, %rhs) } ENTRY main { %indices = s32[4] parameter(0) %updates = f32[4] parameter(1) %operands = f32[9] constant(0) %scatter = f32[9] scatter(%operands, %indices, %updates), update_window_dims={}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=%add_F32 ROOT %slice = f32[8] slice(%scatter), slice={[0:8]} } )") .value(); ScatterSliceSimplifier test_pass; ASSERT_TRUE(RunHloPass(&test_pass, module.get()).value()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Scatter(m::Slice(m::Constant()), m::Parameter(0), m::Parameter(1)) .WithShape(F32, {8}))); } TEST_F(ScatterSliceSimplifierTest, Scatter3D) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module %add_F32 { %lhs = f32[] parameter(0) %rhs = f32[] parameter(1) ROOT %add = f32[] add(%lhs, %rhs) } ENTRY main { %indices = s32[2] parameter(0) %updates = f32[2,4,4] parameter(1) %operands = f32[5,4,4] constant(0) %scatter = f32[5,4,4] scatter(%operands, %indices, %updates), update_window_dims={1,2}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=%add_F32 ROOT %slice = f32[4,4,4] slice(%scatter), slice={[0:4], [0:4], [0:4]} } )") .value(); ScatterSliceSimplifier test_pass; ASSERT_TRUE(RunHloPass(&test_pass, module.get()).value()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Scatter(m::Slice(m::Constant()), m::Parameter(0), m::Parameter(1)) .WithShape(F32, {4, 4, 4}))); } TEST_F(ScatterSliceSimplifierTest, ScatterMultiOutput) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module %add_F32_add_F16 { %lhs.0 = f32[] parameter(0) %rhs.0 = f32[] parameter(2) %add.0 = f32[] add(%lhs.0, %rhs.0) %lhs.1 = f16[] parameter(1) %rhs.1 = f16[] parameter(3) %add.1 = f16[] add(%lhs.1, %rhs.1) ROOT %tuple = (f32[], f16[]) tuple(%add.0, %add.1) } ENTRY main { %indices = s32[4] parameter(0) %updates.0 = f32[4] parameter(1) %updates.1 = f16[4] parameter(2) %operands.0 = f32[9] constant(0) %operands.1 = f16[9] constant(0) %scatter = (f32[9], f16[9]) scatter(%operands.0, %operands.1, %indices, %updates.0, %updates.1), update_window_dims={}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=%add_F32_add_F16 %gte.0 = f32[9] get-tuple-element(%scatter), index=0 %slice.0 = f32[8] slice(%gte.0), slice={[0:8]} %gte.1 = f16[9] get-tuple-element(%scatter), index=1 %slice.1 = f16[8] slice(%gte.1), slice={[0:8]} ROOT %tuple = (f32[8], f16[8]) tuple(%slice.0, %slice.1) } )") .value(); ScatterSliceSimplifier test_pass; ASSERT_TRUE(RunHloPass(&test_pass, module.get()).value()); auto expected_scatter = m::Scatter(m::Slice(m::Constant()), m::Slice(m::Constant()), m::Parameter(0), m::Parameter(1), m::Parameter(2)); Shape expected_shape = ShapeUtil::MakeTupleShape( {ShapeUtil::MakeShape(F32, {8}), ShapeUtil::MakeShape(F16, {8})}); 
EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Tuple(m::GetTupleElement(expected_scatter), m::GetTupleElement(expected_scatter)) .WithShapeEqualTo(&expected_shape))); } TEST_F(ScatterSliceSimplifierTest, NotMatching) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module %add_F32 { %lhs = f32[] parameter(0) %rhs = f32[] parameter(1) ROOT %add = f32[] add(%lhs, %rhs) } slice_not_truncation { %indices = s32[4] parameter(0) %updates = f32[4] parameter(1) %operands = f32[9] constant(0) %scatter = f32[9] scatter(%operands, %indices, %updates), update_window_dims={}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=%add_F32 ROOT %slice = f32[8] slice(%scatter), slice={[1:9]} } slice_with_stride { %indices = s32[4] parameter(0) %updates = f32[4] parameter(1) %operands = f32[9] constant(0) %scatter = f32[9] scatter(%operands, %indices, %updates), update_window_dims={}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=%add_F32 ROOT %slice = f32[4] slice(%scatter), slice={[0:8:2]} } scatter_multiple_users { %indices = s32[4] parameter(0) %updates = f32[4] parameter(1) %operands = f32[9] constant(0) %scatter = f32[9] scatter(%operands, %indices, %updates), update_window_dims={}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=%add_F32 %slice = f32[8] slice(%scatter), slice={[0:8]} ROOT %tuple = (f32[9], f32[8]) tuple(%scatter, %slice) } scatter_incompatible_slices { %indices = s32[2] parameter(0) %updates = f32[2,4] parameter(1) %operands = f32[4,4] constant(0) %scatter = f32[4,4] scatter(%operands, %indices, %updates), update_window_dims={1}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=%add_F32 %slice.0 = f32[3,4] slice(%scatter), slice={[0:3], [0:4]} %slice.1 = f32[4,3] slice(%scatter), slice={[0:4], [0:3]} ROOT %tuple = (f32[3,4], f32[4,3]) tuple(%slice.0, %slice.1) } slice_not_found { %indices = s32[4] parameter(0) %updates = f32[4] parameter(1) %operands = f32[8] constant(0) %scatter = f32[8] scatter(%operands, %indices, %updates), update_window_dims={}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=%add_F32 ROOT %exp = f32[8] exponential(%scatter) } slice_update_dimensions { %indices = s32[10] parameter(0) %updates = f32[10,1,128] parameter(1) %operands = f32[100,128] constant(0) %scatter = f32[100,128] scatter(%operands, %indices, %updates), update_window_dims={1,2}, inserted_window_dims={}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=%add_F32 ROOT %slice = f32[100,64] slice(%scatter), slice={[0:100], [0:64]} } )") .value(); ScatterSliceSimplifier test_pass; ASSERT_FALSE(RunHloPass(&test_pass, module.get()).value()); } TEST_F(ScatterSliceSimplifierTest, IntermediaryUsers) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module %add_F32 { %lhs = f32[] parameter(0) %rhs = f32[] parameter(1) ROOT %add = f32[] add(%lhs, %rhs) } ENTRY main { %indices = s32[4] parameter(0) %updates = f32[4] parameter(1) %operands = f32[9] constant(0) %scatter = f32[9] scatter(%operands, %indices, %updates), update_window_dims={}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=%add_F32 %unary = f32[9] abs(%scatter) %slice.0 = f32[8] slice(%unary), slice={[0:8]} %binary = f32[9] maximum(%scatter, %operands) %slice.1 = f32[8] slice(%binary), slice={[0:8]} ROOT %tuple = (f32[8], f32[8]) 
tuple(%slice.0, %slice.1) } )") .value(); ScatterSliceSimplifier test_pass; ASSERT_TRUE(RunHloPass(&test_pass, module.get()).value()); auto expected_scatter = m::Scatter(m::Slice(m::Constant()), m::Parameter(0), m::Parameter(1)); Shape expected_shape = ShapeUtil::MakeTupleShape( {ShapeUtil::MakeShape(F32, {8}), ShapeUtil::MakeShape(F32, {8})}); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch(m::Tuple(m::Abs(expected_scatter), m::Maximum(expected_scatter, m::Slice(m::Constant()))) .WithShapeEqualTo(&expected_shape))); } TEST_F(ScatterSliceSimplifierTest, IntermediaryChain) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module %add_F32 { %lhs = f32[] parameter(0) %rhs = f32[] parameter(1) ROOT %add = f32[] add(%lhs, %rhs) } ENTRY main { %indices = s32[4] parameter(0) %updates = f32[4] parameter(1) %operands = f32[9] constant(0) %scatter = f32[9] scatter(%operands, %indices, %updates), update_window_dims={}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=%add_F32 %elementwise.0 = f32[9] abs(%scatter) %elementwise.1 = f32[9] exponential(%elementwise.0) %elementwise.2 = f32[9] add(%elementwise.0, %elementwise.1) ROOT %result = f32[8] slice(%elementwise.2), slice={[0:8]} } )") .value(); ScatterSliceSimplifier test_pass; ASSERT_TRUE(RunHloPass(&test_pass, module.get()).value()); auto expected_scatter = m::Scatter(m::Slice(m::Constant()), m::Parameter(0), m::Parameter(1)); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Add(m::Abs(expected_scatter), m::Exp(m::Abs(expected_scatter))) .WithShape(F32, {8}))); } TEST_F(ScatterSliceSimplifierTest, DiamondShape) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module %add_F32_mul_F32 { %lhs.0 = f32[] parameter(0) %rhs.0 = f32[] parameter(2) %add.0 = f32[] add(%lhs.0, %rhs.0) %lhs.1 = f32[] parameter(1) %rhs.1 = f32[] parameter(3) %mul.1 = f32[] multiply(%lhs.1, %rhs.1) ROOT %tuple = (f32[], f32[]) tuple(%add.0, %mul.1) } ENTRY main { %indices = s32[4] parameter(0) %updates.0 = f32[4] parameter(1) %updates.1 = f32[4] parameter(2) %operands.0 = f32[9] constant(0) %operands.1 = f32[9] constant(0) %scatter = (f32[9], f32[9]) scatter(%operands.0, %operands.1, %indices, %updates.0, %updates.1), update_window_dims={}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=%add_F32_mul_F32 %gte.0 = f32[9] get-tuple-element(%scatter), index=0 %gte.1 = f32[9] get-tuple-element(%scatter), index=1 %consumer = f32[9] add(%gte.0, %gte.1) ROOT %slice = f32[8] slice(%consumer), slice={[0:8]} } )") .value(); ScatterSliceSimplifier test_pass; ASSERT_TRUE(RunHloPass(&test_pass, module.get()).value()); auto expected_scatter = m::Scatter(m::Slice(m::Constant()), m::Slice(m::Constant()), m::Parameter(0), m::Parameter(1), m::Parameter(2)); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Add(m::GetTupleElement(expected_scatter), m::GetTupleElement(expected_scatter)) .WithShape(F32, {8}))); } TEST_F(ScatterSliceSimplifierTest, ElementwiseSelect) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module %add_F32 { %lhs = f32[] parameter(0) %rhs = f32[] parameter(1) ROOT %add = f32[] add(%lhs, %rhs) } ENTRY main { %indices = s32[4] parameter(0) %updates = f32[4] parameter(1) %operands = f32[9] constant(0) %scatter = f32[9] scatter(%operands, %indices, %updates), update_window_dims={}, inserted_window_dims={0}, scatter_dims_to_operand_dims={0}, index_vector_dim=1, to_apply=%add_F32 
%pred_ = pred[9] parameter(2) %select = f32[9] select(%pred_, %scatter, %operands) ROOT %slice = f32[8] slice(%select), slice={[0:8]} } )") .value(); ScatterSliceSimplifier test_pass; ASSERT_TRUE(RunHloPass(&test_pass, module.get()).value()); auto expected_scatter = m::Scatter(m::Slice(m::Constant()), m::Parameter(0), m::Parameter(1)); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Select(m::Slice(m::Parameter(2)), expected_scatter, m::Slice(m::Constant())) .WithShape(F32, {8}))); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/scatter_slice_simplifier.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/scatter_slice_simplifier_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
e0bafeec-b8dc-4bfa-840f-87c8a06e09cc
cpp
tensorflow/tensorflow
collective_permute_valid_iteration_annotator
third_party/xla/xla/service/gpu/transforms/collective_permute_valid_iteration_annotator.cc
third_party/xla/xla/service/gpu/transforms/collective_permute_valid_iteration_annotator_test.cc
#include "xla/service/gpu/transforms/collective_permute_valid_iteration_annotator.h" #include "xla/literal_util.h" #include "xla/service/collective_ops_utils.h" #include "xla/service/pattern_matcher.h" #include "xla/service/while_loop_analysis.h" namespace xla { static const HloInstruction* NonConstantOperand(const HloInstruction* instr) { const HloInstruction* result = nullptr; for (const HloInstruction* operand : instr->operands()) { if (!operand->IsConstant()) { if (result != nullptr) { CHECK_EQ(result, operand); } result = operand; } } CHECK_NE(result, nullptr); return result; } std::optional<int64_t> GetStep(HloInstruction* while_inst) { std::optional<int64_t> indvar_tuple_idx = GetLoopInductionVarTupleIdx(while_inst); if (!indvar_tuple_idx) { return std::nullopt; }; auto* while_body_indvar_update = while_inst->while_body()->root_instruction()->mutable_operand( *indvar_tuple_idx); auto* while_body_indvar = NonConstantOperand(while_body_indvar_update); HloInstruction* trip_count_increase_step_instr = nullptr; if (!Match(while_body_indvar_update, match::AddAnyOrder(match::Op().Is(while_body_indvar), match::Op(&trip_count_increase_step_instr)))) { return std::nullopt; } return LiteralUtil::LiteralAsScalarInt64( trip_count_increase_step_instr->literal()); } absl::StatusOr<bool> CollectivePermuteValidIterationAnnotator::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { bool changed = false; for (HloComputation* comp : module->computations(execution_threads)) { for (HloInstruction* inst : comp->instructions()) { if (inst->opcode() != HloOpcode::kCollectivePermute) { continue; } if (inst->frontend_attributes().map().find(kSendRecvValidationAttr) != inst->frontend_attributes().map().end()) { continue; } auto sourceTargetPairs = inst->source_target_pairs(); if (!IsForwardCycle(sourceTargetPairs) && !IsBackwardCycle(sourceTargetPairs)) { continue; } VLOG(2) << "Collective permute with cycle: " << inst->ToString(); int64_t max_device_num = -1; for (auto [source, target] : sourceTargetPairs) { max_device_num = std::max(std::max(source, target), max_device_num); } int64_t num_devices = max_device_num + 1; HloInstruction* whileOp = inst->parent()->WhileCallInstruction(); if (whileOp == nullptr) { VLOG(2) << "No surrounding while op found. Ignoring " << inst->name(); continue; } if (!whileOp->frontend_attributes().map().contains( "is_pipelined_while_loop")) continue; TF_ASSIGN_OR_RETURN(WhileLoopBackendConfig config, whileOp->backend_config<WhileLoopBackendConfig>()); if (!config.has_known_trip_count()) { VLOG(2) << "Trip count for while loop (" << whileOp->name() << "): unknown"; continue; } int64_t trip_count = config.known_trip_count().n(); std::optional<int64_t> step = GetStep(whileOp); VLOG(2) << "Trip count for while loop (" << whileOp->name() << "): " << trip_count; if (!step) { VLOG(2) << "Could not find step for while operation"; continue; } VLOG(2) << "Step for while loop (" << whileOp->name() << "): " << *step; if (*step != 1) { VLOG(2) << "Step is not 1. 
Skipping..."; continue; } int64_t offset = trip_count - num_devices; std::vector<std::pair<int64_t, int64_t>> sendRecvValidation( sourceTargetPairs.size()); for (size_t currIdx = 0; currIdx < sourceTargetPairs.size(); currIdx++) { sendRecvValidation[currIdx] = {currIdx, currIdx + offset}; } if (IsBackwardCycle(sourceTargetPairs)) { std::reverse(sendRecvValidation.begin(), sendRecvValidation.end()); } xla::FrontendAttributes attributes; std::string iteration_instances = "{" + absl::StrJoin(sendRecvValidation, ",", [](std::string* out, std::pair<int64_t, int64_t> item) { absl::StrAppend(out, "{", item.first, ",", item.second, "}"); }) + "}"; (*attributes.mutable_map())[kSendRecvValidationAttr] = iteration_instances; inst->add_frontend_attributes(attributes); VLOG(1) << "Adding " << kSendRecvValidationAttr << " to " << inst->name() << ": " << iteration_instances; changed = true; } } return changed; } }
#include "xla/service/gpu/transforms/collective_permute_valid_iteration_annotator.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/pass/hlo_pass_pipeline.h" #include "xla/service/collective_ops_utils.h" #include "xla/service/while_loop_trip_count_annotator.h" #include "xla/tests/hlo_test_base.h" namespace xla { namespace { using CollectivePermuteValidIterationAnnotatorTest = HloTestBase; TEST_F(CollectivePermuteValidIterationAnnotatorTest, NoChange) { absl::string_view hlo_string = R"( HloModule test, entry_computation_layout={()->(s32[], s32[])} %Body (param: (s32[], s32[])) -> (s32[], s32[]) { %param = (s32[], s32[]) parameter(0) %i = s32[] get-tuple-element((s32[], s32[]) %param), index=1 %one = s32[] constant(1) %i_plus_one = s32[] add(s32[] %i, s32[] %one) %permute = s32[] collective-permute(%i_plus_one), channel_id=1, source_target_pairs={{0,1},{1,2},{2,3},{3,0}} ROOT %tuple = (s32[], s32[]) tuple(s32[] %permute, s32[] %permute) } %Cond (param.1: (s32[], s32[])) -> pred[] { %param.1 = (s32[], s32[]) parameter(0) %i.1 = s32[] get-tuple-element((s32[], s32[]) %param.1), index=1 %trip_count = s32[] constant(10) ROOT %done = pred[] compare(s32[] %i.1, s32[] %trip_count), direction=LT } ENTRY %test () -> (s32[], s32[]) { %i_start = s32[] constant(0) %p_start = s32[] constant(0) %initial_tuple = (s32[], s32[]) tuple(s32[] %i_start, s32[] %p_start) ROOT %while = (s32[], s32[]) while((s32[], s32[]) %initial_tuple), condition=%Cond, body=%Body } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string, 1, 4)); HloPassPipeline pipeline("my-pass-pipeline"); pipeline.AddPass<WhileLoopTripCountAnnotator>(); pipeline.AddPass<CollectivePermuteValidIterationAnnotator>(); TF_ASSERT_OK_AND_ASSIGN(bool changed, pipeline.Run(module.get())); EXPECT_FALSE(changed); HloCollectivePermuteInstruction* cp = DynCastOrNull<HloCollectivePermuteInstruction>( FindInstruction(module.get(), HloOpcode::kCollectivePermute)); ASSERT_NE(cp, nullptr); auto sendRecvValidationIt = cp->frontend_attributes().map().find(kSendRecvValidationAttr); ASSERT_EQ(sendRecvValidationIt, cp->frontend_attributes().map().end()); } TEST_F(CollectivePermuteValidIterationAnnotatorTest, ForwardCycle) { absl::string_view hlo_string = R"( HloModule test, entry_computation_layout={()->(s32[], s32[])} %Body (param: (s32[], s32[])) -> (s32[], s32[]) { %param = (s32[], s32[]) parameter(0) %i = s32[] get-tuple-element((s32[], s32[]) %param), index=1 %one = s32[] constant(1) %i_plus_one = s32[] add(s32[] %i, s32[] %one) %permute = s32[] collective-permute(%i_plus_one), channel_id=1, source_target_pairs={{0,1},{1,2},{2,3},{3,0}} ROOT %tuple = (s32[], s32[]) tuple(s32[] %permute, s32[] %i_plus_one) } %Cond (param.1: (s32[], s32[])) -> pred[] { %param.1 = (s32[], s32[]) parameter(0) %i.1 = s32[] get-tuple-element((s32[], s32[]) %param.1), index=1 %trip_count = s32[] constant(10) ROOT %done = pred[] compare(s32[] %i.1, s32[] %trip_count), direction=LT } ENTRY %test () -> (s32[], s32[]) { %i_start = s32[] constant(0) %p_start = s32[] constant(0) %initial_tuple = (s32[], s32[]) tuple(s32[] %i_start, s32[] %p_start) ROOT %while = (s32[], s32[]) while((s32[], s32[]) %initial_tuple), condition=%Cond, body=%Body, frontend_attributes={is_pipelined_while_loop="true"} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string, 1, 4)); HloPassPipeline pipeline("my-pass-pipeline"); 
pipeline.AddPass<WhileLoopTripCountAnnotator>(); pipeline.AddPass<CollectivePermuteValidIterationAnnotator>(); TF_ASSERT_OK_AND_ASSIGN(bool changed, pipeline.Run(module.get())); EXPECT_TRUE(changed); HloCollectivePermuteInstruction* cp = DynCastOrNull<HloCollectivePermuteInstruction>( FindInstruction(module.get(), HloOpcode::kCollectivePermute)); ASSERT_NE(cp, nullptr); auto sendRecvValidationIt = cp->frontend_attributes().map().find(kSendRecvValidationAttr); ASSERT_NE(sendRecvValidationIt, cp->frontend_attributes().map().end()); std::string sendRecvValidationAttr = sendRecvValidationIt->second; EXPECT_EQ(sendRecvValidationAttr, "{{0,6},{1,7},{2,8},{3,9}}"); } TEST_F(CollectivePermuteValidIterationAnnotatorTest, BackwardCycle) { absl::string_view hlo_string = R"( HloModule test, entry_computation_layout={()->(s32[], s32[])} %Body (param: (s32[], s32[])) -> (s32[], s32[]) { %param = (s32[], s32[]) parameter(0) %i = s32[] get-tuple-element((s32[], s32[]) %param), index=1 %one = s32[] constant(1) %i_plus_one = s32[] add(s32[] %i, s32[] %one) %permute = s32[] collective-permute(%i_plus_one), channel_id=1, source_target_pairs={{0,3},{1,0},{2,1},{3,2}} ROOT %tuple = (s32[], s32[]) tuple(s32[] %permute, s32[] %i_plus_one) } %Cond (param.1: (s32[], s32[])) -> pred[] { %param.1 = (s32[], s32[]) parameter(0) %i.1 = s32[] get-tuple-element((s32[], s32[]) %param.1), index=1 %trip_count = s32[] constant(10) ROOT %done = pred[] compare(s32[] %i.1, s32[] %trip_count), direction=LT } ENTRY %test () -> (s32[], s32[]) { %i_start = s32[] constant(0) %p_start = s32[] constant(0) %initial_tuple = (s32[], s32[]) tuple(s32[] %i_start, s32[] %p_start) ROOT %while = (s32[], s32[]) while((s32[], s32[]) %initial_tuple), condition=%Cond, body=%Body, frontend_attributes={is_pipelined_while_loop="true"} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string, 1, 4)); HloPassPipeline pipeline("my-pass-pipeline"); pipeline.AddPass<WhileLoopTripCountAnnotator>(); pipeline.AddPass<CollectivePermuteValidIterationAnnotator>(); TF_ASSERT_OK_AND_ASSIGN(bool changed, pipeline.Run(module.get())); EXPECT_TRUE(changed); HloCollectivePermuteInstruction* cp = DynCastOrNull<HloCollectivePermuteInstruction>( FindInstruction(module.get(), HloOpcode::kCollectivePermute)); ASSERT_NE(cp, nullptr); auto sendRecvValidationIt = cp->frontend_attributes().map().find(kSendRecvValidationAttr); ASSERT_NE(sendRecvValidationIt, cp->frontend_attributes().map().end()); std::string sendRecvValidationAttr = sendRecvValidationIt->second; EXPECT_EQ(sendRecvValidationAttr, "{{3,9},{2,8},{1,7},{0,6}}"); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/collective_permute_valid_iteration_annotator.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/collective_permute_valid_iteration_annotator_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
0a557562-a4da-4ac0-a621-6c54bbdd406d
cpp
tensorflow/tensorflow
all_reduce_blueconnect
third_party/xla/xla/service/gpu/transforms/all_reduce_blueconnect.cc
third_party/xla/xla/service/gpu/transforms/all_reduce_blueconnect_test.cc
#include "xla/service/gpu/transforms/all_reduce_blueconnect.h" #include <algorithm> #include <cstddef> #include <cstdint> #include <iterator> #include <optional> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/btree_map.h" #include "absl/container/flat_hash_set.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_query.h" #include "xla/service/collective_ops_utils.h" #include "xla/service/computation_placer.h" #include "xla/service/global_device_id.h" #include "xla/service/hlo_creation_utils.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/status_macros.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" #include "tsl/platform/statusor.h" namespace xla { namespace { std::vector<HloInstruction*> GetOutputs(HloInstruction& instruction) { if (!instruction.shape().IsTuple()) { return {&instruction}; } std::vector<HloInstruction*> outputs; outputs.reserve(instruction.shape().tuple_shapes_size()); HloComputation& computation = *instruction.parent(); for (int i = 0; i < instruction.shape().tuple_shapes_size(); ++i) { outputs.push_back(computation.AddInstruction( HloInstruction::CreateGetTupleElement(&instruction, i))); } return outputs; } struct DecomposedReplicaGroups { std::vector<ReplicaGroup> scatter_gather_groups; std::vector<ReplicaGroup> new_all_reduce_groups; }; std::optional<GlobalDeviceId> TryConvertingReplicaIdToDeviceId( int64_t replica_id, const DeviceAssignment& device_assignment, CollectiveOpGroupMode collective_group_mode) { if (collective_group_mode == CollectiveOpGroupMode::kCrossReplica) { if (device_assignment.computation_count() != 1) { return std::nullopt; } return GlobalDeviceId{device_assignment(replica_id, 0)}; } else if (collective_group_mode == CollectiveOpGroupMode::kFlattenedID) { int partition_count = device_assignment.computation_count(); int64_t actual_replica_id = replica_id / partition_count; int64_t partition_id = replica_id % partition_count; return GlobalDeviceId{device_assignment(actual_replica_id, partition_id)}; } VLOG(1) << "Skip AllReduceBlueConnect because of unsupported " "CollectiveOpGroupMode " << CollectiveOpGroupModeToString(collective_group_mode); return std::nullopt; } absl::StatusOr<std::optional<DecomposedReplicaGroups>> TryDecomposeReplicaGroup( const ReplicaGroup& replica_group, const DeviceAssignment& device_assignment, size_t num_devices_per_host, CollectiveOpGroupMode collective_group_mode) { int group_size = replica_group.replica_ids_size(); TF_RET_CHECK(group_size > 0); absl::btree_map<int, std::vector<int64_t>> replica_ids_by_host; for (int64_t replica_id : replica_group.replica_ids()) { std::optional<GlobalDeviceId> device_id = TryConvertingReplicaIdToDeviceId( replica_id, device_assignment, collective_group_mode); if (!device_id.has_value()) { return {std::nullopt}; } TF_RET_CHECK(*device_id >= 0); int host_id = device_id->value() / num_devices_per_host; replica_ids_by_host[host_id].push_back(replica_id); } size_t num_local_devices = replica_ids_by_host.begin()->second.size(); bool same_num_devices_on_each_host = absl::c_all_of(replica_ids_by_host, [&](const auto& entry) { return entry.second.size() == num_local_devices; }); if (!same_num_devices_on_each_host) { return {std::nullopt}; } std::vector<int64_t> sorted_replica_group; 
sorted_replica_group.reserve(group_size); for (const auto& entry : replica_ids_by_host) { absl::c_copy(entry.second, std::back_inserter(sorted_replica_group)); } size_t scatter_group_size = std::max(num_local_devices, size_t(2)); size_t num_scatter_groups = group_size / scatter_group_size; if ((group_size % scatter_group_size != 0) || (num_scatter_groups < 2)) { return {std::nullopt}; } std::vector<ReplicaGroup> scatter_gather_groups(num_scatter_groups); std::vector<ReplicaGroup> new_all_reduce_groups(scatter_group_size); for (size_t i = 0; i < group_size; ++i) { int64_t replica_id = sorted_replica_group[i]; scatter_gather_groups[i / scatter_group_size].add_replica_ids(replica_id); new_all_reduce_groups[i % scatter_group_size].add_replica_ids(replica_id); } return {DecomposedReplicaGroups{std::move(scatter_gather_groups), std::move(new_all_reduce_groups)}}; } absl::StatusOr<std::optional<DecomposedReplicaGroups>> TryDecomposeReplicaGroups(const HloAllReduceInstruction& all_reduce, size_t num_devices_per_host) { const DeviceAssignment& device_assignment = all_reduce.GetModule()->config().static_device_assignment(); absl::Span<const ReplicaGroup> replica_groups = all_reduce.replica_groups(); ReplicaGroup all_replicas; if (replica_groups.empty()) { for (int i = 0; i < device_assignment.replica_count(); ++i) { all_replicas.add_replica_ids(i); } replica_groups = absl::MakeSpan(&all_replicas, 1); } TF_ASSIGN_OR_RETURN( CollectiveOpGroupMode collective_op_group_mode, GetCollectiveOpGroupMode(all_reduce.channel_id().has_value(), all_reduce.use_global_device_ids())); std::vector<ReplicaGroup> scatter_gather_groups; std::vector<ReplicaGroup> new_all_reduce_groups; for (const ReplicaGroup& replica_group : replica_groups) { TF_ASSIGN_OR_RETURN( std::optional<DecomposedReplicaGroups> decomposed_groups, TryDecomposeReplicaGroup(replica_group, device_assignment, num_devices_per_host, collective_op_group_mode)); if (!decomposed_groups) return {std::nullopt}; int scatter_group_size = decomposed_groups->scatter_gather_groups[0].replica_ids_size(); if (scatter_gather_groups.empty()) { for (const HloInstruction* operand : all_reduce.operands()) { TF_RET_CHECK(operand->shape().IsArray()); int64_t num_elements = ShapeUtil::ElementsIn(operand->shape()); if (num_elements % scatter_group_size != 0) { return {std::nullopt}; } } scatter_gather_groups.reserve( replica_groups.size() * decomposed_groups->scatter_gather_groups.size()); new_all_reduce_groups.reserve( replica_groups.size() * decomposed_groups->new_all_reduce_groups.size()); } else if (scatter_group_size != scatter_gather_groups[0].replica_ids_size()) { return {std::nullopt}; } absl::c_move(decomposed_groups->scatter_gather_groups, std::back_inserter(scatter_gather_groups)); absl::c_move(decomposed_groups->new_all_reduce_groups, std::back_inserter(new_all_reduce_groups)); } return {DecomposedReplicaGroups{std::move(scatter_gather_groups), std::move(new_all_reduce_groups)}}; } absl::StatusOr<bool> TryDecomposeAllReduce(HloAllReduceInstruction* all_reduce, size_t num_devices_per_host) { TF_RET_CHECK(all_reduce); TF_RET_CHECK(!all_reduce->has_sharding()); HloComputation& computation = *all_reduce->parent(); PrimitiveType element_type = all_reduce->operand(0)->shape().element_type(); TF_ASSIGN_OR_RETURN( std::optional<DecomposedReplicaGroups> decomposed_groups, TryDecomposeReplicaGroups(*all_reduce, num_devices_per_host)); if (!decomposed_groups) return false; std::vector<HloInstruction*> flat_operands; flat_operands.reserve(all_reduce->operand_count()); 
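// Bitcast every operand to a rank-1 shape; the reduce-scatter then splits each flat operand evenly across the scatter group.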
std::vector<Shape> flat_shapes; flat_shapes.reserve(all_reduce->operand_count()); std::vector<Shape> scattered_shapes; scattered_shapes.reserve(all_reduce->operand_count()); int scatter_group_size = decomposed_groups->scatter_gather_groups[0].replica_ids_size(); for (HloInstruction* operand : all_reduce->operands()) { TF_RET_CHECK(operand->shape().IsArray()); int64_t num_elements = ShapeUtil::ElementsIn(operand->shape()); Shape flat_shape = ShapeUtil::MakeShape(element_type, {num_elements}); flat_operands.push_back(computation.AddInstruction( HloInstruction::CreateBitcast(flat_shape, operand))); flat_shapes.push_back(std::move(flat_shape)); scattered_shapes.push_back(ShapeUtil::MakeShape( element_type, {num_elements / scatter_group_size})); } Shape reduce_scatter_shape = ShapeUtil::MakeMaybeTupleShape(scattered_shapes); int64_t next_channel_id = hlo_query::NextChannelId(*computation.parent()); auto get_channel_id = [&]() -> std::optional<int64_t> { if (all_reduce->channel_id().has_value()) { return next_channel_id++; } return std::nullopt; }; HloInstruction* reduce_scatter = computation.AddInstruction(HloInstruction::CreateReduceScatter( reduce_scatter_shape, flat_operands, all_reduce->to_apply(), CollectiveDeviceList(decomposed_groups->scatter_gather_groups), false, get_channel_id(), all_reduce->use_global_device_ids(), 0)); HloInstruction* new_all_reduce = computation.AddInstruction(HloInstruction::CreateAllReduce( reduce_scatter_shape, GetOutputs(*reduce_scatter), all_reduce->to_apply(), CollectiveDeviceList(decomposed_groups->new_all_reduce_groups), false, all_reduce->channel_id(), all_reduce->use_global_device_ids())); HloInstruction* all_gather = computation.AddInstruction(HloInstruction::CreateAllGather( ShapeUtil::MakeMaybeTupleShape(flat_shapes), GetOutputs(*new_all_reduce), 0, CollectiveDeviceList(decomposed_groups->scatter_gather_groups), false, get_channel_id(), all_reduce->use_global_device_ids())); std::vector<HloInstruction*> outputs = GetOutputs(*all_gather); for (int64_t i = 0; i < outputs.size(); ++i) { outputs[i] = computation.AddInstruction(HloInstruction::CreateBitcast( all_reduce->operand(i)->shape(), outputs[i])); } HloInstruction* replacement = MaybeMakeTuple(outputs); TF_RETURN_IF_ERROR( all_reduce->CopyAllControlDepsTo(reduce_scatter, replacement)); TF_RETURN_IF_ERROR(all_reduce->DropAllControlDeps()); TF_RETURN_IF_ERROR(computation.ReplaceInstruction(all_reduce, replacement)); TF_RETURN_IF_ERROR( TryDecomposeAllReduce(Cast<HloAllReduceInstruction>(new_all_reduce), num_devices_per_host) .status()); return true; } } absl::StatusOr<bool> AllReduceBlueConnect::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { VLOG(1) << "Running AllReduceBlueConnect"; if (hlo_query::ContainsLayoutConstrainedAllReduce(*module)) { VLOG(1) << "Skip AllReduceBlueConnect because the module contains all-reduce " "with constrained layouts"; return false; } if (!module->config().has_static_device_assignment()) { VLOG(1) << "Skip AllReduceBlueConnect because the module doesn't have static " "device assignment"; return false; } std::vector<HloAllReduceInstruction*> all_reduces; for (HloComputation* computation : module->MakeNonfusionComputations(execution_threads)) { for (HloInstruction* instruction : computation->instructions()) { if (instruction->opcode() == HloOpcode::kAllReduce) { all_reduces.push_back(Cast<HloAllReduceInstruction>(instruction)); } } } bool changed = false; for (HloAllReduceInstruction* all_reduce : all_reduces) { 
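// Decompose each all-reduce into reduce-scatter + all-reduce + all-gather where the replica groups allow it.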
TF_ASSIGN_OR_RETURN( bool all_reduce_changed, TryDecomposeAllReduce(all_reduce, num_devices_per_host_)); changed |= all_reduce_changed; } return changed; } }
#include "xla/service/gpu/transforms/all_reduce_blueconnect.h" #include <cstddef> #include <cstdint> #include <memory> #include <optional> #include <utility> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/service/computation_placer.h" #include "xla/service/pattern_matcher.h" #include "xla/service/pattern_matcher_gmock.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/tests/hlo_test_base.h" #include "xla/util.h" #include "tsl/platform/status_matchers.h" #include "tsl/platform/statusor.h" namespace xla { namespace { using ::tsl::testing::IsOkAndHolds; namespace m = ::xla::match; using AllReduceBlueConnectTest = HloTestBase; HloPredicate MatchChannelId(std::optional<int64_t> channel_id) { return [channel_id](const HloInstruction* instruction) { return instruction->channel_id() == channel_id; }; } void SetModuleConfig(HloModuleConfig* module_config, size_t replica_count, size_t partition_count = 1) { DeviceAssignment device_assignment(replica_count, partition_count); device_assignment.FillIota(0); module_config->set_replica_count(replica_count); module_config->set_num_partitions(partition_count); module_config->set_static_device_assignment(device_assignment); } void SetModuleConfig(HloModule& module, size_t replica_count, size_t partition_count = 1) { SetModuleConfig(&module.mutable_config(), replica_count, partition_count); } TEST_F(AllReduceBlueConnectTest, OneStage) { constexpr absl::string_view hlo_string = R"( HloModule module %add { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT add = f32[] add(lhs, rhs) } ENTRY %comp { p0 = f32[4,4] parameter(0) ROOT crs = f32[4,4] all-reduce(p0), to_apply=add })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); SetModuleConfig(*module, 8); AllReduceBlueConnect pass(4); EXPECT_THAT(pass.Run(module.get()), IsOkAndHolds(true)); std::vector<std::vector<int64_t>> scatter_gather_groups = { {0, 1, 2, 3}, {4, 5, 6, 7}}; std::vector<std::vector<int64_t>> new_all_reduce_groups = { {0, 4}, {1, 5}, {2, 6}, {3, 7}}; auto bitcast = m::Bitcast(m::Parameter(0)).WithShape(F32, {16}); auto reduce_scatter = m::ReduceScatter(bitcast) .WithShape(F32, {4}) .WithReplicaGroups(scatter_gather_groups) .WithPredicate(MatchChannelId(std::nullopt)); auto all_reduce = m::AllReduce(reduce_scatter) .WithShape(F32, {4}) .WithReplicaGroups(new_all_reduce_groups) .WithPredicate(MatchChannelId(std::nullopt)); auto all_gather = m::AllGather(all_reduce) .WithShape(F32, {16}) .WithReplicaGroups(scatter_gather_groups) .WithPredicate(MatchChannelId(std::nullopt)); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Bitcast(all_gather).WithShape(F32, {4, 4}))); } TEST_F(AllReduceBlueConnectTest, TwoStage) { constexpr absl::string_view hlo_string = R"( HloModule module %add { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT add = f32[] add(lhs, rhs) } ENTRY %comp { p0 = f32[4,4] parameter(0) ROOT crs = f32[4,4] all-reduce(p0), to_apply=add })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); SetModuleConfig(*module, 16); AllReduceBlueConnect pass(4); EXPECT_THAT(pass.Run(module.get()), IsOkAndHolds(true)); std::vector<std::vector<int64_t>> outer_scatter_gather_groups = { {0, 1, 2, 3}, {4, 5, 6, 7}, {8, 9, 10, 11}, {12, 
13, 14, 15}}; std::vector<std::vector<int64_t>> inner_scatter_gather_groups = { {0, 4}, {8, 12}, {1, 5}, {9, 13}, {2, 6}, {10, 14}, {3, 7}, {11, 15}}; std::vector<std::vector<int64_t>> new_all_reduce_groups = { {0, 8}, {4, 12}, {1, 9}, {5, 13}, {2, 10}, {6, 14}, {3, 11}, {7, 15}}; auto bitcast0 = m::Bitcast(m::Parameter(0)).WithShape(F32, {16}); auto reduce_scatter0 = m::ReduceScatter(bitcast0).WithShape(F32, {4}).WithReplicaGroups( outer_scatter_gather_groups); auto bitcast1 = m::Bitcast(reduce_scatter0).WithShape(F32, {4}); auto reduce_scatter1 = m::ReduceScatter(bitcast1).WithShape(F32, {2}).WithReplicaGroups( inner_scatter_gather_groups); auto all_reduce = m::AllReduce(reduce_scatter1) .WithShape(F32, {2}) .WithReplicaGroups(new_all_reduce_groups); auto all_gather0 = m::AllGather(all_reduce) .WithShape(F32, {4}) .WithReplicaGroups(inner_scatter_gather_groups); auto bitcast2 = m::Bitcast(all_gather0).WithShape(F32, {4}); auto all_gather1 = m::AllGather(bitcast2).WithShape(F32, {16}).WithReplicaGroups( outer_scatter_gather_groups); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Bitcast(all_gather1).WithShape(F32, {4, 4}))); } TEST_F(AllReduceBlueConnectTest, TwoOperands) { constexpr absl::string_view hlo_string = R"( HloModule module %add { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT add = f32[] add(lhs, rhs) } ENTRY %comp { p0 = f32[4,4] parameter(0) p1 = f32[4,4,2] parameter(1) ROOT crs = (f32[4,4], f32[4,4,2]) all-reduce(p0, p1), to_apply=add })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); SetModuleConfig(*module, 8); AllReduceBlueConnect pass(4); EXPECT_THAT(pass.Run(module.get()), IsOkAndHolds(true)); std::vector<std::vector<int64_t>> scatter_gather_groups = { {0, 1, 2, 3}, {4, 5, 6, 7}}; std::vector<std::vector<int64_t>> new_all_reduce_groups = { {0, 4}, {1, 5}, {2, 6}, {3, 7}}; auto bitcast0 = m::Bitcast(m::Parameter(0)).WithShape(F32, {16}); auto bitcast1 = m::Bitcast(m::Parameter(1)).WithShape(F32, {32}); Shape expected0 = ShapeUtil::MakeTupleShape( {ShapeUtil::MakeShape(F32, {4}), ShapeUtil::MakeShape(F32, {8})}); Shape expected1 = ShapeUtil::MakeTupleShape( {ShapeUtil::MakeShape(F32, {16}), ShapeUtil::MakeShape(F32, {32})}); auto reduce_scatter = m::ReduceScatter(bitcast0, bitcast1) .WithShapeEqualTo(&expected0) .WithReplicaGroups(scatter_gather_groups); auto all_reduce = m::AllReduce(m::GetTupleElement(reduce_scatter, 0), m::GetTupleElement(reduce_scatter, 1)) .WithShapeEqualTo(&expected0) .WithReplicaGroups(new_all_reduce_groups); auto all_gather = m::AllGather(m::GetTupleElement(all_reduce, 0), m::GetTupleElement(all_reduce, 1)) .WithShapeEqualTo(&expected1) .WithReplicaGroups(scatter_gather_groups); auto bitcast2 = m::Bitcast(m::GetTupleElement(all_gather, 0)).WithShape(F32, {4, 4}); auto bitcast3 = m::Bitcast(m::GetTupleElement(all_gather, 1)).WithShape(F32, {4, 4, 2}); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Tuple(bitcast2, bitcast3))); } TEST_F(AllReduceBlueConnectTest, MultiplePartitionsFilecheck) { constexpr absl::string_view hlo_string = R"( HloModule module %add { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT add = f32[] add(lhs, rhs) } ENTRY %comp { p0 = f32[8,8] parameter(0) ROOT crs = f32[8,8] all-reduce(p0), channel_id=1, replica_groups={{0,1,2,3,4,5,6,7}}, use_global_device_ids=true, to_apply=add })"; HloModuleConfig module_config; SetModuleConfig(&module_config, 1, 8); AllReduceBlueConnect pass(4); 
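// FileCheck the rewritten module: the reduce-scatter and all-gather get fresh channel ids (2 and 3) while the inner all-reduce keeps channel_id=1.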
RunAndFilecheckHloRewrite(hlo_string, std::move(pass), R"( CHECK: %p0 = f32[8,8]{1,0} parameter(0) CHECK-NEXT: [[bitcast:%[^ ]+]] = f32[64]{0} bitcast(%p0) CHECK-NEXT: [[reduce_scatter:%[^ ]+]] = f32[16]{0} reduce-scatter([[bitcast]]), channel_id=2, replica_groups={{..0,1,2,3.,.4,5,6,7..}}, use_global_device_ids=true, dimensions={0}, to_apply=%add CHECK-NEXT: [[all_reduce:%[^ ]+]] = f32[16]{0} all-reduce([[reduce_scatter]]), channel_id=1, replica_groups={{..0,4.,.1,5.,.2,6.,.3,7..}}, use_global_device_ids=true, to_apply=%add CHECK-NEXT: [[all_gather:%[^ ]+]] = f32[64]{0} all-gather([[all_reduce]]), channel_id=3, replica_groups={{..0,1,2,3.,.4,5,6,7..}}, dimensions={0}, use_global_device_ids=true CHECK-NEXT: ROOT [[output:%[^ ]+]] = f32[8,8]{1,0} bitcast([[all_gather]]) } )", nullptr, &module_config); } TEST_F(AllReduceBlueConnectTest, DifferentNumLocalDevicesWithinReplicaGroup) { constexpr absl::string_view hlo_string = R"( HloModule module %add { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT add = f32[] add(lhs, rhs) } ENTRY %comp { p0 = f32[4,4] parameter(0) ROOT crs = f32[4,4] all-reduce(p0), replica_groups={{0,1,2,7},{3,4,5,6}}, to_apply=add })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); SetModuleConfig(*module, 8); AllReduceBlueConnect pass(4); EXPECT_THAT(pass.Run(module.get()), IsOkAndHolds(false)); } TEST_F(AllReduceBlueConnectTest, DifferentNumLocalDevicesAcrossReplicaGroups) { constexpr absl::string_view hlo_string = R"( HloModule module %add { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT add = f32[] add(lhs, rhs) } ENTRY %comp { p0 = f32[4,4] parameter(0) ROOT crs = f32[4,4] all-reduce(p0), replica_groups={{0,1,4,5},{2,3,6,7},{8,9,10,11},{12,13,14,15}}, to_apply=add })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); SetModuleConfig(*module, 16); AllReduceBlueConnect pass(4); EXPECT_THAT(pass.Run(module.get()), IsOkAndHolds(false)); } TEST_F(AllReduceBlueConnectTest, OperandIndivisible) { constexpr absl::string_view hlo_string = R"( HloModule module %add { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT add = f32[] add(lhs, rhs) } ENTRY %comp { p0 = f32[4,4] parameter(0) p1 = f32[9] parameter(1) ROOT crs = (f32[4,4], f32[9]) all-reduce(p0, p1), to_apply=add })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); SetModuleConfig(*module, 8); AllReduceBlueConnect pass(4); EXPECT_THAT(pass.Run(module.get()), IsOkAndHolds(false)); } TEST_F(AllReduceBlueConnectTest, ControlDeps) { constexpr absl::string_view hlo_string = R"( HloModule module %add { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT add = f32[] add(lhs, rhs) } ENTRY %comp { p0 = f32[4,4] parameter(0) p1 = f32[4,4] parameter(1) add = f32[4,4] add(p0, p1) crs = f32[4,4] all-reduce(p0), to_apply=add, control-predecessors={add} ROOT add1 = f32[4,4] add(crs, add), control-predecessors={crs} })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); SetModuleConfig(*module, 8); const HloInstruction* ar = module->entry_computation()->root_instruction()->operand(0); auto expected_preds = ar->control_predecessors(); auto expected_succs = ar->control_successors(); AllReduceBlueConnect pass(4); EXPECT_THAT(pass.Run(module.get()), IsOkAndHolds(true)); std::vector<std::vector<int64_t>> scatter_gather_groups = { {0, 1, 2, 3}, {4, 5, 6, 7}}; std::vector<std::vector<int64_t>> 
new_all_reduce_groups = { {0, 4}, {1, 5}, {2, 6}, {3, 7}}; const HloInstruction *matched_rs, *matched_bitcast; auto bitcast = m::Bitcast(m::Parameter(0)).WithShape(F32, {16}); auto reduce_scatter = m::ReduceScatter(&matched_rs, bitcast) .WithShape(F32, {4}) .WithReplicaGroups(scatter_gather_groups); auto all_reduce = m::AllReduce(reduce_scatter) .WithShape(F32, {4}) .WithReplicaGroups(new_all_reduce_groups); auto all_gather = m::AllGather(all_reduce) .WithShape(F32, {16}) .WithReplicaGroups(scatter_gather_groups); HloInstruction* root = module->entry_computation()->root_instruction(); ASSERT_THAT(root, GmockMatch(m::Add())); EXPECT_THAT( root->operand(0), GmockMatch( m::Bitcast(&matched_bitcast, all_gather).WithShape(F32, {4, 4}))); EXPECT_THAT(matched_rs, GmockMatch(m::Op().WithControlDeps( absl::MakeSpan(expected_preds), {}))); EXPECT_THAT(matched_bitcast, GmockMatch(m::Op().WithControlDeps( {}, absl::MakeSpan(expected_succs)))); } TEST_F(AllReduceBlueConnectTest, ReduceScatterUnchanged) { constexpr absl::string_view hlo_string = R"( HloModule module %add { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT add = f32[] add(lhs, rhs) } ENTRY %comp { p0 = f32[8,4] parameter(0) ROOT crs = f32[1,4] reduce-scatter(p0), dimensions={0}, to_apply=add })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); SetModuleConfig(*module, 8); AllReduceBlueConnect pass(4); EXPECT_THAT(pass.Run(module.get()), IsOkAndHolds(false)); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/all_reduce_blueconnect.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/all_reduce_blueconnect_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
52ccbbd8-23d0-473e-a425-5f4b87f19ae3
cpp
tensorflow/tensorflow
rename_fusions
third_party/xla/xla/service/gpu/transforms/rename_fusions.cc
third_party/xla/xla/service/gpu/transforms/rename_fusions_test.cc
#include "xla/service/gpu/transforms/rename_fusions.h" #include <memory> #include <string> #include "absl/container/btree_set.h" #include "absl/container/flat_hash_set.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_join.h" #include "absl/strings/str_replace.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/gpu/hlo_traversal.h" #include "xla/service/gpu/ir_emission_utils.h" namespace xla { namespace gpu { namespace { constexpr absl::string_view FusionKindToString( HloInstruction::FusionKind kind) { switch (kind) { case HloInstruction::FusionKind::kCustom: return "custom"; case HloInstruction::FusionKind::kLoop: return "loop"; case HloInstruction::FusionKind::kInput: return "input"; case HloInstruction::FusionKind::kOutput: return "output"; } } std::string MakeFusionHeroNames(const HloInstruction* instruction) { std::unique_ptr<HloFusionAdaptor> fusion_adaptor = HloFusionAdaptor::ForInstruction(instruction); absl::btree_set<absl::string_view> heroes; for (auto root : fusion_adaptor->GetRoots()) { heroes.insert(HloOpcodeString(FindNonTrivialHero(root).opcode())); } return absl::StrReplaceAll(absl::StrJoin(heroes, "_"), {{"-", "_"}}); } void RenameFusion(HloModule* module, HloInstruction* instruction) { std::string hero_names = MakeFusionHeroNames(instruction); module->SetAndUniquifyInstrName( instruction, absl::StrCat(FusionKindToString(instruction->fusion_kind()), "_", hero_names, "_fusion")); module->SetAndUniquifyComputationName( instruction->fused_instructions_computation(), absl::StrCat("fused_", hero_names)); } } absl::StatusOr<bool> RenameFusions::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { for (HloComputation* computation : module->MakeNonfusionComputations()) { for (HloInstruction* instruction : computation->instructions()) { if (instruction->opcode() != HloOpcode::kFusion || instruction->fusion_kind() == HloInstruction::FusionKind::kCustom) { continue; } RenameFusion(module, instruction); } } return true; } } }
#include "xla/service/gpu/transforms/rename_fusions.h" #include <utility> #include <gtest/gtest.h> #include "absl/strings/string_view.h" #include "xla/tests/hlo_test_base.h" namespace xla { namespace gpu { class RenameFusionsTest : public HloTestBase { protected: RenameFusions rename_fusions_; }; TEST_F(RenameFusionsTest, FusionInstructionNames) { absl::string_view kHlo = R"( HloModule test_module square { p = f32[16384] parameter(0) ROOT m = f32[16384] multiply(p, p) } exp { p = f32[16384] parameter(0) ROOT e = f32[16384] exponential(p) } log { p = f32[16384] parameter(0) ROOT l = f32[16384] log(p) } add { p0 = f32[] parameter(0) p1 = f32[] parameter(1) ROOT add = f32[] add(p0, p1) } ENTRY main { p0 = bf16[1024,8192] parameter(0) p1 = f32[8192] parameter(1) p2 = f32[16384] parameter(2) convert = f32[1024,8192] convert(p0) broadcast = f32[1024,8192] broadcast(p1), dimensions={1} c0 = f32[] constant(0) multiply = f32[1024,8192] multiply(broadcast, convert) reduce = f32[1024] reduce(multiply, c0), dimensions={1}, to_apply=add convert.1 = bf16[1024] convert(reduce) s = f32[16384] fusion(p2), kind=kLoop, calls=square e = f32[16384] fusion(s), kind=kLoop, calls=exp l = f32[16384] fusion(s), kind=kInput, calls=log ROOT result = (bf16[1024]{0}, f32[16384]{0}, f32[16384]{0}) tuple(convert.1, l, e) })"; RunAndFilecheckHloRewrite(kHlo, std::move(rename_fusions_), R"( CHECK: ENTRY %main CHECK: %loop_multiply_fusion{{.*}} calls=%fused_multiply CHECK: %input_log_fusion{{.*}} calls=%fused_log CHECK: %loop_exponential_fusion{{.*}} calls=%fused_exponential CHECK: ROOT %result )"); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/rename_fusions.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/rename_fusions_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
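RenameFusions above builds a fusion's new name from its kind plus the sorted, deduplicated hero opcodes of its roots, joined with underscores and with '-' replaced by '_', so a kLoop fusion whose hero is multiply becomes loop_multiply_fusion calling fused_multiply, matching the FileCheck expectations in the test. A standalone sketch of just that naming rule follows; MakeName is a hypothetical helper, and the real pass obtains the hero opcodes through HloFusionAdaptor and FindNonTrivialHero.

// Sketch of the naming scheme used by RenameFusions, reimplemented standalone.
#include <string>
#include "absl/container/btree_set.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_replace.h"
#include "absl/strings/string_view.h"

std::string MakeName(absl::string_view kind,
                     const absl::btree_set<std::string>& hero_opcodes) {
  // btree_set keeps the hero opcodes sorted and unique, as in the pass.
  std::string heroes = absl::StrReplaceAll(absl::StrJoin(hero_opcodes, "_"),
                                           {{"-", "_"}});
  return absl::StrCat(kind, "_", heroes, "_fusion");
}

// MakeName("loop", {"multiply"})       -> "loop_multiply_fusion"
// MakeName("input", {"reduce-window"}) -> "input_reduce_window_fusion"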
7ffc2709-c3c5-4e50-8744-ccaa79fd462b
cpp
tensorflow/tensorflow
triton_fusion_numerics_verifier
third_party/xla/xla/service/gpu/transforms/triton_fusion_numerics_verifier.cc
third_party/xla/xla/service/gpu/transforms/triton_fusion_numerics_verifier_test.cc
#include "xla/service/gpu/transforms/triton_fusion_numerics_verifier.h" #include <memory> #include <optional> #include <utility> #include "absl/container/flat_hash_set.h" #include "absl/functional/any_invocable.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/executable.h" #include "xla/service/gpu/autotuning/autotuner_compile_util.h" #include "xla/service/gpu/autotuning/autotuner_util.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/buffer_comparator.h" #include "xla/service/gpu/ir_emission_utils.h" #include "xla/service/hlo_module_config.h" #include "xla/service/shaped_buffer.h" #include "xla/shape.h" #include "xla/status_macros.h" #include "xla/stream_executor/stream.h" #include "xla/tools/hlo_decomposer.h" #include "xla/util.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla::gpu { namespace { using ProfilingOutput = AutotunerCompileUtil::ProfilingOutput; absl::StatusOr<const HloFusionInstruction*> AsTritonFusion( const HloInstruction* hlo) { if (hlo->opcode() != HloOpcode::kFusion) { return nullptr; } const HloFusionInstruction* fusion = Cast<HloFusionInstruction>(hlo); TF_ASSIGN_OR_RETURN(auto gpu_config, fusion->backend_config<GpuBackendConfig>()); const FusionBackendConfig& backend_config = gpu_config.fusion_backend_config(); if (backend_config.kind() == kTritonFusionKind) { return fusion; } return nullptr; } std::unique_ptr<HloModule> NewHloModuleFromFusion( const HloFusionInstruction& fusion, const DebugOptions& debug_opts, bool clear_backend_config) { std::unique_ptr<HloModule> new_module = ExtractInstructionIntoNewModule(fusion); if (clear_backend_config) { new_module->entry_computation()->root_instruction()->clear_backend_config(); } new_module->mutable_config().set_debug_options(debug_opts); return new_module; } } namespace triton_fusion_numerics_pass_internal { absl::StatusOr<ScopedShapedBuffer> CompileAndRunFusion( AutotunerCompileUtil& util, const HloFusionInstruction& fusion, const AutotuneConfig& config, const DebugOptions& debug_opts, bool clear_backend_config) { TF_ASSIGN_OR_RETURN(std::unique_ptr<Executable> executable, util.Compile([&](const DebugOptions& opts) { return NewHloModuleFromFusion(fusion, opts, clear_backend_config); })); if (executable == nullptr) { return Internal("Failed to compile Triton fusion."); } TF_ASSIGN_OR_RETURN(auto rz_buffers, RedzoneBuffers::FromInstruction( fusion, config, debug_opts, RedzoneBuffers::kAllInputs)); TF_ASSIGN_OR_RETURN(auto stream, config.GetStream()); TF_ASSIGN_OR_RETURN(std::optional<ProfilingOutput> profiling_output, util.ProfileExecutable(executable.get(), stream, rz_buffers.input_buffers(), rz_buffers.input_shapes())); if (!profiling_output.has_value()) { return Internal("No output after a successful verification run."); } return std::move(profiling_output->output); } absl::Status CompareBuffers(const ScopedShapedBuffer& current, const ScopedShapedBuffer& expected, const Shape& shape, const HloModuleConfig& config, se::Stream* stream) { BufferComparator comparator( shape, config.debug_options().xla_gpu_autotune_gemm_rtol()); TF_ASSIGN_OR_RETURN(bool outputs_match, comparator.CompareEqual(stream, current.root_buffer(), expected.root_buffer())); if (!outputs_match) { 
return Internal("Triton fusion output does not match emitters output."); } return absl::OkStatus(); } absl::Status ForAllTritonFusions( const HloModule& module, const absl::flat_hash_set<absl::string_view>& execution_threads, absl::AnyInvocable<absl::Status(const HloFusionInstruction&)> fn) { for (HloComputation* computation : module.MakeNonfusionComputations(execution_threads)) { for (HloInstruction* instruction : computation->instructions()) { TF_ASSIGN_OR_RETURN(auto triton_fusion, AsTritonFusion(instruction)); if (triton_fusion != nullptr) { TF_RETURN_IF_ERROR(fn(*triton_fusion)); } } } return absl::OkStatus(); } } namespace { absl::Status VerifyTritonFusion(AutotunerCompileUtil& util, const HloFusionInstruction& fusion, const AutotuneConfig& config, const DebugOptions& debug_opts) { TF_ASSIGN_OR_RETURN(auto triton_result, triton_fusion_numerics_pass_internal::CompileAndRunFusion( util, fusion, config, debug_opts, false)); TF_ASSIGN_OR_RETURN(auto emitters_result, triton_fusion_numerics_pass_internal::CompileAndRunFusion( util, fusion, config, debug_opts, true)); TF_ASSIGN_OR_RETURN(auto stream, config.GetStream()); auto status = triton_fusion_numerics_pass_internal::CompareBuffers( triton_result, emitters_result, fusion.shape(), fusion.GetModule()->config(), stream); if (!status.ok()) { LOG(ERROR) << "Triton numerics verification failed with: " << status.message() << "\n The failing HLO is: \n\n" << ExtractInstructionIntoNewModule(fusion)->ToString(); } return status; } } absl::StatusOr<bool> TritonFusionNumericsVerifier::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { if (config_.IsDeviceless()) { return absl::InternalError( "Cannot run TritonFusionNumericsVerifier on a deviceless compilation."); } DebugOptions debug_options = module->config().debug_options(); debug_options.set_xla_gpu_filter_kernels_spilling_registers_on_autotuning( false); TF_ASSIGN_OR_RETURN(std::optional<AutotunerCompileUtil> opt_compile_util, AutotunerCompileUtil::Create(config_, debug_options)); TF_RET_CHECK(opt_compile_util.has_value()); TF_RETURN_IF_ERROR(triton_fusion_numerics_pass_internal::ForAllTritonFusions( *module, execution_threads, [&](const HloFusionInstruction& fusion) { return VerifyTritonFusion(*opt_compile_util, fusion, config_, debug_options); })); return false; } }
#include "xla/service/gpu/transforms/triton_fusion_numerics_verifier.h" #include <memory> #include <utility> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/status/status.h" #include "absl/strings/string_view.h" #include "absl/strings/substitute.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/primitive_util.h" #include "xla/service/gpu/autotuning/autotuner_compile_util.h" #include "xla/service/gpu/autotuning/autotuner_util.h" #include "xla/service/platform_util.h" #include "xla/stream_executor/platform.h" #include "xla/test_helpers.h" #include "xla/tests/hlo_test_base.h" #include "xla/tsl/lib/core/status_test_util.h" #include "tsl/platform/status_matchers.h" namespace xla::gpu { namespace { class TritonFusionNumericsVerifierTest : public HloTestBase, public ::testing::WithParamInterface<PrimitiveType> { public: DebugOptions GetDebugOptionsForTest() override { auto options = HloTestBase::GetDebugOptionsForTest(); options.set_xla_gpu_experimental_enable_triton_softmax_priority_fusion( true); options.set_xla_gpu_verify_triton_fusion_numerics(true); return options; } protected: std::unique_ptr<xla::HloModule> Module(absl::string_view hlo_text_template, absl::string_view type) { auto m = GetOptimizedModule(absl::Substitute(hlo_text_template, type)); TF_EXPECT_OK(m); return std::move(m.value()); } const HloFusionInstruction* TritonFusion(const xla::HloModule& module) { const HloFusionInstruction* fusion_result = nullptr; absl::Status res = triton_fusion_numerics_pass_internal::ForAllTritonFusions( module, {}, [&](const HloFusionInstruction& fusion) -> absl::Status { EXPECT_EQ(fusion_result, nullptr); fusion_result = &fusion; return absl::OkStatus(); }); return fusion_result; } AutotuneConfig CreateAutotuneConfig() { se::Platform* platform = PlatformUtil::GetDefaultPlatform().value(); auto executors_or = PlatformUtil::GetStreamExecutors(platform); TF_EXPECT_OK(executors_or); return AutotuneConfig{DeviceConfig{executors_or->at(0), nullptr}, GetDebugOptionsForTest()}; } AutotunerCompileUtil CreateAutotunerCompileUtil(AutotuneConfig& config) { auto opt_compile_util_or = AutotunerCompileUtil::Create(config, GetDebugOptionsForTest()); TF_EXPECT_OK(opt_compile_util_or); EXPECT_TRUE(opt_compile_util_or->has_value()); return std::move(opt_compile_util_or->value()); } }; constexpr absl::string_view kSoftmaxHlo = R"( HloModule softmax max_computation { arg_0 = $0[] parameter(0) arg_1 = $0[] parameter(1) ROOT maximum = $0[] maximum(arg_0, arg_1) } add_computation { arg_0.1 = $0[] parameter(0) arg_1.1 = $0[] parameter(1) ROOT add = $0[] add(arg_0.1, arg_1.1) } ENTRY main { param_0 = $0[127,125]{1,0} parameter(0) constant_neg_inf = $0[] constant(-inf) reduce = $0[127]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation broadcast = $0[127,125]{1,0} broadcast(reduce), dimensions={0} subtract = $0[127,125]{1,0} subtract(param_0, broadcast) exponential = $0[127,125]{1,0} exponential(subtract) constant_zero = $0[] constant(0) second_reduce = $0[127]{0} reduce(exponential, constant_zero), dimensions={1}, to_apply=add_computation second_broadcast = $0[127,125]{1,0} broadcast(second_reduce), dimensions={0} ROOT divide = $0[127,125]{1,0} divide(exponential, second_broadcast) } )"; bool HloPassHasRun(const HloModule& module, absl::string_view pass_name) { for (const auto& pass_metadata : module.metadata().proto().pass_metadata()) { if (pass_metadata.pass_name() == pass_name) { return true; } } return false; } 
TEST_P(TritonFusionNumericsVerifierTest, VerifyExactSoftmaxFusionNumerics) { PrimitiveType data_type = GetParam(); auto module = Module(kSoftmaxHlo, primitive_util::LowercasePrimitiveTypeName(data_type)); EXPECT_TRUE(HloPassHasRun(*module, TritonFusionNumericsVerifier::Name())); auto fusion = TritonFusion(*module); EXPECT_NE(fusion, nullptr); } TEST_F(TritonFusionNumericsVerifierTest, CheckMismatch) { auto module_f16 = Module(kSoftmaxHlo, "f16"); auto fusion_f16 = TritonFusion(*module_f16); EXPECT_NE(fusion_f16, nullptr); auto module_f32 = Module(kSoftmaxHlo, "f32"); auto fusion_f32 = TritonFusion(*module_f32); EXPECT_NE(fusion_f32, nullptr); AutotuneConfig autotune_config = CreateAutotuneConfig(); AutotunerCompileUtil compile_util = CreateAutotunerCompileUtil(autotune_config); const DebugOptions& debug_options = GetDebugOptionsForTest(); auto f16_result = triton_fusion_numerics_pass_internal::CompileAndRunFusion( compile_util, *fusion_f16, autotune_config, debug_options, false); TF_EXPECT_OK(f16_result); auto f32_result = triton_fusion_numerics_pass_internal::CompileAndRunFusion( compile_util, *fusion_f32, autotune_config, debug_options, false); TF_EXPECT_OK(f32_result); auto stream = autotune_config.GetStream(); TF_EXPECT_OK(stream); auto cmp = triton_fusion_numerics_pass_internal::CompareBuffers( *f16_result, *f32_result, fusion_f16->shape(), fusion_f16->GetModule()->config(), *stream); EXPECT_FALSE(cmp.ok()); } TEST_F(TritonFusionNumericsVerifierTest, CompilationSucceedsEvenIfKernelWillSpillRegisters) { auto module = Module(R"( HloModule m add { Arg_0 = f32[] parameter(0) Arg_1 = f32[] parameter(1) ROOT add = f32[] add(Arg_0, Arg_1) } triton_softmax_computation { param_0 = f32[16,256000] parameter(0) constant_0 = f32[] constant(0) reduce_0 = f32[16]{0} reduce(param_0, constant_0), dimensions={1}, to_apply=add broadcast_0 = f32[16,256000]{1,0} broadcast(reduce_0), dimensions={0} ROOT multiply = f32[16,256000]{1,0} multiply(param_0, broadcast_0) } ENTRY main { param_0 = f32[16,256000] parameter(0) ROOT triton_softmax = f32[16,256000]{1,0} fusion(param_0), kind=kCustom, calls=triton_softmax_computation, backend_config={"fusion_backend_config": {"kind":"__triton","block_level_fusion_config":{"output_tile_sizes":["1","256000"],"num_warps":"32"}}} } )", ""); EXPECT_TRUE(HloPassHasRun(*module, TritonFusionNumericsVerifier::Name())); auto fusion = TritonFusion(*module); EXPECT_NE(fusion, nullptr); AutotuneConfig autotune_config = CreateAutotuneConfig(); AutotunerCompileUtil compile_util = CreateAutotunerCompileUtil(autotune_config); auto compilation_result = triton_fusion_numerics_pass_internal::CompileAndRunFusion( compile_util, *fusion, autotune_config, GetDebugOptionsForTest(), false); EXPECT_FALSE(compilation_result.ok()); EXPECT_THAT(compilation_result.status(), tsl::testing::StatusIs(absl::StatusCode::kInternal)); EXPECT_THAT(compilation_result.status().message(), ::testing::HasSubstr("Failed to compile Triton fusion")); } INSTANTIATE_TEST_SUITE_P(TritonFusionNumericsVerifierTestSuite, TritonFusionNumericsVerifierTest, ::testing::Values(F32, F16, BF16)); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/triton_fusion_numerics_verifier.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/triton_fusion_numerics_verifier_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
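TritonFusionNumericsVerifier above compiles each __triton fusion twice, once as-is and once with the backend config cleared so the regular emitters take over, runs both executables on the same inputs, and compares the root buffers with BufferComparator at the xla_gpu_autotune_gemm_rtol tolerance. Below is a minimal sketch of that compile-both-and-compare strategy with hypothetical stand-ins: run plays the role of CompileAndRunFusion with clear_backend_config toggled, and buffers_match the role of BufferComparator::CompareEqual.

// Minimal sketch of the verification strategy above; `run` and `buffers_match`
// are hypothetical callables, not the real API.
#include "absl/status/status.h"
#include "xla/util.h"

template <typename Fusion, typename RunFn, typename CompareFn>
absl::Status VerifyNumericsSketch(const Fusion& fusion, RunFn run,
                                  CompareFn buffers_match) {
  auto triton_out = run(fusion, /*use_triton=*/true);    // Triton backend kept
  auto emitter_out = run(fusion, /*use_triton=*/false);  // backend config cleared
  if (!buffers_match(triton_out, emitter_out)) {
    return xla::Internal("Triton fusion output does not match emitters output.");
  }
  return absl::OkStatus();
}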
9f9db1be-f58f-4382-bf7e-e57609423b88
cpp
tensorflow/tensorflow
custom_kernel_fusion_rewriter
third_party/xla/xla/service/gpu/transforms/custom_kernel_fusion_rewriter.cc
third_party/xla/xla/service/gpu/transforms/custom_kernel_fusion_rewriter_test.cc
#include "xla/service/gpu/transforms/custom_kernel_fusion_rewriter.h" #include <cstdint> #include <optional> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/container/inlined_vector.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/service/gpu/kernels/custom_kernel_fusion_pattern.h" #include "xla/shape_util.h" #include "xla/stream_executor/device_description.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" #include "tsl/platform/statusor.h" namespace xla::gpu { CustomKernelFusionRewriter::CustomKernelFusionRewriter( const se::DeviceDescription* device, int kernel_index, const CustomKernelFusionPatternRegistry* patterns) : device_(device), kernel_index_(kernel_index), patterns_(patterns) {} static std::optional<absl::flat_hash_set<HloInstruction*>> GetPatternReplacements(const CustomKernelFusionPattern::Match& match) { absl::flat_hash_set<HloInstruction*> requires_replacement; absl::flat_hash_set<HloInstruction*> instructions_set( match.instructions().begin(), match.instructions().end()); for (HloInstruction* instr : match.instructions()) { for (HloInstruction* user : instr->users()) { if (instr == match.root() || instructions_set.contains(user)) continue; if (match.HasReplacement(instr)) { requires_replacement.insert(instr); continue; } VLOG(3) << "Custom kernel fusion intermediate result " << instr->name() << " has users outside of a matched pattern: " << user->name(); return std::nullopt; } } return requires_replacement; } static absl::InlinedVector<HloInstruction*, 4> GetPatternCaptures( const CustomKernelFusionPattern::Match& match) { absl::InlinedVector<HloInstruction*, 4> captures; absl::flat_hash_set<HloInstruction*> instructions_set( match.instructions().begin(), match.instructions().end()); for (HloInstruction* instr : match.instructions()) { for (HloInstruction* operand : instr->operands()) { if (!instructions_set.contains(operand) && absl::c_find(captures, operand) == captures.end()) { captures.emplace_back(operand); } } } return captures; } static absl::StatusOr<HloComputation*> CreateFusionBody( HloModule* module, const CustomKernelFusionPattern::Match& match, absl::Span<HloInstruction* const> captures) { HloComputation::Builder builder(match.config().name()); absl::flat_hash_map<const HloInstruction*, HloInstruction*> instr_mapping; auto mapped_operands = [&](HloInstruction* instr) { absl::InlinedVector<HloInstruction*, 4> operands; for (HloInstruction* operand : instr->operands()) { operands.push_back(instr_mapping.at(operand)); } return operands; }; for (const HloInstruction* capture : captures) { int64_t index = instr_mapping.size(); instr_mapping[capture] = builder.AddInstruction(HloInstruction::CreateParameter( index, capture->shape(), absl::StrCat("p", index))); } for (HloInstruction* instr : match.instructions()) { instr_mapping[instr] = builder.AddInstruction( instr->CloneWithNewOperands(instr->shape(), mapped_operands(instr))); } HloInstruction* root = builder.last_added_instruction(); if (match.workspace_size_bytes() > 0) { auto workspace_shape = ShapeUtil::MakeShape(PrimitiveType::U8, {match.workspace_size_bytes()}); HloInstruction* workspace = 
builder.AddInstruction(HloInstruction::CreateCustomCall( workspace_shape, {}, CustomKernelFusionPattern::kWorkspace, "", CustomCallApiVersion::API_VERSION_TYPED_FFI)); builder.AddInstruction(HloInstruction::CreateTuple({root, workspace})); } return module->AddComputationAndUnifyNamesAndIds(builder.Build(), false); } namespace { absl::StatusOr<HloInstruction*> CreateFusionInstruction( HloModule* module, const CustomKernelFusionPattern::Match& match, absl::Span<HloInstruction* const> captures, HloComputation* body, int kernel_index) { HloInstruction* root = match.root(); HloComputation* parent = root->parent(); HloInstruction* fusion = parent->AddInstruction(HloInstruction::CreateFusion( body->root_instruction()->shape(), HloInstruction::FusionKind::kCustom, captures, body)); module->SetAndUniquifyInstrName(fusion, match.config().name()); GpuBackendConfig gpu_config; FusionBackendConfig& backend_config = *gpu_config.mutable_fusion_backend_config(); backend_config.set_kind("__custom_fusion"); *backend_config.mutable_custom_fusion_config() = match.config(); backend_config.mutable_custom_fusion_config()->set_kernel_index(kernel_index); TF_RETURN_IF_ERROR(fusion->set_backend_config(std::move(gpu_config))); if (match.workspace_size_bytes() == 0) return fusion; return parent->AddInstruction( HloInstruction::CreateGetTupleElement(fusion, 0)); } } absl::StatusOr<bool> CustomKernelFusionRewriter::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { std::vector<CustomKernelFusionPattern::Match> matches; for (HloComputation* computation : module->computations()) { for (HloInstruction* instr : computation->instructions()) { auto matched = patterns_->Match(*device_, instr); matches.insert(matches.end(), matched.begin(), matched.end()); } } if (matches.empty()) return false; for (const CustomKernelFusionPattern::Match& match : matches) { VLOG(2) << "Matched custom kernel fusion " << match.config().name() << "; root instruction: " << match.instructions().back()->name(); auto replacememts = GetPatternReplacements(match); if (!replacememts.has_value()) continue; auto captures = GetPatternCaptures(match); TF_ASSIGN_OR_RETURN(HloComputation * fusion_body, CreateFusionBody(module, match, captures)); TF_ASSIGN_OR_RETURN(HloInstruction * fusion, CreateFusionInstruction(module, match, captures, fusion_body, kernel_index_)); VLOG(2) << "Added a fusion instruction: " << fusion->name() << " for custom kernel fusion " << match.config().name() << " (instruction count = " << match.instructions().size() << ")"; for (HloInstruction* instr : *replacememts) { VLOG(2) << "Replace matched instruction: " << instr->name() << " with a pattern replacement"; TF_ASSIGN_OR_RETURN( HloInstruction * replacement, match.BuildReplacement(instr, Cast<HloFusionInstruction>(fusion))); TF_RETURN_IF_ERROR( instr->ReplaceAllUsesWith(replacement, match.config().name())); VLOG(2) << "Replaced instruction: " << instr->name() << " with: " << replacement->name(); } VLOG(2) << "Replace custom kernel fusion root instruction " << match.root()->name() << "with " << fusion->name(); HloComputation* parent = match.root()->parent(); TF_RETURN_IF_ERROR(parent->ReplaceInstruction(match.root(), fusion)); } return true; } }
#include "xla/service/gpu/transforms/custom_kernel_fusion_rewriter.h" #include <cstdint> #include <optional> #include <utility> #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/service/gpu/gpu_device_info_for_tests.h" #include "xla/service/gpu/kernels/custom_kernel_fusion_pattern.h" #include "xla/stream_executor/device_description.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/test.h" namespace xla::gpu { struct SimpleGemmPattern : public CustomKernelFusionPattern { explicit SimpleGemmPattern(int64_t workspace = 0) : workspace(workspace) {} std::optional<Match> TryMatch(const se::DeviceDescription& device, HloInstruction* instr) const override { if (auto* dot = DynCast<HloDotInstruction>(instr)) { CustomFusionConfig config; config.set_name("simple_gemm"); return Match{config, {instr}, workspace}; } return std::nullopt; } int64_t workspace; }; class CustomKernelFusionRewriterTest : public HloTestBase {}; TEST_F(CustomKernelFusionRewriterTest, SimpleGemm) { const char* hlo = R"( HloModule test ENTRY %main (p0: f16[15,19], p1: f16[19,17]) -> f16[15,17] { %p0 = f16[15,19]{1,0} parameter(0) %p1 = f16[19,17]{1,0} parameter(1) ROOT %r = f16[15,17]{1,0} dot(%p0, %p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; const char* expected = R"( ; CHECK: %simple_gemm {{.*}} { ; CHECK: [[P0:%[^ ]+]] = f16[15,19]{1,0} parameter(0) ; CHECK: [[P1:%[^ ]+]] = f16[19,17]{1,0} parameter(1) ; CHECK: ROOT [[DOT:%[^ ]+]] = f16[15,17]{1,0} dot([[P0]], [[P1]]), ; CHECK: lhs_contracting_dims={1}, rhs_contracting_dims={0} ; CHECK: } ; CHECK: ENTRY %main {{.*}} { ; CHECK: ROOT [[FUSION:%[^ ]+]] = f16[15,17]{1,0} fusion ; CHECK: kind=kCustom, calls=%simple_gemm, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"simple_gemm","kernel_index":0} ; CHECK: } ; CHECK: } )"; CustomKernelFusionPatternRegistry patterns; patterns.Emplace<SimpleGemmPattern>(); auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); CustomKernelFusionRewriter pass(&device, 0, &patterns); RunAndFilecheckHloRewrite(hlo, std::move(pass), expected); } TEST_F(CustomKernelFusionRewriterTest, SetsKernelIndex) { const char* hlo = R"( HloModule test ENTRY %main (p0: f16[15,19], p1: f16[19,17]) -> f16[15,17] { %p0 = f16[15,19]{1,0} parameter(0) %p1 = f16[19,17]{1,0} parameter(1) ROOT %r = f16[15,17]{1,0} dot(%p0, %p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; CustomKernelFusionPatternRegistry patterns; patterns.Emplace<SimpleGemmPattern>(); auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); CustomKernelFusionRewriter pass(&device, 1, &patterns); RunAndFilecheckHloRewrite(hlo, std::move(pass), "CHECK: \"kernel_index\":1"); } TEST_F(CustomKernelFusionRewriterTest, SimpleGemmWithWorkspace) { const char* hlo = R"( HloModule test ENTRY %main (p0: f16[15,19], p1: f16[19,17]) -> f16[15,17] { %p0 = f16[15,19]{1,0} parameter(0) %p1 = f16[19,17]{1,0} parameter(1) ROOT %r = f16[15,17]{1,0} dot(%p0, %p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; const char* expected = R"( ; CHECK: %simple_gemm {{.*}} { ; CHECK: [[P0:%[^ ]+]] = f16[15,19]{1,0} parameter(0) ; CHECK: [[P1:%[^ ]+]] = f16[19,17]{1,0} parameter(1) ; CHECK: [[DOT:%[^ ]+]] = f16[15,17]{1,0} dot([[P0]], [[P1]]), ; CHECK: lhs_contracting_dims={1}, rhs_contracting_dims={0} ; CHECK: [[WORKSPACE:%[^ ]+]] = u8[1024]{0} custom-call(), ; CHECK: custom_call_target="__custom_kernel_fusion$workspace" ; CHECK: ROOT 
[[TUPLE:%[^ ]+]] = (f16[15,17]{1,0}, u8[1024]{0}) ; CHECK: tuple([[DOT]], [[WORKSPACE]]) ; CHECK: } ; CHECK: ENTRY %main {{.*}} { ; CHECK: [[FUSION:%[^ ]+]] = (f16[15,17]{1,0}, u8[1024]{0}) fusion ; CHECK: kind=kCustom, calls=%simple_gemm, ; CHECK: backend_config={ ; CHECK: "kind":"__custom_fusion", ; CHECK: "custom_fusion_config":{"name":"simple_gemm","kernel_index":0} ; CHECK: } ; CHECK: ROOT {{.*}} get-tuple-element([[FUSION]]), index=0 ; CHECK: } )"; CustomKernelFusionPatternRegistry patterns; patterns.Emplace<SimpleGemmPattern>(1024); auto device = TestGpuDeviceInfo::RTXA6000DeviceInfo(); CustomKernelFusionRewriter pass(&device, 0, &patterns); RunAndFilecheckHloRewrite(hlo, std::move(pass), expected); } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/custom_kernel_fusion_rewriter.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/custom_kernel_fusion_rewriter_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
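CustomKernelFusionRewriter above wraps every registered pattern match in a kCustom fusion whose backend config carries kind "__custom_fusion", the pattern's CustomFusionConfig, and the kernel index; when the pattern declares a workspace, the fusion returns a (result, u8[N]) tuple and the original value is recovered through a get-tuple-element, as the SimpleGemmWithWorkspace expectations show. Below is a sketch of a user-defined pattern modeled directly on the test's SimpleGemmPattern; MyConvPattern, the "my_conv" name, and the convolution check are hypothetical.

// Sketch of a user-defined pattern for CustomKernelFusionPatternRegistry,
// modeled on the test's SimpleGemmPattern. Names here are illustrative.
#include <optional>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/kernels/custom_kernel_fusion_pattern.h"
#include "xla/stream_executor/device_description.h"

namespace xla::gpu {

struct MyConvPattern : public CustomKernelFusionPattern {
  std::optional<Match> TryMatch(const se::DeviceDescription& device,
                                HloInstruction* instr) const override {
    if (instr->opcode() != HloOpcode::kConvolution) return std::nullopt;
    CustomFusionConfig config;
    config.set_name("my_conv");
    // Single-instruction match, no extra workspace; the rewriter wraps it in a
    // kCustom fusion with a "__custom_fusion" backend config and kernel index.
    return Match{config, {instr}, /*workspace_size_bytes=*/0};
  }
};

}  // namespace xla::gpu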
3f4b2a34-7de6-42d3-a0cc-6171f8717ed0
cpp
tensorflow/tensorflow
reduce_scatter_creator
third_party/xla/xla/service/gpu/transforms/reduce_scatter_creator.cc
third_party/xla/xla/service/gpu/transforms/reduce_scatter_creator_test.cc
#include "xla/service/gpu/transforms/reduce_scatter_creator.h" #include <cstdint> #include <optional> #include <vector> #include "absl/container/flat_hash_set.h" #include "absl/log/log.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_query.h" #include "xla/service/collective_opt_utils.h" #include "xla/service/hlo_module_config.h" #include "xla/shape.h" #include "xla/status_macros.h" #include "tsl/platform/errors.h" namespace xla { namespace gpu { absl::StatusOr<bool> ReduceScatterCreator::Run( HloModule *module, const absl::flat_hash_set<absl::string_view> &execution_threads) { const HloModuleConfig &config = module->config(); int64_t next_channel_id = hlo_query::NextChannelId(*module); bool changed = false; for (HloComputation *computation : module->MakeNonfusionComputations(execution_threads)) { for (HloInstruction *instruction : computation->MakeInstructionPostOrder()) { if (instruction->opcode() != HloOpcode::kAllReduce) { continue; } auto *ar = Cast<HloAllReduceInstruction>(instruction); auto ar_spec = MatchReduceScatter(ar, config.num_partitions(), config.replica_count(), false, true); if (!ar_spec) { VLOG(2) << "Cannot match reduce-scatter " << ar->ToString(); continue; } HloInstruction *ds = ar_spec->dynamic_slice; const int64_t split_dim = ar_spec->split_dim; Shape scatter_shape = ar->shape(); const int64_t split_dim_size = scatter_shape.dimensions(split_dim); HloInstruction *rs_input = ar->mutable_operand(0); const int64_t scatter_dim_size = split_dim_size / ar_spec->group_size; TF_RET_CHECK(scatter_dim_size * ar_spec->group_size <= split_dim_size); if (split_dim_size % ar_spec->group_size != 0) { scatter_shape.set_dimensions(split_dim, scatter_dim_size * ar_spec->group_size); rs_input = computation->AddInstruction(HloInstruction::CreateSlice( scatter_shape, rs_input, std::vector<int64_t>(scatter_shape.rank(), 0), scatter_shape.dimensions(), std::vector<int64_t>(scatter_shape.rank(), 1))); } scatter_shape.set_dimensions(split_dim, scatter_dim_size); std::optional<int64_t> channel_id; if (ar->channel_id()) { channel_id = next_channel_id++; } HloInstruction *ars = computation->AddInstruction(HloInstruction::CreateReduceScatter( scatter_shape, {rs_input}, ar->to_apply(), ar->device_list(), ar->constrain_layout(), channel_id, ar->use_global_device_ids(), ar_spec->split_dim)); HloInstruction *result = ars; HloInstruction *reshape = nullptr; if (ds->operand(0) != ar) { reshape = ds->mutable_operand(0); result = computation->AddInstruction( HloInstruction::CreateReshape(ds->shape(), result)); } TF_RETURN_IF_ERROR(ds->ReplaceAllUsesWith(result)); TF_RETURN_IF_ERROR(computation->RemoveInstruction(ds)); if (reshape) { TF_RETURN_IF_ERROR(computation->RemoveInstruction(reshape)); } TF_RETURN_IF_ERROR(computation->RemoveInstructionAndUnusedOperands(ar)); changed = true; } } return changed; } } }
#include "xla/service/gpu/transforms/reduce_scatter_creator.h" #include <cstddef> #include <cstdint> #include <memory> #include <utility> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/algorithm/container.h" #include "absl/log/log.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/hlo_module_config.h" #include "xla/service/pattern_matcher.h" #include "xla/service/pattern_matcher_gmock.h" #include "xla/tests/hlo_test_base.h" #include "xla/util.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { namespace m = ::xla::match; class GpuReduceScatterCreatorTest : public HloTestBase { public: absl::StatusOr<std::unique_ptr<HloModule>> RunPass( absl::string_view hlo_module, int64_t num_replicas, int64_t num_partitions, bool expect_change) { HloModuleConfig config = GetModuleConfigForTest( num_replicas, num_partitions); config.set_use_spmd_partitioning(num_partitions > 1); TF_ASSIGN_OR_RETURN(auto module, ParseAndReturnVerifiedModule(hlo_module, config)); auto changed = ReduceScatterCreator().Run(module.get()); if (!changed.ok()) { return changed.status(); } EXPECT_EQ(changed.value(), expect_change); return absl::StatusOr<std::unique_ptr<HloModule>>(std::move(module)); } size_t AllReduceCount(std::unique_ptr<HloModule> &module) { return CollectiveCount(module, HloOpcode::kAllReduce); } size_t ReduceScatterCount(std::unique_ptr<HloModule> &module) { return CollectiveCount(module, HloOpcode::kAllReduce); } private: size_t CollectiveCount(std::unique_ptr<HloModule> &module, HloOpcode opcode) { return absl::c_count_if( module->entry_computation()->instructions(), [&opcode](HloInstruction *instr) { return instr->opcode() == opcode; }); } }; TEST_F(GpuReduceScatterCreatorTest, AllReplicas) { absl::string_view hlo_string = R"( HloModule AllReduce %sum { %a = f32[] parameter(0) %b = f32[] parameter(1) ROOT %add = f32[] add(%a, %b) } ENTRY %AllReduce { %param = f32[32,8,128]{2,1,0} parameter(0) %all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param), replica_groups={}, to_apply=%sum %table = s32[8]{0} constant({0,1,2,3,4,5,6,7}) %rid = u32[] replica-id() %id = s32[1] dynamic-slice(%table, %rid), dynamic_slice_sizes={1} %reshape = s32[] reshape(%id) %slice_size = s32[] constant(4) %offset = s32[] multiply(%reshape, %slice_size) %zero = s32[] constant(0) ROOT %dynamic-slice = f32[4,8,128] dynamic-slice(%all-reduce, %offset, %zero, %zero), dynamic_slice_sizes={4,8,128} } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, 8, 1, true)); ASSERT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::ReduceScatter(m::Parameter(0)))); const auto *rs = Cast<HloReduceScatterInstruction>( module->entry_computation()->root_instruction()); EXPECT_EQ(rs->scatter_dimension(), 0) << rs->ToString(); EXPECT_EQ(AllReduceCount(module), 0); } TEST_F(GpuReduceScatterCreatorTest, AllReplicasWithOffsetReshape) { absl::string_view hlo_string = R"( HloModule AllReduce %sum { %a = f32[] parameter(0) %b = f32[] parameter(1) ROOT %add = f32[] add(%a, %b) } ENTRY %AllReduce { %param = f32[32,8,128]{2,1,0} parameter(0) %all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param), replica_groups={}, to_apply=%sum %table = s32[8]{0} constant({0,1,2,3,4,5,6,7}) %rid = u32[] replica-id() %id = s32[1] dynamic-slice(%table, %rid), 
dynamic_slice_sizes={1} %slice_size = s32[1] constant({4}) %offset = s32[1] multiply(%id, %slice_size) %reshape = s32[] reshape(%offset) %zero = s32[] constant(0) ROOT %dynamic-slice = f32[4,8,128] dynamic-slice(%all-reduce, %reshape, %zero, %zero), dynamic_slice_sizes={4,8,128} } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, 8, 1, true)); ASSERT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::ReduceScatter(m::Parameter(0)))); const auto *rs = Cast<HloReduceScatterInstruction>( module->entry_computation()->root_instruction()); EXPECT_EQ(rs->scatter_dimension(), 0) << rs->ToString(); EXPECT_EQ(AllReduceCount(module), 0); } TEST_F(GpuReduceScatterCreatorTest, AllReplicasWithReshape) { absl::string_view hlo_string = R"( HloModule AllReduce %sum { %a = f32[] parameter(0) %b = f32[] parameter(1) ROOT %add = f32[] add(%a, %b) } ENTRY %AllReduce { %param = f32[32,8,128]{2,1,0} parameter(0) %all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param), replica_groups={}, to_apply=%sum %table = s32[8]{0} constant({0,1,2,3,4,5,6,7}) %rid = u32[] replica-id() %id = s32[1] dynamic-slice(%table, %rid), dynamic_slice_sizes={1} %reshape = s32[] reshape(%id) %slice_size = s32[] constant(4) %offset = s32[] multiply(%reshape, %slice_size) %zero = s32[] constant(0) %reshape.1 = f32[32,16,64] reshape(%all-reduce) ROOT %dynamic-slice = f32[4,16,64] dynamic-slice(%reshape.1, %offset, %zero, %zero), dynamic_slice_sizes={4,16,64} } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, 8, 1, true)); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Reshape(m::ReduceScatter(m::Parameter(0))))); EXPECT_EQ(AllReduceCount(module), 0); } TEST_F(GpuReduceScatterCreatorTest, AllReplicasWithReshapeSplitDimModified) { absl::string_view hlo_string = R"( HloModule AllReduce %sum { %a = f32[] parameter(0) %b = f32[] parameter(1) ROOT %add = f32[] add(%a, %b) } ENTRY %AllReduce { %param = f32[336,1024] parameter(0) %all-reduce = f32[336,1024] all-reduce(%param), replica_groups={}, to_apply=%sum %rid = u32[] replica-id() %id = s32[] convert(%rid) %slice_size = s32[] constant(128) %offset = s32[] multiply(%id, %slice_size) %zero = s32[] constant(0) %reshape.1 = f32[4,84,1024] reshape(%all-reduce) ROOT %dynamic-slice = f32[4,84,128] dynamic-slice(%reshape.1, %zero, %zero, %offset), dynamic_slice_sizes={4,84,128} } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, 8, 1, true)); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Reshape(m::ReduceScatter(m::Parameter(0))))); EXPECT_EQ(AllReduceCount(module), 0); } TEST_F(GpuReduceScatterCreatorTest, AllReplicasDim2) { absl::string_view hlo_string = R"( HloModule AllReduce %sum { %a = f32[] parameter(0) %b = f32[] parameter(1) ROOT %add = f32[] add(%a, %b) } ENTRY %AllReduce { %param = f32[32,8,128]{2,1,0} parameter(0) %all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param), replica_groups={}, to_apply=%sum %table = s32[8]{0} constant({0,1,2,3,4,5,6,7}) %rid = u32[] replica-id() %rid_s32 = s32[] convert(%rid) %slice_size = s32[] constant(16) %offset = s32[] multiply(%rid_s32, %slice_size) %zero = s32[] constant(0) ROOT %dynamic-slice = f32[32,8,16] dynamic-slice(%all-reduce, %zero, %zero, %offset), dynamic_slice_sizes={32,8,16} } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, 8, 1, true)); ASSERT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::ReduceScatter(m::Parameter(0)))); const auto *rs = Cast<HloReduceScatterInstruction>( 
module->entry_computation()->root_instruction()); EXPECT_EQ(rs->scatter_dimension(), 2) << rs->ToString(); EXPECT_EQ(AllReduceCount(module), 0); } TEST_F(GpuReduceScatterCreatorTest, AllReplicasWrongOffsets) { absl::string_view hlo_string = R"( HloModule AllReduce %sum { %a = f32[] parameter(0) %b = f32[] parameter(1) ROOT %add = f32[] add(%a, %b) } ENTRY %AllReduce { %param = f32[32,8,128]{2,1,0} parameter(0) %all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param), replica_groups={}, to_apply=%sum %table = s32[8]{0} constant({0,1,2,3,4,5,6,8}) %rid = u32[] replica-id() %id = s32[1] dynamic-slice(%table, %rid), dynamic_slice_sizes={1} %reshape = s32[] reshape(%id) %slice_size = s32[] constant(4) %offset = s32[] multiply(%reshape, %slice_size) %zero = s32[] constant(0) ROOT %dynamic-slice = f32[4,8,128] dynamic-slice(%all-reduce, %offset, %zero, %zero), dynamic_slice_sizes={4,8,128} } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, 8, 1, false)); } TEST_F(GpuReduceScatterCreatorTest, AllReplicasIotaTable) { absl::string_view hlo_string = R"( HloModule AllReduce %sum { %a = f32[] parameter(0) %b = f32[] parameter(1) ROOT %add = f32[] add(%a, %b) } ENTRY %AllReduce { %param = f32[32,8,128]{2,1,0} parameter(0) %all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param), replica_groups={}, to_apply=%sum %table = s32[8]{0} iota(), iota_dimension=0 %rid = u32[] replica-id() %id = s32[1] dynamic-slice(%table, %rid), dynamic_slice_sizes={1} %reshape = s32[] reshape(%id) %slice_size = s32[] constant(4) %offset = s32[] multiply(%reshape, %slice_size) %zero = s32[] constant(0) ROOT %dynamic-slice = f32[4,8,128] dynamic-slice(%all-reduce, %offset, %zero, %zero), dynamic_slice_sizes={4,8,128} } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, 8, 2, true)); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::ReduceScatter(m::Parameter(0)))); EXPECT_EQ(AllReduceCount(module), 0); } TEST_F(GpuReduceScatterCreatorTest, SubgroupedReplicas) { absl::string_view hlo_string = R"( HloModule AllReduce %sum { %a = f32[] parameter(0) %b = f32[] parameter(1) ROOT %add = f32[] add(%a, %b) } ENTRY %AllReduce { %param = f32[32,8,128]{2,1,0} parameter(0) %all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param), replica_groups={{1,3,2,0},{4,5,6,7}}, to_apply=%sum %gtable = s32[8]{0} constant({3,0,2,1,0,1,2,3}) %rid = u32[] replica-id() %id = s32[1] dynamic-slice(%gtable, %rid), dynamic_slice_sizes={1} %reshape.0 = s32[] reshape(%id) %table = s32[4]{0} constant({0,8,16,24}) %offset = s32[1] dynamic-slice(%table, %reshape.0), dynamic_slice_sizes={1} %reshape.1 = s32[] reshape(%offset) %zero = s32[] constant(0) ROOT %dynamic-slice = f32[8,8,128] dynamic-slice(%all-reduce, %reshape.1, %zero, %zero), dynamic_slice_sizes={8,8,128} } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, 8, 2, true)); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::ReduceScatter(m::Parameter(0)))); EXPECT_EQ(AllReduceCount(module), 0); } TEST_F(GpuReduceScatterCreatorTest, AllPartitions) { absl::string_view hlo_string = R"( HloModule AllReduce %sum { %a = f32[] parameter(0) %b = f32[] parameter(1) ROOT %add = f32[] add(%a, %b) } ENTRY %AllReduce { %param = f32[32,8,128]{2,1,0} parameter(0) %all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param), replica_groups={{0},{1}}, to_apply=%sum, channel_id=1 %table = s32[8]{0} constant({0,1,2,3,4,5,6,7}) %pid = u32[] partition-id() %id = s32[1] dynamic-slice(%table, %pid), dynamic_slice_sizes={1} %reshape = s32[] reshape(%id) %slice_size 
= s32[] constant(4) %offset = s32[] multiply(%reshape, %slice_size) %zero = s32[] constant(0) ROOT %dynamic-slice = f32[4,8,128] dynamic-slice(%all-reduce, %offset, %zero, %zero), dynamic_slice_sizes={4,8,128} } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, 2, 8, true)); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::ReduceScatter(m::Parameter(0)))); EXPECT_EQ(AllReduceCount(module), 0); } TEST_F(GpuReduceScatterCreatorTest, AllReduceFollowedByAllReduce) { absl::string_view hlo_string = R"( HloModule AllReduce %sum { %a = f32[] parameter(0) %b = f32[] parameter(1) ROOT %add = f32[] add(%a, %b) } ENTRY %AllReduce { %param = f32[32,8,128]{2,1,0} parameter(0) %all-reduce.scattered = f32[32,8,128]{2,1,0} all-reduce(%param), replica_groups={{0,1,2,3,4,5,6,7},{8,9,10,11,12,13,14,15}}, to_apply=%sum, use_global_device_ids=true, channel_id=1 %table = s32[8]{0} constant({0,1,2,3,4,5,6,7}) %pid = u32[] partition-id() %id = s32[1] dynamic-slice(%table, %pid), dynamic_slice_sizes={1} %reshape = s32[] reshape(%id) %slice_size = s32[] constant(4) %offset = s32[] multiply(%reshape, %slice_size) %zero = s32[] constant(0) %dynamic-slice = f32[4,8,128] dynamic-slice(%all-reduce.scattered, %offset, %zero, %zero), dynamic_slice_sizes={4,8,128} ROOT %all-reduce.sync = f32[4,8,128]{2,1,0} all-reduce(%dynamic-slice), replica_groups={{0,8},{1,9},{2,10},{3,11},{4,12},{5,13},{6,14},{7,15}}, to_apply=%sum, use_global_device_ids=true, channel_id=2 } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, 2, 8, true)); EXPECT_EQ(AllReduceCount(module), 1); EXPECT_EQ(ReduceScatterCount(module), 1); } TEST_F(GpuReduceScatterCreatorTest, SubgroupsGlobals) { absl::string_view hlo_string = R"( HloModule AllReduce %sum { %a = f32[] parameter(0) %b = f32[] parameter(1) ROOT %add = f32[] add(%a, %b) } ENTRY %AllReduce { %param = f32[32,8,128]{2,1,0} parameter(0) %all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param), replica_groups={{1,3,2,0},{4,5,6,7}}, to_apply=%sum, channel_id=1, use_global_device_ids=true %pid = u32[] partition-id() %rid = u32[] replica-id() %pcount = u32[] constant(4) %ridxp = u32[] multiply(%rid, %pcount) %gid = u32[] add(%ridxp, %pid) %gtable = s32[8]{0} constant({3,0,2,1,0,1,2,3}) %id = s32[1] dynamic-slice(%gtable, %gid), dynamic_slice_sizes={1} %reshape.0 = s32[] reshape(%id) %table = s32[4]{0} constant({0,8,16,24}) %offset = s32[1] dynamic-slice(%table, %reshape.0), dynamic_slice_sizes={1} %reshape.1 = s32[] reshape(%offset) %zero = s32[] constant(0) ROOT %dynamic-slice = f32[8,8,128] dynamic-slice(%all-reduce, %reshape.1, %zero, %zero), dynamic_slice_sizes={8,8,128} } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, 2, 4, true)); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::ReduceScatter(m::Parameter(0)))); EXPECT_EQ(AllReduceCount(module), 0); } TEST_F(GpuReduceScatterCreatorTest, SubgroupsGlobalsOrthogonalReplicas) { absl::string_view hlo_string = R"( HloModule AllReduce %sum { %a = f32[] parameter(0) %b = f32[] parameter(1) ROOT %add = f32[] add(%a, %b) } ENTRY %AllReduce { %param = f32[32,8,128]{2,1,0} parameter(0) %all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param), replica_groups={{1,3,2,0},{5,7,6,4}}, to_apply=%sum, channel_id=1, use_global_device_ids=true %pid = u32[] partition-id() %pid_table = s32[4]{0} constant({3,0,2,1}) %offset = s32[1] dynamic-slice(%pid_table, %pid), dynamic_slice_sizes={1} %reshape = s32[] reshape(%offset) %shard_size = s32[] constant(8) %mul = s32[] multiply(%reshape, 
%shard_size) %zero = s32[] constant(0) ROOT %dynamic-slice = f32[8,8,128] dynamic-slice(%all-reduce, %mul, %zero, %zero), dynamic_slice_sizes={8,8,128} } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, 2, 4, true)); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::ReduceScatter(m::Parameter(0)))); EXPECT_EQ(AllReduceCount(module), 0); } TEST_F(GpuReduceScatterCreatorTest, SubgroupsGlobalsNonOrthogonalReplicas) { absl::string_view hlo_string = R"( HloModule AllReduce %sum { %a = f32[] parameter(0) %b = f32[] parameter(1) ROOT %add = f32[] add(%a, %b) } ENTRY %AllReduce { %param = f32[32,8,128]{2,1,0} parameter(0) %all-reduce = f32[32,8,128]{2,1,0} all-reduce(%param), replica_groups={{1,3,2,0},{7,5,6,4}}, to_apply=%sum, channel_id=1, use_global_device_ids=true %pid = u32[] partition-id() %pid_table = s32[4]{0} constant({3,0,2,1}) %offset = s32[1] dynamic-slice(%pid_table, %pid), dynamic_slice_sizes={1} %reshape = s32[] reshape(%offset) %shard_size = s32[] constant(8) %mul = s32[] multiply(%reshape, %shard_size) %zero = s32[] constant(0) ROOT %dynamic-slice = f32[8,8,128] dynamic-slice(%all-reduce, %mul, %zero, %zero), dynamic_slice_sizes={8,8,128} } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, 2, 4, false)); } TEST_F(GpuReduceScatterCreatorTest, NonUniformSplit) { absl::string_view hlo_string = R"( HloModule AllReduce %sum { %a = f32[] parameter(0) %b = f32[] parameter(1) ROOT %add = f32[] add(%a, %b) } ENTRY %AllReduce { %param = f32[1,7]{1,0} parameter(0) %all-reduce = f32[1,7]{1,0} all-reduce(%param), replica_groups={{0,1},{2,3},{4,5},{6,7}}, to_apply=%sum, channel_id=1, use_global_device_ids=true %pid = u32[] partition-id() %pid_table = s32[8]{0} constant({0, 1, 0, 1, 0, 1, 0, 1}) %offset = s32[1] dynamic-slice(%pid_table, %pid), dynamic_slice_sizes={1} %reshape = s32[] reshape(%offset) %shard_size = s32[] constant(3) %mul = s32[] multiply(%reshape, %shard_size) %zero = s32[] constant(0) ROOT %dynamic-slice = f32[1,3] dynamic-slice(%all-reduce, %zero, %mul), dynamic_slice_sizes={1,3} } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, 1, 8, true)); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::ReduceScatter(m::Slice(m::Parameter(0))))); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/reduce_scatter_creator.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/reduce_scatter_creator_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
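ReduceScatterCreator above rewrites an all-reduce whose result is consumed through a per-replica dynamic-slice into a reduce-scatter, first slicing the operand down to scatter_dim_size * group_size when the split dimension is not evenly divisible (the NonUniformSplit test exercises that path). Note that the test helper ReduceScatterCount above passes HloOpcode::kAllReduce to CollectiveCount; counting reduce-scatter ops presumably requires kReduceScatter, as in this corrected sketch of the helpers.

// Corrected sketch of the test helpers above, assuming ReduceScatterCount is
// meant to count reduce-scatter ops rather than all-reduces.
#include <cstddef>
#include <memory>
#include "absl/algorithm/container.h"
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/ir/hlo_opcode.h"

namespace xla::gpu {

size_t CollectiveCount(std::unique_ptr<HloModule>& module, HloOpcode opcode) {
  return absl::c_count_if(
      module->entry_computation()->instructions(),
      [&opcode](HloInstruction* instr) { return instr->opcode() == opcode; });
}

size_t AllReduceCount(std::unique_ptr<HloModule>& module) {
  return CollectiveCount(module, HloOpcode::kAllReduce);
}

size_t ReduceScatterCount(std::unique_ptr<HloModule>& module) {
  return CollectiveCount(module, HloOpcode::kReduceScatter);  // not kAllReduce
}

}  // namespace xla::gpu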
4fb9f5f6-6c15-4a67-965b-bbbacbf591aa
cpp
tensorflow/tensorflow
fusion_merger
third_party/xla/xla/service/gpu/transforms/fusion_merger.cc
third_party/xla/xla/service/gpu/transforms/fusion_merger_test.cc
#include "xla/service/gpu/transforms/fusion_merger.h" #include <optional> #include <string> #include <vector> #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_join.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/gpu/gpu_fusible.h" #include "xla/service/gpu/model/gpu_hlo_cost_analysis.h" #include "xla/service/gpu/model/gpu_performance_model.h" #include "xla/service/gpu/model/gpu_performance_model_base.h" #include "xla/service/hlo_cost_analysis.h" #include "xla/service/hlo_graph_dumper.h" #include "xla/service/instruction_fusion.h" #include "xla/shape_util.h" #include "xla/stream_executor/device_description.h" #include "xla/util.h" #include "tsl/platform/errors.h" #include "tsl/platform/status.h" namespace xla { namespace gpu { class FusionInstructionMerger { public: explicit FusionInstructionMerger( HloComputation* computation, const se::DeviceDescription& gpu_device_info, HloCostAnalysis::ShapeSizeFunction shape_size_function) : computation_(computation), shape_size_function_(shape_size_function), gpu_device_info_(gpu_device_info), dump_fusion_visualization_(computation->parent() ->config() .debug_options() .xla_dump_fusion_visualization()) {} absl::Status Run(); bool changed() const { return changed_; } private: FusionDecision ShouldFuse(HloInstruction* producer); absl::Status FuseIntoAllUsers(HloInstruction* producer); HloComputation* computation_; HloCostAnalysis::ShapeSizeFunction shape_size_function_; std::optional<GpuHloCostAnalysis> cost_analysis_; FusionInfoCache fusion_info_cache_; const se::DeviceDescription& gpu_device_info_; bool changed_ = false; bool dump_fusion_visualization_ = false; int total_visited_ = 0; int total_merged_ = 0; int num_fail_no_users_ = 0; int num_fail_not_loop_fusion_ = 0; int num_fail_merge_all_users_ = 0; int num_fail_inefficient_fusion_emitter_ = 0; int num_fail_fusion_too_large_ = 0; int num_fail_uncoalesced_read_ = 0; int num_fail_slower_if_fused_ = 0; FusionInstructionMerger(const FusionInstructionMerger&) = delete; FusionInstructionMerger& operator=(const FusionInstructionMerger&) = delete; }; absl::Status FusionInstructionMerger::FuseIntoAllUsers( HloInstruction* producer) { std::vector<HloInstruction*> users = producer->users(); for (HloInstruction* user : users) { if (dump_fusion_visualization_) { RegisterFusionState( *computation_, absl::StrCat("About to fuse |", producer->name(), "| into |", user->name(), "| inside FusionMerger"), *user, producer); } TF_RETURN_IF_ERROR(cost_analysis_->RemoveInstruction(user)); HloInstruction* consumer = user; if (consumer->opcode() != HloOpcode::kFusion) { consumer = computation_->AddInstruction(HloInstruction::CreateFusion( user->shape(), ChooseFusionKind(*producer, *user), user)); TF_CHECK_OK(computation_->ReplaceInstruction(user, consumer)); } consumer->MergeFusionInstruction(producer); TF_RETURN_IF_ERROR(cost_analysis_->RevisitInstruction(consumer)); fusion_info_cache_.Invalidate(consumer); if (dump_fusion_visualization_) { RegisterFusionState(*computation_, absl::StrCat("Fused |", producer->name(), "| into |", user->name(), "| inside FusionMerger"), *consumer); } changed_ = true; } CHECK_EQ(0, producer->user_count()) << producer->ToString(); TF_RETURN_IF_ERROR(computation_->RemoveInstruction(producer)); 
TF_RETURN_IF_ERROR(cost_analysis_->RemoveInstruction(producer)); fusion_info_cache_.Invalidate(producer); VLOG(2) << "Merged fusion instruction: " << producer->name() << " into users { " << absl::StrJoin(users, ", ", [](std::string* out, HloInstruction* user) { absl::StrAppend(out, user->name()); }) << " }"; return absl::OkStatus(); } absl::Status FusionInstructionMerger::Run() { for (HloInstruction* producer : computation_->MakeInstructionPostOrder()) { if (producer->opcode() != HloOpcode::kFusion) { continue; } FusionDecision should_fuse = ShouldFuse(producer); if (should_fuse) { TF_RETURN_IF_ERROR(FuseIntoAllUsers(producer)); ++total_merged_; } else { VLOG(3) << "Not fusing fusion |" << producer->name() << "| with all of it's users due to: " << should_fuse.Explain(); if (dump_fusion_visualization_ && !producer->users().empty()) { RegisterFusionState( *computation_, absl::StrCat( "Not fusing fusion |", producer->name(), "| into all of its users due to: ", should_fuse.Explain()), *producer->users()[0], producer); } } } VLOG(1) << "FusionInstructionMerger EXIT" << " computation: " << computation_->name() << " total_visited: " << total_visited_ << " total_merged: " << total_merged_ << " merge failures { " << " no_users: " << num_fail_no_users_ << " not_loop_fusion: " << num_fail_not_loop_fusion_ << " merge_all_users: " << num_fail_merge_all_users_ << " uncoalesced_read: " << num_fail_uncoalesced_read_ << " inefficient_fusion_emitter: " << num_fail_inefficient_fusion_emitter_ << " slower_if_fused: " << num_fail_slower_if_fused_ << " fusion_too_large: " << num_fail_fusion_too_large_ << " }"; return absl::OkStatus(); } bool TransposesMostData(const HloInstruction& fusion) { float score = 0; for (const HloInstruction* instr : fusion.fused_instructions()) { if (IsPhysicallyTransposing(*instr)) { score += 1.0 * ShapeUtil::ElementsInRecursive(instr->shape()) / ShapeUtil::ElementsInRecursive(fusion.shape()); if (score >= 0.5) { VLOG(3) << fusion.ToString() << " transpose ratio exceeds " << score; return true; } } } return false; } FusionDecision FusionInstructionMerger::ShouldFuse(HloInstruction* producer) { ++total_visited_; VLOG(4) << "Considering producer " << producer->name(); if (producer->users().empty()) { ++num_fail_no_users_; return FusionDecision::Forbid("fusion has no users"); } if (!producer->IsLoopFusion()) { ++num_fail_not_loop_fusion_; return FusionDecision::Forbid("not a loop fusion"); } auto producer_hero = GetRealHeroForMultiOutputFusion(*producer); bool has_reduction_user = false; for (const HloInstruction* user : producer->users()) { if (user->opcode() == HloOpcode::kBitcast) { ++num_fail_merge_all_users_; return FusionDecision::Forbid("not fusing bitcast ops"); } if (user->IsCustomFusion()) { ++num_fail_merge_all_users_; return FusionDecision::Forbid("not fusing custom fusions"); } auto consumer_hero = GetRealHeroForMultiOutputFusion(*user); if (auto compatible = FusionHeroesAreCompatible(producer_hero, consumer_hero); !compatible) { return compatible; } FusionDecision fusible = IsProducerConsumerFusible(*producer, *user); if (!fusible) { ++num_fail_merge_all_users_; VLOG(9) << user->ToString(); return fusible; } if (IsInputFusibleReduction(*user)) { has_reduction_user = true; } } if (has_reduction_user && TransposesMostData(*producer)) { ++num_fail_uncoalesced_read_; return FusionDecision::Forbid("would read mostly uncoalesced"); } for (const HloInstruction* user : producer->users()) { FusionDecision fits = FusionFitsInBudget( *user, *producer, gpu_device_info_, true, 
&fusion_info_cache_); if (!fits) { ++num_fail_fusion_too_large_; return fits; } } if (!cost_analysis_) { VLOG(2) << "Running full HLO cost analysis for " << computation_->name(); cost_analysis_.emplace( GpuHloCostAnalysis::Options{shape_size_function_, {}, {}, true}, gpu_device_info_); TF_CHECK_OK(computation_->Accept(&cost_analysis_.value())); } for (const HloInstruction* user : producer->users()) { if (cost_analysis_->ProducerConsumerMergedTooLarge(*producer, *user)) { ++num_fail_inefficient_fusion_emitter_; return FusionDecision::Forbid("if merged with ") << user->name() << " will generate huge IR"; } } GpuPerformanceModel::RunTimes t = GpuPerformanceModel::EstimateRunTimes( producer, gpu_device_info_, &*cost_analysis_, GpuPerformanceModelOptions::Default(), producer->users()); if (t.time_fused > t.time_unfused) { ++num_fail_slower_if_fused_; return FusionDecision::Forbid("will execute slower if fused"); } return FusionDecision::Allow(); } absl::StatusOr<bool> FusionMerger::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { bool changed = false; VLOG(1) << "FusionMerger for module: " << module->name(); for (auto* computation : module->MakeNonfusionComputations(execution_threads)) { VLOG(9) << "Before running FusionInstructionMerger for computation: " << computation->name(); XLA_VLOG_LINES(9, computation->ToString()); FusionInstructionMerger fusion_merger(computation, gpu_device_info_, shape_size_function_); TF_RETURN_IF_ERROR(fusion_merger.Run()); changed |= fusion_merger.changed(); VLOG(9) << "After running FusionInstructionMerger for computation: " << computation->name() << " changed: " << changed; XLA_VLOG_LINES(9, computation->ToString()); } return changed; } } }
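FusionMerger walks each computation and, for every loop-fusion producer, asks ShouldFuse whether merging it into all of its users is both legal and profitable: every user must be producer-consumer fusible with it, the merged fusions must fit the budget and not generate huge IR, and GpuPerformanceModel::EstimateRunTimes must not predict a slowdown. Below is a minimal sketch of constructing and running the pass the way the test that follows does; MergeFusions is an illustrative wrapper, and kPointerSize = 8 matches the test's shape-size function.

// Sketch of driving FusionMerger standalone, mirroring the test fixture below.
#include <cstdint>
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/transforms/fusion_merger.h"
#include "xla/shape.h"
#include "xla/shape_util.h"

namespace xla::gpu {

absl::StatusOr<bool> MergeFusions(HloModule* module) {
  // Shape-size function identical to the test's ShapeSizeBytesFunction.
  auto shape_size = [](const Shape& shape) {
    constexpr int64_t kPointerSize = 8;
    return ShapeUtil::ByteSizeOf(shape, kPointerSize);
  };
  FusionMerger merger(TestGpuDeviceInfo::RTXA6000DeviceInfo(), shape_size);
  return merger.Run(module);  // true iff at least one fusion was merged
}

}  // namespace xla::gpu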
#include "xla/service/gpu/transforms/fusion_merger.h" #include <cstdint> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/types/span.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/gpu/gpu_device_info_for_tests.h" #include "xla/service/gpu/gpu_fusible.h" #include "xla/service/hlo_cost_analysis.h" #include "xla/service/pattern_matcher.h" #include "xla/service/pattern_matcher_gmock.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/tests/hlo_test_base.h" #include "xla/xla_data.pb.h" namespace xla { namespace gpu { namespace { namespace m = ::xla::match; class FusionMergerTest : public HloTestBase { HloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() const { return [&](const Shape& shape) { constexpr int64_t kPointerSize = 8; return ShapeUtil::ByteSizeOf(shape, kPointerSize); }; } public: FusionMerger fusion_merger_{TestGpuDeviceInfo::RTXA6000DeviceInfo(), ShapeSizeBytesFunction()}; FusionMergerTest() : HloTestBase() {} }; TEST_F(FusionMergerTest, MergeSharedFusionInstruction) { auto module = ParseAndReturnVerifiedModule(R"( HloModule MergeSharedFusionInstruction comp.3 { constant.param_0 = f32[4]{0} parameter(0) param.param_1.2 = (f32[4]{0}, f32[4]{0}, f32[4]{0}) parameter(1) get-tuple-element.6 = f32[4]{0} get-tuple-element(param.param_1.2), index=0 ROOT add.7 = f32[4]{0} add(constant.param_0, get-tuple-element.6) } comp.2 { param.param_1.1 = (f32[4]{0}, f32[4]{0}, f32[4]{0}) parameter(0) get-tuple-element.4 = f32[4]{0} get-tuple-element(param.param_1.1), index=1 get-tuple-element.5 = f32[4]{0} get-tuple-element(param.param_1.1), index=2 ROOT add.6 = f32[4]{0} add(get-tuple-element.4, get-tuple-element.5) } comp.1 { add.1.param_1.1 = f32[4]{0} parameter(1) constant.param_1.3 = f32[4]{0} parameter(0) add.5 = f32[4]{0} add(add.1.param_1.1, constant.param_1.3) ROOT multiply.3 = f32[4]{0} multiply(add.5, constant.param_1.3) } comp { add.1.param_1 = f32[4]{0} parameter(1) constant.param_1.1 = f32[4]{0} parameter(0) multiply.2 = f32[4]{0} multiply(add.1.param_1, constant.param_1.1) ROOT add.4 = f32[4]{0} add(multiply.2, constant.param_1.1) } ENTRY MergeSharedFusionInstruction.Computation0 { constant = f32[4]{0} constant({1, 1, 1, 1}) param = (f32[4]{0}, f32[4]{0}, f32[4]{0}) parameter(0) fusion.3 = f32[4]{0} fusion(constant, param), kind=kLoop, calls=comp.3 fusion.4 = f32[4]{0} fusion(param), kind=kLoop, calls=comp.2 fusion.5 = f32[4]{0} fusion(constant, fusion.4), kind=kLoop, calls=comp.1 fusion.6 = f32[4]{0} fusion(constant, fusion.4), kind=kLoop, calls=comp ROOT tuple = (f32[4]{0}, f32[4]{0}, f32[4]{0}) tuple(fusion.3, fusion.5, fusion.6) })") .value(); EXPECT_TRUE(fusion_merger_.Run(module.get()).value()); auto* root = module->entry_computation()->root_instruction(); EXPECT_EQ(HloOpcode::kTuple, root->opcode()); auto* operand0 = root->operand(0); EXPECT_EQ(HloOpcode::kFusion, operand0->opcode()); EXPECT_EQ(4, operand0->fused_instruction_count()); auto* operand1 = root->operand(1); EXPECT_EQ(HloOpcode::kFusion, operand1->opcode()); EXPECT_EQ(7, operand1->fused_instruction_count()); auto* operand2 = root->operand(2); EXPECT_EQ(HloOpcode::kFusion, operand2->opcode()); EXPECT_EQ(7, operand2->fused_instruction_count()); } TEST_F(FusionMergerTest, MoreMemoryAccessIfFused) { auto module = ParseAndReturnVerifiedModule(R"( HloModule m f32add { x = f32[] parameter(0) y = f32[] parameter(1) ROOT _ = f32[] add(x, y) } comp0 { p = (f32[2048], f32[2048], 
f32[2048], f32[2048]) parameter(0) gte0 = f32[2048] get-tuple-element(p), index=0 gte1 = f32[2048] get-tuple-element(p), index=1 add.9 = f32[2048] add(gte0, gte1) gte2 = f32[2048] get-tuple-element(p), index=2 add.10 = f32[2048] add(add.9, gte2) gte3 = f32[2048] get-tuple-element(p), index=3 add.11 = f32[2048] add(add.10, gte3) p1 = (f32[2048], f32[2048], f32[2048], f32[2048]) parameter(1) gte4 = f32[2048] get-tuple-element(p1), index=0 gte5 = f32[2048] get-tuple-element(p1), index=1 add.12 = f32[2048] add(gte4, gte5) gte6 = f32[2048] get-tuple-element(p1), index=2 add.13 = f32[2048] add(add.12, gte6) gte7 = f32[2048] get-tuple-element(p1), index=3 add.14 = f32[2048] add(add.13, gte7) ROOT r = f32[2048] add(add.14, add.11) } comp1 { p = f32[2048] parameter(0) c0 = f32[] constant(0) ROOT r = f32[] reduce(p, c0), dimensions={0}, to_apply=f32add } comp2 { p = f32[2048] parameter(0) c0 = f32[] constant(0) r = f32[] reduce(p, c0), dimensions={0}, to_apply=f32add ROOT n = f32[] negate(r) } ENTRY m.Computation2 { p0 = (f32[2048], f32[2048], f32[2048], f32[2048]) parameter(0) p1 = (f32[2048], f32[2048], f32[2048], f32[2048]) parameter(1) fusion.0 = f32[2048] fusion(p0, p1), kind=kLoop, calls=comp0 fusion.1 = f32[] fusion(fusion.0), kind=kLoop, calls=comp1 fusion.2 = f32[] fusion(fusion.0), kind=kLoop, calls=comp2 ROOT tuple = (f32[], f32[]) tuple(fusion.1, fusion.2) } )") .value(); EXPECT_FALSE(fusion_merger_.Run(module.get()).value()); } TEST_F(FusionMergerTest, LessMemoryAccessIfFused) { auto module = ParseAndReturnVerifiedModule(R"( HloModule m comp.2 { state.param_1.1 = (f32[4]{0}, f32[4]{0}, f32[4]{0}) parameter(0) get-tuple-element.5 = f32[4]{0} get-tuple-element(state.param_1.1), index=0 get-tuple-element.6 = f32[4]{0} get-tuple-element(state.param_1.1), index=1 add.7 = f32[4]{0} add(get-tuple-element.5, get-tuple-element.6) get-tuple-element.7 = f32[4]{0} get-tuple-element(state.param_1.1), index=2 ROOT add.8 = f32[4]{0} add(add.7, get-tuple-element.7) } comp.1 { add.1.param_1.1 = f32[4]{0} parameter(1) constant.param_1.3 = f32[4]{0} parameter(0) add.5 = f32[4]{0} add(add.1.param_1.1, constant.param_1.3) ROOT multiply.3 = f32[4]{0} multiply(add.5, constant.param_1.3) } comp { add.1.param_1 = f32[4]{0} parameter(1) constant.param_1.1 = f32[4]{0} parameter(0) multiply.2 = f32[4]{0} multiply(add.1.param_1, constant.param_1.1) ROOT add.4 = f32[4]{0} add(multiply.2, constant.param_1.1) } ENTRY m.Computation2 { constant = f32[4]{0} constant({1, 1, 1, 1}) state = (f32[4]{0}, f32[4]{0}, f32[4]{0}) parameter(0) fusion.2 = f32[4]{0} fusion(state), kind=kLoop, calls=comp.2 fusion.3 = f32[4]{0} fusion(constant, fusion.2), kind=kLoop, calls=comp.1 fusion.4 = f32[4]{0} fusion(constant, fusion.2), kind=kLoop, calls=comp ROOT tuple = (f32[4]{0}, f32[4]{0}) tuple(fusion.3, fusion.4) })") .value(); EXPECT_TRUE(fusion_merger_.Run(module.get()).value()); } TEST_F(FusionMergerTest, WillMergeIntoInputFusion) { auto module = ParseAndReturnVerifiedModule(R"( HloModule m f1_computation { f1_p0 = f32[32]{0} parameter(0) ROOT f1_root = f32[32]{0} add(f1_p0, f1_p0) } add_computation { add_lhs = f32[] parameter(0) add_rhs = f32[] parameter(1) ROOT add_root = f32[] add(add_lhs, add_rhs) } f2_computation { f2_p0 = f32[32]{0} parameter(0) f2_mul = f32[32]{0} multiply(f2_p0, f2_p0) f2_zero = f32[] constant(0) ROOT f2_root = f32[] reduce(f2_mul, f2_zero), dimensions={0}, to_apply=add_computation } ENTRY entry { p0 = f32[32]{0} parameter(0) f1 = f32[32]{0} fusion(p0), kind=kLoop, calls=f1_computation ROOT f2 = f32[] 
fusion(f1), kind=kInput, calls=f2_computation })") .value(); EXPECT_TRUE(fusion_merger_.Run(module.get()).value()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Fusion(m::Parameter()))); } TEST_F(FusionMergerTest, WillMergeIntoUnfusedConsumer) { auto module = ParseAndReturnVerifiedModule(R"( HloModule jit_matmul.36 max (parameter.13: f32[], parameter.14: f32[]) -> f32[] { parameter.13 = f32[] parameter(0) parameter.14 = f32[] parameter(1) ROOT maximum.15 = f32[] maximum(f32[] parameter.13, f32[] parameter.14) } add (parameter.29: f32[], parameter.30: f32[]) -> f32[] { parameter.29 = f32[] parameter(0) parameter.30 = f32[] parameter(1) ROOT add.31 = f32[] add(f32[] parameter.29, f32[] parameter.30) } fused_computation.1 (param_1.4: f32[200,200,200], param_2.1: f32[200,200]) -> f32[200,200] { param_1.4 = f32[200,200,200]{2,1,0} parameter(0) param_2.1 = f32[200,200]{1,0} parameter(1) broadcast.3 = f32[200,200,200]{2,1,0} broadcast(f32[200,200]{1,0} param_2.1), dimensions={0,2} subtract.0 = f32[200,200,200]{2,1,0} subtract(f32[200,200,200]{2,1,0} param_1.4, f32[200,200,200]{2,1,0} broadcast.3) exponential.0 = f32[200,200,200]{2,1,0} exponential(f32[200,200,200]{2,1,0} subtract.0) constant.27 = f32[] constant(0) ROOT reduce.0 = f32[200,200]{1,0} reduce(f32[200,200,200]{2,1,0} exponential.0, f32[] constant.27), dimensions={1}, to_apply=add } fused_computation.3 (param_0.7: f32[200,200], param_1.9: f32[200,200]) -> f32[200,200,200] { param_1.9 = f32[200,200]{1,0} parameter(1) broadcast.10 = f32[200,200,200]{2,1,0} broadcast(f32[200,200]{1,0} param_1.9), dimensions={0,1} param_0.7 = f32[200,200]{1,0} parameter(0) broadcast.8 = f32[200,200,200]{2,1,0} broadcast(f32[200,200]{1,0} param_0.7), dimensions={1,2} ROOT add.1 = f32[200,200,200]{2,1,0} add(f32[200,200,200]{2,1,0} broadcast.10, f32[200,200,200]{2,1,0} broadcast.8) } ENTRY entry (parameter.1: f32[200,200], parameter.2: f32[200,200]) -> f32[200,200] { parameter.2 = f32[200,200]{1,0} parameter(1) parameter.1 = f32[200,200]{1,0} parameter(0) fusion.3 = f32[200,200,200]{2,1,0} fusion(f32[200,200]{1,0} parameter.2, f32[200,200]{1,0} parameter.1), kind=kLoop, calls=fused_computation.3 constant.11 = f32[] constant(-inf) reduce.16 = f32[200,200]{1,0} reduce(f32[200,200,200]{2,1,0} fusion.3, f32[] constant.11), dimensions={1}, to_apply=max ROOT fusion.1 = f32[200,200]{1,0} fusion(f32[200,200,200]{2,1,0} fusion.3, f32[200,200]{1,0} reduce.16), kind=kInput, calls=fused_computation.1 })") .value(); EXPECT_TRUE(fusion_merger_.Run(module.get()).value()); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch(m::Fusion(m::Fusion(), m::Parameter(), m::Parameter()))); } TEST_F(FusionMergerTest, WillNotMergeReduceUnfriendlyLayouts) { auto module = ParseAndReturnVerifiedModule(R"( HloModule m f1_computation { f1_p0 = f32[16,16,256]{0,1,2} parameter(0) add = f32[16,16,256]{0,1,2} add(f1_p0, f1_p0) ROOT f1_root = f32[16,16,256]{2,1,0} copy(add) } add_computation { add_lhs = f32[] parameter(0) add_rhs = f32[] parameter(1) ROOT add_root = f32[] add(add_lhs, add_rhs) } f2_computation { f2_p0 = f32[16,16,256]{2,1,0} parameter(0) f2_zero = f32[] constant(0) ROOT f2_root = f32[] reduce(f2_p0, f2_zero), dimensions={0,1,2}, to_apply=add_computation } ENTRY entry { p0 = f32[16,16,256]{0,1,2} parameter(0) f1 = f32[16,16,256]{2,1,0} fusion(p0), kind=kLoop, calls=f1_computation ROOT f2 = f32[] fusion(f1), kind=kInput, calls=f2_computation })") .value(); EXPECT_FALSE(fusion_merger_.Run(module.get()).value()); } 
TEST_F(FusionMergerTest, WillMergeReduceNotTooUnfriendlyLayouts) { auto module = ParseAndReturnVerifiedModule(R"( HloModule m f1_computation { f1_p0 = f32[16,16,256]{0,1,2} parameter(0) slice1 = f32[5,16,256]{0,1,2} slice(f1_p0), slice={[0:5], [0:16], [0:256]} f1_copy = f32[5,16,256]{2,1,0} copy(slice1) slice2 = f32[11,16,256]{0,1,2} slice(f1_p0), slice={[0:11], [0:16], [0:256]} bitcast = f32[11,16,256]{2,1,0} bitcast(slice2) ROOT f1_root = f32[16,16,256]{2,1,0} concatenate(f1_copy, bitcast), dimensions={0} } add_computation { add_lhs = f32[] parameter(0) add_rhs = f32[] parameter(1) ROOT add_root = f32[] add(add_lhs, add_rhs) } f2_computation { f2_p0 = f32[16,16,256]{2,1,0} parameter(0) f2_zero = f32[] constant(0) ROOT f2_root = f32[16,16] reduce(f2_p0, f2_zero), dimensions={2}, to_apply=add_computation } ENTRY entry { p0 = f32[16,16,256]{0,1,2} parameter(0) f1 = f32[16,16,256]{2,1,0} fusion(p0), kind=kLoop, calls=f1_computation ROOT f2 = f32[16,16] fusion(f1), kind=kInput, calls=f2_computation })") .value(); EXPECT_TRUE(fusion_merger_.Run(module.get()).value()); } TEST_F(FusionMergerTest, AvoidsLargeFusion) { constexpr int64_t kNumParams = MaxOperandsAndOutputsPerFusion() + 1; auto module = CreateNewVerifiedModule(); HloComputation::Builder b(TestName()); Shape shape = ShapeUtil::MakeShape(F32, {10, 100}); std::vector<HloInstruction*> entry_params; for (int64_t i = 0; i < kNumParams; ++i) { entry_params.push_back( b.AddInstruction(HloInstruction::CreateParameter(i, shape, "p"))); } auto make_fusion = [&](absl::Span<HloInstruction* const> params) { HloComputation::Builder sub_builder("subcomp"); HloInstruction* sum = nullptr; for (int64_t i = 0; i < params.size(); ++i) { auto p = sub_builder.AddInstruction( HloInstruction::CreateParameter(i, shape, "p")); if (sum == nullptr) { sum = p; } else { sum = sub_builder.AddInstruction( HloInstruction::CreateBinary(shape, HloOpcode::kAdd, sum, p)); } } HloComputation* subcomp = module->AddEmbeddedComputation(sub_builder.Build()); return HloInstruction::CreateFusion( shape, HloInstruction::FusionKind::kLoop, params, subcomp); }; auto fusion = b.AddInstruction( make_fusion(absl::MakeSpan(entry_params) .subspan(0, MaxOperandsAndOutputsPerFusion()))); b.AddInstruction(make_fusion({entry_params.back(), fusion})); module->AddEntryComputation(b.Build()); EXPECT_FALSE(fusion_merger_.Run(module.get()).value()); } TEST_F(FusionMergerTest, WillNotMergeIfFusionEmitterIsInefficient) { auto module = ParseAndReturnVerifiedModule(R"( HloModule m f1 { Arg_0.5 = f32[200000] parameter(0) slice.7 = f32[100000] slice(Arg_0.5), slice={[0:199999:2]} slice.8 = f32[100000] slice(Arg_0.5), slice={[1:200000:2]} add.9 = f32[100000] add(slice.7, slice.8) slice.10 = f32[50000] slice(add.9), slice={[0:99999:2]} slice.11 = f32[50000] slice(add.9), slice={[1:100000:2]} add.12 = f32[50000] add(slice.10, slice.11) slice.13 = f32[25000] slice(add.12), slice={[0:49999:2]} slice.14 = f32[25000] slice(add.12), slice={[1:50000:2]} add.15 = f32[25000] add(slice.13, slice.14) slice.16 = f32[12500] slice(add.15), slice={[0:24999:2]} slice.17 = f32[12500] slice(add.15), slice={[1:25000:2]} add.18 = f32[12500] add(slice.16, slice.17) slice.19 = f32[6250] slice(add.18), slice={[0:12499:2]} slice.20 = f32[6250] slice(add.18), slice={[1:12500:2]} add.21 = f32[6250] add(slice.19, slice.20) slice.22 = f32[3125] slice(add.21), slice={[0:6249:2]} slice.23 = f32[3125] slice(add.21), slice={[1:6250:2]} ROOT add.24 = f32[3125] add(slice.22, slice.23) } f2 { Arg_0 = f32[3125] parameter(0) slice.25 = 
f32[1562] slice(Arg_0), slice={[0:3124:2]} slice.26 = f32[1562] slice(Arg_0), slice={[1:3125:2]} add.27 = f32[1562] add(slice.25, slice.26) slice.28 = f32[781] slice(add.27), slice={[0:1561:2]} slice.29 = f32[781] slice(add.27), slice={[1:1562:2]} add.30 = f32[781] add(slice.28, slice.29) slice.31 = f32[390] slice(add.30), slice={[0:780:2]} slice.32 = f32[390] slice(add.30), slice={[1:781:2]} add.33 = f32[390] add(slice.31, slice.32) slice.34 = f32[195] slice(add.33), slice={[0:389:2]} slice.35 = f32[195] slice(add.33), slice={[1:390:2]} add.36 = f32[195] add(slice.34, slice.35) slice.37 = f32[97] slice(add.36), slice={[0:194:2]} slice.38 = f32[97] slice(add.36), slice={[1:195:2]} add.39 = f32[97] add(slice.37, slice.38) slice.40 = f32[48] slice(add.39), slice={[0:96:2]} slice.41 = f32[48] slice(add.39), slice={[1:97:2]} ROOT add.42 = f32[48] add(slice.40, slice.41) } ENTRY e { p0 = f32[200000] parameter(0) f1 = f32[3125] fusion(p0), kind=kLoop, calls=f1 ROOT r = f32[48] fusion(f1), kind=kLoop, calls=f2 })") .value(); EXPECT_FALSE(fusion_merger_.Run(module.get()).value()); } TEST_F(FusionMergerTest, WillMergeSliceIntoReusingConsumer) { auto module = ParseAndReturnVerifiedModule(R"( HloModule m f1 { p01 = s8[1000000] parameter(0) ROOT s0 = s8[10] slice(p01), slice={[0:10]} } f2 { p02 = s8[10] parameter(0) ROOT b0 = s8[10,1000000] broadcast(p02), dimensions={0} } ENTRY e { p0 = s8[1000000] parameter(0) f1 = s8[10] fusion(p0), kind=kLoop, calls=f1 ROOT r = s8[10,1000000] fusion(f1), kind=kLoop, calls=f2 })") .value(); EXPECT_TRUE(fusion_merger_.Run(module.get()).value()); } TEST_F(FusionMergerTest, WillMergeExpensiveFusionsIfSavesMemory) { auto module = ParseAndReturnVerifiedModule(R"( HloModule m %f_a (p: f32[]) -> f32[1024,1024,1024] { %p = f32[] parameter(0) %b = f32[1024,1024,1024] broadcast(%p), dimensions={} ROOT %t = f32[1024,1024,1024] tanh(%b) } %f_b (p: f32[1024,1024,1024]) -> f32[1024,1024,1024] { %p = f32[1024,1024,1024] parameter(0) ROOT %t = f32[1024,1024,1024] tanh(%p) } %f_c (p: f32[1024,1024,1024]) -> f32[1024,1024,1024] { %p = f32[1024,1024,1024] parameter(0) ROOT %t = f32[1024,1024,1024] tanh(%p) } ENTRY entry { p0 = f32[] parameter(0) f1 = f32[1024,1024,1024] fusion(p0), kind=kLoop, calls=%f_a f2 = f32[1024,1024,1024] fusion(f1), kind=kLoop, calls=%f_b f3 = f32[1024,1024,1024] fusion(f1), kind=kLoop, calls=%f_c ROOT f4 = f32[1024,1024,1024] add(f2, f3) })") .value(); EXPECT_TRUE(fusion_merger_.Run(module.get()).value()); } TEST_F(FusionMergerTest, WillMergeExpensiveFusionsWithSingleConsumer) { auto module = ParseAndReturnVerifiedModule(R"( HloModule m %f_b (p: f32[1024,1024,1024]) -> f32[1024,1024,1024] { %p = f32[1024,1024,1024] parameter(0) ROOT %t = f32[1024,1024,1024] tanh(%p) } %f_c (p: f32[1024,1024,1024]) -> f32[1024,1024,1024] { %p = f32[1024,1024,1024] parameter(0) ROOT %t = f32[1024,1024,1024] add(%p, %p) } ENTRY entry { p0 = f32[1024,1024,1024] parameter(0) f1 = f32[1024,1024,1024] fusion(p0), kind=kLoop, calls=%f_b ROOT f2 = f32[1024,1024,1024] fusion(f1), kind=kLoop, calls=%f_c })") .value(); EXPECT_TRUE(fusion_merger_.Run(module.get()).value()); } TEST_F(FusionMergerTest, WillNotMergeExpensiveFusionsWithReusingConsumer) { auto module = ParseAndReturnVerifiedModule(R"( HloModule m %f_b { %p = f32[1024,1024,1024] parameter(0) %t1 = f32[1024,1024,1024] tanh(%p) %t2 = f32[1024,1024,1024] tanh(%t1) %t3 = f32[1024,1024,1024] tanh(%t2) %t4 = f32[1024,1024,1024] tanh(%t3) %t5 = f32[1024,1024,1024] tanh(%t4) %t6 = f32[1024,1024,1024] tanh(%t5) %t7 = 
f32[1024,1024,1024] tanh(%t6) %t8 = f32[1024,1024,1024] tanh(%t7) ROOT %t9 = f32[1024,1024,1024] tanh(%t8) } %f_c { %p = f32[1024,1024,1024] parameter(0) ROOT %t = f32[1024,1024,1024,2048] broadcast(%p), dimensions={0,1,2} } ENTRY entry { p0 = f32[1024,1024,1024] parameter(0) f1 = f32[1024,1024,1024] fusion(p0), kind=kLoop, calls=%f_b ROOT f2 = f32[1024,1024,1024,2048] fusion(f1), kind=kLoop, calls=%f_c })") .value(); EXPECT_FALSE(fusion_merger_.Run(module.get()).value()); } TEST_F(FusionMergerTest, NoMergeWithBitcast) { auto module = ParseAndReturnVerifiedModule(R"( HloModule m f32add { x.634 = f32[] parameter(0) y.635 = f32[] parameter(1) ROOT add.636 = f32[] add(x.634, y.635) } fused_computation.103 { param_0.310 = f16[1,8,512,1536]{2,3,1,0} parameter(0) param_1.420 = f32[8,512]{1,0} parameter(1) bitcast.1144 = f32[1,8,512]{2,1,0} bitcast(param_1.420) convert.252 = f16[1,8,512]{2,1,0} convert(bitcast.1144) bitcast.1143 = f16[8,512]{1,0} bitcast(convert.252) broadcast.481 = f16[1,8,512,1536]{2,3,1,0} broadcast(bitcast.1143), dimensions={1,2} divide.15 = f16[1,8,512,1536]{2,3,1,0} divide(param_0.310, broadcast.481) ROOT bitcast.1142 = f16[8,512,1536]{1,2,0} bitcast(divide.15) } fused_computation.105 { param_1.426 = f16[8,1536,512]{2,1,0} parameter(1) bitcast.1896 = f16[1,8,1536,512]{3,2,1,0} bitcast(param_1.426) transpose.238 = f16[1,8,512,1536]{2,3,1,0} transpose(bitcast.1896), dimensions={0,1,3,2} param_0.315 = f16[8,512]{1,0} parameter(0) broadcast.482 = f16[1,8,512,1536]{2,3,1,0} broadcast(param_0.315), dimensions={1,2} subtract.22 = f16[1,8,512,1536]{2,3,1,0} subtract(transpose.238, broadcast.482) ROOT exponential.15 = f16[1,8,512,1536]{2,3,1,0} exponential(subtract.22) } fused_computation.104 { param_0.1000 = f16[8,1536,512]{2,1,0} parameter(0) convert.652 = f32[8,1536,512]{2,1,0} convert(param_0.1000) constant_752 = f32[] constant(-0) ROOT reduce.232 = f32[8,512]{1,0} reduce(convert.652, constant_752), dimensions={1}, to_apply=f32add } ENTRY entry { p0 = f16[8,1536,512]{2,1,0} parameter(0) p1 = f16[8,512]{1,0} parameter(1) fusion.105 = f16[1,8,512,1536]{2,3,1,0} fusion(p1, p0), kind=kLoop, calls=fused_computation.105 bitcast.1787 = f16[8,1536,512]{2,1,0} bitcast(fusion.105) fusion.104 = f32[8,512]{1,0} fusion(bitcast.1787), kind=kInput, calls=fused_computation.104 ROOT fusion.103 = f16[8,512,1536]{1,2,0} fusion(fusion.105, fusion.104), kind=kLoop, calls=fused_computation.103 } )") .value(); EXPECT_FALSE(fusion_merger_.Run(module.get()).value()); } TEST_F(FusionMergerTest, CostBasedMerge) { auto module = ParseAndReturnVerifiedModule(R"( HloModule m fused_computation.45 { param_1.194 = f16[8,1536,512]{2,1,0} parameter(1) bitcast.1042 = f16[1,8,512,1536]{2,3,1,0} bitcast(param_1.194) param_0.135 = f16[8,512]{1,0} parameter(0) broadcast.391 = f16[1,8,512,1536]{2,3,1,0} broadcast(param_0.135), dimensions={1,2} subtract.6 = f16[1,8,512,1536]{2,3,1,0} subtract(bitcast.1042, broadcast.391) ROOT exponential.11 = f16[1,8,512,1536]{2,3,1,0} exponential(subtract.6) } f32add { x.634 = f32[] parameter(0) y.635 = f32[] parameter(1) ROOT add.636 = f32[] add(x.634, y.635) } fused_computation.44 { param_0.869 = f16[1,8,512,1536]{2,3,1,0} parameter(0) convert.221 = f32[1,8,512,1536]{2,3,1,0} convert(param_0.869) transpose.212 = f32[1,8,1536,512]{3,2,1,0} transpose(convert.221), dimensions={0,1,3,2} bitcast.1041 = f32[8,1536,512]{2,1,0} bitcast(transpose.212) constant_429 = f32[] constant(0) ROOT reduce.149 = f32[8,512]{1,0} reduce(bitcast.1041, constant_429), dimensions={1}, to_apply=f32add } 
fused_computation.43 { param_0.130 = f16[1,8,512,1536]{2,3,1,0} parameter(0) param_1.188 = f32[8,512]{1,0} parameter(1) bitcast.1040 = f32[1,8,512]{2,1,0} bitcast(param_1.188) convert.220 = f16[1,8,512]{2,1,0} convert(bitcast.1040) bitcast.1039 = f16[8,512]{1,0} bitcast(convert.220) broadcast.390 = f16[1,8,512,1536]{2,3,1,0} broadcast(bitcast.1039), dimensions={1,2} divide.11 = f16[1,8,512,1536]{2,3,1,0} divide(param_0.130, broadcast.390) ROOT bitcast.1038 = f16[8,512,1536]{1,2,0} bitcast(divide.11) } ENTRY entry { p0 = f16[8,1536,512]{2,1,0} parameter(0) p1 = f16[8,512]{1,0} parameter(1) fusion.45 = f16[1,8,512,1536]{2,3,1,0} fusion(p1, p0), kind=kLoop, calls=fused_computation.45 fusion.44 = f32[8,512]{1,0} fusion(fusion.45), kind=kInput, calls=fused_computation.44 ROOT fusion.43 = f16[8,512,1536]{1,2,0} fusion(fusion.45, fusion.44), kind=kLoop, calls=fused_computation.43 } )") .value(); auto& debug_options = module->mutable_config().mutable_debug_options(); debug_options.set_xla_gpu_mlir_emitter_level(3); EXPECT_TRUE(fusion_merger_.Run(module.get()).value()); } TEST_F(FusionMergerTest, CostBasedNoMerge) { auto module = ParseAndReturnVerifiedModule(R"( HloModule m add_float_.56 { x.57 = f32[] parameter(0) y.58 = f32[] parameter(1) ROOT add.59 = f32[] add(x.57, y.58) } fused_computation.66 { constant.635 = f32[] constant(0) broadcast.257 = f32[459,3]{1,0} broadcast(constant.635), dimensions={} constant.641 = f32[] constant(1) broadcast.256 = f32[459,3]{1,0} broadcast(constant.641), dimensions={} broadcast.255 = f32[459]{0} broadcast(constant.635), dimensions={} iota.28 = f32[459]{0} iota(), iota_dimension=0 constant.629 = f32[] constant(1.49891067) broadcast.253 = f32[459]{0} broadcast(constant.629), dimensions={} multiply.39 = f32[459]{0} multiply(iota.28, broadcast.253) constant.633 = f32[] constant(-1) broadcast.252 = f32[459]{0} broadcast(constant.633), dimensions={} add.31 = f32[459]{0} add(multiply.39, broadcast.252) ceil.11 = f32[459]{0} ceil(add.31) constant.630 = f32[] constant(685) broadcast.251 = f32[459]{0} broadcast(constant.630), dimensions={} clamp.49 = f32[459]{0} clamp(broadcast.255, ceil.11, broadcast.251) subtract.11 = f32[459]{0} subtract(clamp.49, multiply.39) broadcast.249 = f32[459,3]{1,0} broadcast(subtract.11), dimensions={0} iota.26 = f32[459,3]{1,0} iota(), iota_dimension=1 add.30 = f32[459,3]{1,0} add(broadcast.249, iota.26) abs.3 = f32[459,3]{1,0} abs(add.30) subtract.10 = f32[459,3]{1,0} subtract(broadcast.256, abs.3) maximum.6 = f32[459,3]{1,0} maximum(broadcast.257, subtract.10) ROOT reduce.3 = f32[459]{0} reduce(maximum.6, constant.635), dimensions={1}, to_apply=add_float_.56 } fused_computation.67 { constant.684 = f32[] constant(0) broadcast.296 = f32[1130,3]{1,0} broadcast(constant.684), dimensions={} constant.685 = f32[] constant(1) broadcast.295 = f32[1130,3]{1,0} broadcast(constant.685), dimensions={} broadcast.294 = f32[1130]{0} broadcast(constant.684), dimensions={} iota.41 = f32[1130]{0} iota(), iota_dimension=0 constant.675 = f32[] constant(1.34513271) broadcast.293 = f32[1130]{0} broadcast(constant.675), dimensions={} multiply.47 = f32[1130]{0} multiply(iota.41, broadcast.293) constant.677 = f32[] constant(-1) broadcast.290 = f32[1130]{0} broadcast(constant.677), dimensions={} add.39 = f32[1130]{0} add(multiply.47, broadcast.290) ceil.15 = f32[1130]{0} ceil(add.39) constant.676 = f32[] constant(1517) broadcast.289 = f32[1130]{0} broadcast(constant.676), dimensions={} clamp.53 = f32[1130]{0} clamp(broadcast.294, ceil.15, broadcast.289) subtract.19 
= f32[1130]{0} subtract(clamp.53, multiply.47) broadcast.287 = f32[1130,3]{1,0} broadcast(subtract.19), dimensions={0} iota.39 = f32[1130,3]{1,0} iota(), iota_dimension=1 add.38 = f32[1130,3]{1,0} add(broadcast.287, iota.39) abs.7 = f32[1130,3]{1,0} abs(add.38) subtract.18 = f32[1130,3]{1,0} subtract(broadcast.295, abs.7) maximum.10 = f32[1130,3]{1,0} maximum(broadcast.296, subtract.18) ROOT reduce.4 = f32[1130]{0} reduce(maximum.10, constant.684), dimensions={1}, to_apply=add_float_.56 } fused_computation.59 { constant.532 = f32[] constant(0) broadcast.316 = f32[1130,3]{1,0} broadcast(constant.532), dimensions={} constant.663 = f32[] constant(1) broadcast.315 = f32[1130,3]{1,0} broadcast(constant.663), dimensions={} broadcast.314 = f32[1130]{0} broadcast(constant.532), dimensions={} iota.47 = f32[1130]{0} iota(), iota_dimension=0 constant.579 = f32[] constant(1.34513271) broadcast.311 = f32[1130]{0} broadcast(constant.579), dimensions={} multiply.51 = f32[1130]{0} multiply(iota.47, broadcast.311) constant.578 = f32[] constant(-1) broadcast.310 = f32[1130]{0} broadcast(constant.578), dimensions={} add.43 = f32[1130]{0} add(multiply.51, broadcast.310) ceil.17 = f32[1130]{0} ceil(add.43) constant.576 = f32[] constant(1517) broadcast.309 = f32[1130]{0} broadcast(constant.576), dimensions={} clamp.55 = f32[1130]{0} clamp(broadcast.314, ceil.17, broadcast.309) subtract.24 = f32[1130]{0} subtract(clamp.55, multiply.51) broadcast.306 = f32[1130,3]{1,0} broadcast(subtract.24), dimensions={0} iota.45 = f32[1130,3]{1,0} iota(), iota_dimension=1 add.42 = f32[1130,3]{1,0} add(broadcast.306, iota.45) abs.9 = f32[1130,3]{1,0} abs(add.42) subtract.23 = f32[1130,3]{1,0} subtract(broadcast.315, abs.9) maximum.12 = f32[1130,3]{1,0} maximum(broadcast.316, subtract.23) param_2.183 = f32[1130]{0} parameter(2) broadcast.172 = f32[1130,3]{1,0} broadcast(param_2.183), dimensions={0} divide.3 = f32[1130,3]{1,0} divide(maximum.12, broadcast.172) bitcast.53 = f32[3390]{0} bitcast(divide.3) broadcast.171 = f32[3390,1377]{1,0} broadcast(bitcast.53), dimensions={0} broadcast.276 = f32[459,3]{1,0} broadcast(constant.532), dimensions={} broadcast.275 = f32[459,3]{1,0} broadcast(constant.663), dimensions={} broadcast.274 = f32[459]{0} broadcast(constant.532), dimensions={} iota.35 = f32[459]{0} iota(), iota_dimension=0 constant.614 = f32[] constant(1.49891067) broadcast.273 = f32[459]{0} broadcast(constant.614), dimensions={} multiply.43 = f32[459]{0} multiply(iota.35, broadcast.273) broadcast.272 = f32[459]{0} broadcast(constant.578), dimensions={} add.35 = f32[459]{0} add(multiply.43, broadcast.272) ceil.13 = f32[459]{0} ceil(add.35) constant.611 = f32[] constant(685) broadcast.269 = f32[459]{0} broadcast(constant.611), dimensions={} clamp.51 = f32[459]{0} clamp(broadcast.274, ceil.13, broadcast.269) subtract.15 = f32[459]{0} subtract(clamp.51, multiply.43) broadcast.267 = f32[459,3]{1,0} broadcast(subtract.15), dimensions={0} iota.33 = f32[459,3]{1,0} iota(), iota_dimension=1 add.34 = f32[459,3]{1,0} add(broadcast.267, iota.33) abs.5 = f32[459,3]{1,0} abs(add.34) subtract.14 = f32[459,3]{1,0} subtract(broadcast.275, abs.5) maximum.8 = f32[459,3]{1,0} maximum(broadcast.276, subtract.14) param_1.177 = f32[459]{0} parameter(1) broadcast.170 = f32[459,3]{1,0} broadcast(param_1.177), dimensions={0} divide.2 = f32[459,3]{1,0} divide(maximum.8, broadcast.170) bitcast.52 = f32[1377]{0} bitcast(divide.2) broadcast.169 = f32[3390,1377]{1,0} broadcast(bitcast.52), dimensions={1} multiply.15 = f32[3390,1377]{1,0} 
multiply(broadcast.171, broadcast.169) bitcast.61 = f32[1130,3,459,3]{3,2,1,0} bitcast(multiply.15) transpose.68 = f32[459,1130,3,3]{2,0,3,1} transpose(bitcast.61), dimensions={2,0,3,1} copy.1 = f32[459,1130,3,3]{3,2,1,0} copy(transpose.68) bitcast.50 = f32[1130,459,9]{2,1,0} bitcast(copy.1) broadcast.168 = f32[1130,459,6,9]{3,2,1,0} broadcast(bitcast.50), dimensions={0,1,3} param_0.171 = u8[1,688,1520,6]{3,2,1,0} parameter(0) bitcast.49 = u8[688,1520,1,6]{3,1,0,2} bitcast(param_0.171) convert.175 = f32[688,1520,1,6]{3,1,0,2} convert(bitcast.49) broadcast.167 = f32[459,1130,1]{2,1,0} broadcast(clamp.51), dimensions={0} broadcast.166 = f32[459,1130,1]{2,1,0} broadcast(clamp.55), dimensions={1} concatenate.3 = f32[459,1130,2]{2,1,0} concatenate(broadcast.167, broadcast.166), dimensions={2} convert.174 = s32[459,1130,2]{2,1,0} convert(concatenate.3) bitcast.48 = s32[518670,2]{1,0} bitcast(convert.174) gather.1 = f32[518670,3,3,1,6]{2,1,4,0,3} gather(convert.175, bitcast.48), offset_dims={1,2,3,4}, collapsed_slice_dims={}, start_index_map={0,1}, index_vector_dim=1, slice_sizes={3,3,1,6} transpose.69 = f32[1,518670,6,3,3]{4,3,2,1,0} transpose(gather.1), dimensions={3,0,4,1,2} bitcast.47 = f32[1130,459,6,9]{3,2,1,0} bitcast(transpose.69) multiply.14 = f32[1130,459,6,9]{3,2,1,0} multiply(broadcast.168, bitcast.47) reduce.2 = f32[1130,459,6]{2,1,0} reduce(multiply.14, constant.532), dimensions={3}, to_apply=add_float_.56 convert.173 = f16[1130,459,6]{2,1,0} convert(reduce.2) bitcast.46 = f16[1,459,1130,6]{3,2,1,0} bitcast(convert.173) constant.533 = f16[] constant(0) pad.9 = f16[1,480,1130,6]{3,2,1,0} pad(bitcast.46, constant.533), padding=0_0x0_21x0_0x0_0 pad.8 = f16[1,480,1152,6]{3,2,1,0} pad(pad.9, constant.533), padding=0_0x0_0x0_22x0_0 constant.532f16 = f16[] constant(0) ROOT pad.7 = f16[1,485,1157,6]{3,2,1,0} pad(pad.8, constant.532f16), padding=0_0x2_3x2_3x0_0 } ENTRY e { arg0.1 = u8[1,688,1520,6]{3,2,1,0} parameter(0), parameter_replication={false} fusion.66 = f32[459]{0} fusion(), kind=kLoop, calls=fused_computation.66 fusion.67 = f32[1130]{0} fusion(), kind=kLoop, calls=fused_computation.67 ROOT fusion.59 = f16[1,485,1157,6]{2,1,3,0} fusion(arg0.1, fusion.66, fusion.67), kind=kLoop, calls=fused_computation.59 } )") .value(); EXPECT_FALSE(fusion_merger_.Run(module.get()).value()); } TEST_F(FusionMergerTest, NoMergeBecauseTooManyBasicBlockSplits) { auto module = ParseAndReturnVerifiedModule(R"( HloModule m region_6.97 { Arg_0.98 = pred[] parameter(0) Arg_1.99 = pred[] parameter(1) ROOT or.100 = pred[] or(Arg_0.98, Arg_1.99) } region_4.50 { Arg_0.51 = f64[] parameter(0) Arg_1.52 = f64[] parameter(1) ROOT add.53 = f64[] add(Arg_0.51, Arg_1.52) } f2 { param_0 = s64[1]{0} parameter(0) constant_70 = f64[] constant(0) convert.41.clone.1 = f64[1]{0} convert(param_0) ROOT pad.99.clone.1 = f64[3]{0} pad(convert.41.clone.1, constant_70), padding=0_2 } f1 { param_0.361 = pred[5]{0} parameter(0) broadcast.107 = pred[10,5]{1,0} broadcast(param_0.361), dimensions={1} param_6.244 = pred[5]{0} parameter(6) broadcast.111.clone.1 = pred[10,5]{1,0} broadcast(param_6.244), dimensions={1} param_1.450 = f64[10,5]{1,0} parameter(1) constant_294_clone_1 = f64[] constant(1) broadcast.153.clone.1 = f64[10,5]{1,0} broadcast(constant_294_clone_1), dimensions={} compare.22.clone.1 = pred[10,5]{1,0} compare(param_1.450, broadcast.153.clone.1), direction=GE constant_75_clone_1 = f64[] constant(-1) broadcast.109.clone.1 = f64[10,5]{1,0} broadcast(constant_75_clone_1), dimensions={} add.34.clone.1 = f64[10,5]{1,0} 
add(param_1.450, broadcast.109.clone.1) param_5.322 = f64[10,5,4]{1,0,2} parameter(5) slice.45.clone.1 = f64[10,5,1]{1,0,2} slice(param_5.322), slice={[0:10], [0:5], [3:4]} bitcast.94.clone.1 = f64[10,5]{1,0} bitcast(slice.45.clone.1) divide.7.clone.1 = f64[10,5]{1,0} divide(add.34.clone.1, bitcast.94.clone.1) add.33.clone.1 = f64[10,5]{1,0} add(divide.7.clone.1, broadcast.153.clone.1) constant_70 = f64[] constant(0) broadcast.157.clone.1 = f64[10,5]{1,0} broadcast(constant_70), dimensions={} compare.26.clone.1 = pred[10,5]{1,0} compare(param_1.450, broadcast.157.clone.1), direction=LE slice.46.clone.1 = f64[10,5,1]{1,0,2} slice(param_5.322), slice={[0:10], [0:5], [0:1]} bitcast.93.clone.1 = f64[10,5]{1,0} bitcast(slice.46.clone.1) divide.6.clone.1 = f64[10,5]{1,0} divide(param_1.450, bitcast.93.clone.1) broadcast.295.clone.1 = f64[10,5,3]{1,0,2} broadcast(param_1.450), dimensions={0,1} param_4.368 = f64[10,5,2]{1,0,2} parameter(4) pad.103.clone.1 = f64[10,5,3]{1,0,2} pad(param_4.368, constant_70), padding=0_0x0_0x1_0 compare.121.clone.1 = pred[10,5,3]{1,0,2} compare(broadcast.295.clone.1, pad.103.clone.1), direction=GE pad.102.clone.1 = f64[10,5,3]{1,0,2} pad(param_4.368, constant_294_clone_1), padding=0_0x0_0x0_1 compare.120.clone.1 = pred[10,5,3]{1,0,2} compare(broadcast.295.clone.1, pad.102.clone.1), direction=LT and.39.clone.1 = pred[10,5,3]{1,0,2} and(compare.121.clone.1, compare.120.clone.1) transpose.9 = pred[3,10,5]{2,1,0} transpose(and.39.clone.1), dimensions={2,0,1} constant_296_clone_1 = pred[] constant(false) reduce.91.clone.1 = pred[10,5]{1,0} reduce(transpose.9, constant_296_clone_1), dimensions={0}, to_apply=region_6.97 broadcast.294.clone.1 = pred[10,5,3]{1,0,2} broadcast(reduce.91.clone.1), dimensions={0,1} pad.99.clone.1 = f64[3]{0} parameter(3) broadcast.292.clone.1 = f64[3]{0} broadcast(constant_70), dimensions={} compare.117.clone.1 = pred[3]{0} compare(pad.99.clone.1, broadcast.292.clone.1), direction=NE broadcast.290.clone.1 = pred[10,5,3]{1,0,2} broadcast(compare.117.clone.1), dimensions={2} select.67.clone.1 = pred[10,5,3]{1,0,2} select(broadcast.294.clone.1, and.39.clone.1, broadcast.290.clone.1) convert.40.clone.1 = f64[10,5,3]{1,0,2} convert(select.67.clone.1) broadcast.288.clone.1 = f64[10,5,3,3]{1,0,2,3} broadcast(convert.40.clone.1), dimensions={0,1,2} param_2.361 = f64[10,5,4,3]{1,0,2,3} parameter(2) slice.114.clone.1 = f64[10,5,3,3]{1,0,2,3} slice(param_2.361), slice={[0:10], [0:5], [1:4], [0:3]} multiply.53.clone.1 = f64[10,5,3,3]{1,0,2,3} multiply(broadcast.288.clone.1, slice.114.clone.1) transpose.10 = f64[3,3,10,5]{3,2,1,0} transpose(multiply.53.clone.1), dimensions={3,2,0,1} reduce.90.clone.1 = f64[3,10,5]{2,1,0} reduce(transpose.10, constant_70), dimensions={1}, to_apply=region_4.50 transpose.11 = f64[10,5,3]{1,0,2} transpose(reduce.90.clone.1), dimensions={1,2,0} slice.28.clone.1 = f64[10,5,1]{1,0,2} slice(transpose.11), slice={[0:10], [0:5], [0:1]} bitcast.99.clone.1 = f64[10,5]{1,0} bitcast(slice.28.clone.1) slice.108.clone.1 = f64[10,5,3,3]{1,0,2,3} slice(param_2.361), slice={[0:10], [0:5], [0:3], [0:3]} multiply.49.clone.1 = f64[10,5,3,3]{1,0,2,3} multiply(broadcast.288.clone.1, slice.108.clone.1) transpose.12 = f64[3,3,10,5]{3,2,1,0} transpose(multiply.49.clone.1), dimensions={3,2,0,1} reduce.82.clone.1 = f64[3,10,5]{2,1,0} reduce(transpose.12, constant_70), dimensions={1}, to_apply=region_4.50 transpose.13 = f64[10,5,3]{1,0,2} transpose(reduce.82.clone.1), dimensions={1,2,0} slice.107.clone.1 = f64[10,5,1]{1,0,2} slice(transpose.13), 
slice={[0:10], [0:5], [0:1]} bitcast.240.clone.1 = f64[10,5]{1,0} bitcast(slice.107.clone.1) subtract.27.clone.1 = f64[10,5]{1,0} subtract(bitcast.99.clone.1, bitcast.240.clone.1) slice.27.clone.1 = f64[10,5,1]{1,0,2} slice(transpose.13), slice={[0:10], [0:5], [2:3]} bitcast.98.clone.1 = f64[10,5]{1,0} bitcast(slice.27.clone.1) slice.26.clone.1 = f64[10,5,1]{1,0,2} slice(transpose.11), slice={[0:10], [0:5], [2:3]} bitcast.97.clone.1 = f64[10,5]{1,0} bitcast(slice.26.clone.1) add.36.clone.1 = f64[10,5]{1,0} add(bitcast.97.clone.1, bitcast.98.clone.1) slice.24.clone.1 = f64[10,5,1]{1,0,2} slice(transpose.11), slice={[0:10], [0:5], [1:2]} bitcast.95.clone.1 = f64[10,5]{1,0} bitcast(slice.24.clone.1) slice.121.clone.1 = f64[10,5,1]{1,0,2} slice(transpose.13), slice={[0:10], [0:5], [1:2]} bitcast.274.clone.1 = f64[10,5]{1,0} bitcast(slice.121.clone.1) subtract.26.clone.1 = f64[10,5]{1,0} subtract(bitcast.95.clone.1, bitcast.274.clone.1) divide.21 = f64[10,5]{1,0} divide(subtract.26.clone.1, subtract.27.clone.1) constant_77_clone_1 = f64[] constant(2) broadcast.117.clone.1 = f64[10,5]{1,0} broadcast(constant_77_clone_1), dimensions={} multiply.37.clone.1 = f64[10,5]{1,0} multiply(divide.21, broadcast.117.clone.1) subtract.25.clone.1 = f64[10,5]{1,0} subtract(add.36.clone.1, multiply.37.clone.1) subtract.24.clone.1 = f64[10,5]{1,0} subtract(param_1.450, bitcast.274.clone.1) divide.9.clone.1 = f64[10,5]{1,0} divide(subtract.24.clone.1, subtract.26.clone.1) clamp.7.clone.1 = f64[10,5]{1,0} clamp(broadcast.157.clone.1, divide.9.clone.1, broadcast.153.clone.1) multiply.36.clone.1 = f64[10,5]{1,0} multiply(subtract.25.clone.1, clamp.7.clone.1) subtract.23.clone.1 = f64[10,5]{1,0} subtract(bitcast.98.clone.1, multiply.36.clone.1) compare.13.clone.1 = pred[10,5]{1,0} compare(subtract.23.clone.1, broadcast.157.clone.1), direction=GE negate.19.clone.1 = f64[10,5]{1,0} negate(divide.21) multiply.35.clone.1 = f64[10,5]{1,0} multiply(negate.19.clone.1, clamp.7.clone.1) multiply.34.clone.1 = f64[10,5]{1,0} multiply(multiply.35.clone.1, broadcast.117.clone.1) negate.18.clone.1 = f64[10,5]{1,0} negate(subtract.23.clone.1) multiply.33.clone.1 = f64[10,5]{1,0} multiply(subtract.23.clone.1, subtract.23.clone.1) subtract.22.clone.1 = f64[10,5]{1,0} subtract(divide.21, subtract.23.clone.1) constant_78_clone_1 = f64[] constant(4) broadcast.113.clone.1 = f64[10,5]{1,0} broadcast(constant_78_clone_1), dimensions={} multiply.32.clone.1 = f64[10,5]{1,0} multiply(subtract.22.clone.1, broadcast.113.clone.1) multiply.31.clone.1 = f64[10,5]{1,0} multiply(multiply.32.clone.1, multiply.35.clone.1) subtract.21.clone.1 = f64[10,5]{1,0} subtract(multiply.33.clone.1, multiply.31.clone.1) compare.12.clone.1 = pred[10,5]{1,0} compare(subtract.21.clone.1, broadcast.157.clone.1), direction=GT constant_79_clone_1 = f64[] constant(2.2250738585072014e-308) broadcast.112.clone.1 = f64[10,5]{1,0} broadcast(constant_79_clone_1), dimensions={} maximum.18.clone.1 = f64[10,5]{1,0} maximum(broadcast.112.clone.1, subtract.21.clone.1) sqrt.1.clone.1 = f64[10,5]{1,0} sqrt(maximum.18.clone.1) select.47.clone.1 = f64[10,5]{1,0} select(compare.12.clone.1, sqrt.1.clone.1, broadcast.157.clone.1) add.35.clone.1 = f64[10,5]{1,0} add(negate.18.clone.1, select.47.clone.1) select.46.clone.1 = f64[10,5]{1,0} select(compare.13.clone.1, multiply.34.clone.1, add.35.clone.1) subtract.20.clone.1 = f64[10,5]{1,0} subtract(negate.18.clone.1, select.47.clone.1) multiply.30.clone.1 = f64[10,5]{1,0} multiply(subtract.22.clone.1, broadcast.117.clone.1) 
select.45.clone.1 = f64[10,5]{1,0} select(compare.13.clone.1, subtract.20.clone.1, multiply.30.clone.1) divide.8.clone.1 = f64[10,5]{1,0} divide(select.46.clone.1, select.45.clone.1) clamp.6.clone.1 = f64[10,5]{1,0} clamp(broadcast.157.clone.1, divide.8.clone.1, broadcast.153.clone.1) multiply.29.clone.1 = f64[10,5]{1,0} multiply(subtract.27.clone.1, clamp.6.clone.1) add.32.clone.1 = f64[10,5]{1,0} add(multiply.29.clone.1, bitcast.240.clone.1) select.44.clone.1 = f64[10,5]{1,0} select(compare.26.clone.1, divide.6.clone.1, add.32.clone.1) select.43.clone.1 = f64[10,5]{1,0} select(compare.22.clone.1, add.33.clone.1, select.44.clone.1) select.42.clone.1 = f64[10,5]{1,0} select(broadcast.111.clone.1, param_1.450, select.43.clone.1) select.41 = f64[10,5]{1,0} select(broadcast.107, select.42.clone.1, broadcast.157.clone.1) ROOT tuple.14 = (f64[10,5]{1,0}, f64[10,5]{1,0}, f64[10,5]{1,0}, f64[10,5]{1,0}, f64[10,5]{1,0}, f64[10,5]{1,0}, f64[10,5]{1,0}, f64[10,5]{1,0}) tuple(select.41, select.42.clone.1, clamp.6.clone.1, subtract.25.clone.1, bitcast.97.clone.1, multiply.37.clone.1, bitcast.98.clone.1, divide.21) } ENTRY e { p3 = s64[1]{0} parameter(3) f2 = f64[3]{0} fusion(p3), kind=kLoop, calls=f2 p0 = pred[5]{0} parameter(0) p1 = f64[10,5]{1,0} parameter(1) p2 = f64[10,5,4,3]{1,0,2,3} parameter(2) p4 = f64[10,5,2]{1,0,2} parameter(4) p5 = f64[10,5,4]{1,0,2} parameter(5) p6 = pred[5]{0} parameter(6) ROOT ret = (f64[10,5]{1,0}, f64[10,5]{1,0}, f64[10,5]{1,0}, f64[10,5]{1,0}, f64[10,5]{1,0}, f64[10,5]{1,0}, f64[10,5]{1,0}, f64[10,5]{1,0}) fusion(p0, p1, p2, f2, p4, p5, p6), kind=kLoop, calls=f1 } )") .value(); auto& debug_options = module->mutable_config().mutable_debug_options(); debug_options.set_xla_gpu_mlir_emitter_level(3); EXPECT_FALSE(fusion_merger_.Run(module.get()).value()); } TEST_F(FusionMergerTest, CommonElementwiseUsedParameter) { auto module = ParseAndReturnVerifiedModule(R"( HloModule m p { p0 = f32[10000000] parameter(0) p1 = f32[10000000] parameter(1) p2 = f32[10000000] parameter(2) p3 = f32[10000000] parameter(3) a0 = f32[10000000] add(p1, p2) a1 = f32[10000000] add(a0, p3) ROOT _ = add(p0, a1) } c1 { p0 = f32[10000000] parameter(0) p1 = f32[10000000] parameter(1) ROOT _ = add(p0, p1) } c2 { p0 = f32[10000000] parameter(0) p1 = f32[10000000] parameter(1) ROOT _ = multiply(p0, p1) } ENTRY entry { p0 = f32[10000000] parameter(0) p1 = f32[10000000] parameter(1) p2 = f32[10000000] parameter(2) p3 = f32[10000000] parameter(3) f = f32[10000000] fusion(p0, p1, p2, p3), kind=kLoop, calls=p f1 = f32[10000000] fusion(p0, f), kind=kLoop, calls=c1 f2 = f32[10000000] fusion(p1, f), kind=kLoop, calls=c2 ROOT _ = (f32[10000000], f32[10000000]) tuple(f1, f2) } )") .value(); EXPECT_TRUE(fusion_merger_.Run(module.get()).value()); } TEST_F(FusionMergerTest, IncompatibleNonTrivialHeroes) { auto module = ParseAndReturnVerifiedModule(R"( HloModule module fused_computation { param_0.1 = f32[18,16,32]{2,1,0} parameter(0) param_1.1 = f32[32,16,18]{2,1,0} parameter(1) s.1 = f32[18,16,32]{2,1,0} sqrt(param_0.1) t.1 = f32[32,16,18]{2,1,0} transpose(s.1), dimensions={2,1,0} sub.1 = f32[32,16,18]{2,1,0} subtract(t.1, param_1.1) exp.1 = f32[32,16,18]{2,1,0} exponential(sub.1) ROOT add.1 = f32[32,16,18]{2,1,0} add(exp.1, exp.1) } fused_computation.2 { param_0.2 = f32[32,16,18]{2,1,0} parameter(0) s.2 = f32[32,16,18]{2,1,0} sqrt(param_0.2) ROOT t.2 = f32[32,18,16]{2,1,0} transpose(s.2), dimensions={0,2,1} } ENTRY main { p = f32[18,16,32]{2,1,0} parameter(0) p2 = f32[32,16,18]{2,1,0} parameter(1) fusion = 
f32[32,16,18]{2,1,0} fusion(p, p2), kind=kLoop, calls=fused_computation ROOT fusion2 = f32[32,18,16]{2,1,0} fusion(fusion), kind=kInput, calls=fused_computation.2 } )") .value(); EXPECT_FALSE(fusion_merger_.Run(module.get()).value()); } TEST_F(FusionMergerTest, DoNotMergeDUSFusions) { auto module = ParseAndReturnVerifiedModule(R"( HloModule module %fused_computation (param_0: f32[8], param_1.2: f32[], param_2.3: f32[8]) -> f32[8] { %param_0 = f32[8]{0} parameter(0) %param_2.3 = f32[8]{0} parameter(2) %slice.2 = f32[5]{0} slice(f32[8]{0} %param_2.3), slice={[0:5]} %param_1.2 = f32[] parameter(1) %broadcast.2 = f32[5]{0} broadcast(f32[] %param_1.2), dimensions={} %add.2 = f32[5]{0} add(f32[5]{0} %slice.2, f32[5]{0} %broadcast.2) %two.1 = s32[] constant(2) ROOT %dynamic-update-slice.2 = f32[8]{0} dynamic-update-slice(f32[8]{0} %param_0, f32[5]{0} %add.2, s32[] %two.1) } %fused_computation.1 (param_0.1: f32[8], param_1.4: f32[6], param_2.6: f32[]) -> f32[8] { %param_0.1 = f32[8]{0} parameter(0) %param_1.4 = f32[6]{0} parameter(1) %param_2.6 = f32[] parameter(2) %broadcast.3 = f32[6]{0} broadcast(f32[] %param_2.6), dimensions={} %add.3 = f32[6]{0} add(f32[6]{0} %param_1.4, f32[6]{0} %broadcast.3) %three.1 = s32[] constant(3) ROOT %dynamic-update-slice.3 = f32[8]{0} dynamic-update-slice(f32[8]{0} %param_0.1, f32[6]{0} %add.3, s32[] %three.1) } ENTRY %Test (parameter: f32[8]) -> f32[8] { %parameter = f32[8]{0} parameter(0) %slice.1 = f32[6]{0} slice(f32[8]{0} %parameter), slice={[0:6]} %one = f32[] constant(1) %fusion.1 = f32[8]{0} fusion(f32[8]{0} %parameter, f32[6]{0} %slice.1, f32[] %one), kind=kLoop, calls=%fused_computation.1 ROOT %fusion = f32[8]{0} fusion(f32[8]{0} %fusion.1, f32[] %one, f32[8]{0} %parameter), kind=kLoop, calls=%fused_computation } )") .value(); EXPECT_FALSE(fusion_merger_.Run(module.get()).value()); } TEST_F(FusionMergerTest, MergeDUSFusionWithElementwiseFusion) { auto module = ParseAndReturnVerifiedModule(R"( HloModule module %fused_computation { %param_0 = f32[1,8]{1,0} parameter(0) %bitcast = f32[8]{0} bitcast(%param_0) ROOT %neg = f32[8]{0} negate(%bitcast) } %fused_computation.1 { %param_0.1 = f32[8]{0} parameter(0) %param_1.4 = f32[5]{0} parameter(1) %three.1 = s32[] constant(3) %exp = f32[5]{0} exponential(%param_1.4) ROOT %dynamic-update-slice.3 = f32[8]{0} dynamic-update-slice(f32[8]{0} %param_0.1, f32[5]{0} %exp, s32[] %three.1) } ENTRY %Test { %parameter = f32[5]{0} parameter(0) %parameter.1 = f32[1,8]{1,0} parameter(1) %fusion = f32[8]{0} fusion(f32[1,8]{1,0} %parameter.1), kind=kLoop, calls=%fused_computation ROOT %fusion.1 = f32[8]{0} fusion(f32[8]{0} %fusion, f32[5]{0} %parameter), kind=kLoop, calls=%fused_computation.1 } )") .value(); EXPECT_TRUE(fusion_merger_.Run(module.get()).value()); } TEST_F(FusionMergerTest, DoNotMergeTwoReduces) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test_module add { p0 = f32[] parameter(0) p1 = f32[] parameter(1) ROOT add.13235 = f32[] add(p0, p1) } ENTRY main { p0 = f32[8,4,128,226]{3,2,1,0} parameter(0) c0 = f32[] constant(0) r0 = f32[8,4,128]{2,1,0} reduce(p0, c0), dimensions={3}, to_apply=add ROOT r1 = f32[8,4]{1,0} reduce(r0, c0), dimensions={2}, to_apply=add } )") .value(); EXPECT_FALSE(fusion_merger_.Run(module.get()).value()); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/fusion_merger.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/fusion_merger_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
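The FusionMerger record above pairs a pass that merges producer fusions into their consumer fusions, provided the merged kernel stays within emitter limits, does not increase memory traffic, and is not estimated to run slower, with its unit test. A minimal sketch of driving the pass outside the test fixture follows, mirroring the constructor arguments the test uses; the helper name RunFusionMerger, the 8-byte pointer size, and the test-only TestGpuDeviceInfo::RTXA6000DeviceInfo() are assumptions taken from the test code, not part of this record.

// Minimal sketch (assumptions noted above): run FusionMerger on an
// already-constructed HloModule the same way fusion_merger_test.cc does.
#include <cstdint>

#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/gpu_device_info_for_tests.h"
#include "xla/service/gpu/transforms/fusion_merger.h"
#include "xla/shape.h"
#include "xla/shape_util.h"

namespace xla::gpu {

// Returns true iff FusionMerger changed the module.
absl::StatusOr<bool> RunFusionMerger(HloModule* module) {
  // Shape-size callback with the 8-byte pointer size used by the test above.
  auto shape_size = [](const Shape& shape) -> int64_t {
    return ShapeUtil::ByteSizeOf(shape, /*pointer_size=*/8);
  };
  // RTXA6000DeviceInfo() is a test-only device description; a real pipeline
  // would pass the device description of the target GPU instead.
  FusionMerger merger(TestGpuDeviceInfo::RTXA6000DeviceInfo(), shape_size);
  return merger.Run(module);
}

}  // namespace xla::gpu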
e065e452-5515-4f94-a09b-a7f41b0f0597
cpp
tensorflow/tensorflow
gemv_rewriter
third_party/xla/xla/service/gpu/transforms/gemv_rewriter.cc
third_party/xla/xla/service/gpu/transforms/gemv_rewriter_test.cc
#include "xla/service/gpu/transforms/gemv_rewriter.h" #include <cstdint> #include <vector> #include "absl/container/flat_hash_set.h" #include "absl/container/inlined_vector.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/hlo/ir/dfs_hlo_visitor_with_default.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/layout.h" #include "xla/layout_util.h" #include "xla/shape.h" #include "xla/util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { absl::StatusOr<Layout> GetLayoutWithNewMinorMostDimension( const Layout& layout) { if (!LayoutUtil::IsMonotonicWithDim0Major(layout)) { return absl::InvalidArgumentError("Layout is not normalized."); } return LayoutUtil::MakeDescendingLayout(layout.minor_to_major_size() + 1); } class GemvRewriterVisitor : public DfsHloRewriteVisitor { public: absl::Status HandleDot(HloInstruction* instr) override { HloDotInstruction* dot = Cast<HloDotInstruction>(instr); const DotDimensionNumbers& dim_numbers = dot->dot_dimension_numbers(); HloInstruction* lhs = dot->mutable_operand(0); HloInstruction* rhs = dot->mutable_operand(1); bool lhs_has_non_contracting_dim = lhs->shape().rank() == dim_numbers.lhs_batch_dimensions_size() + dim_numbers.lhs_contracting_dimensions_size() + 1; bool rhs_has_non_contracting_dim = rhs->shape().rank() == dim_numbers.rhs_batch_dimensions_size() + dim_numbers.rhs_contracting_dimensions_size() + 1; if (lhs_has_non_contracting_dim && rhs_has_non_contracting_dim) { return absl::OkStatus(); } if (!lhs_has_non_contracting_dim && !rhs_has_non_contracting_dim) { return absl::OkStatus(); } if (dot->shape().is_dynamic()) { return absl::OkStatus(); } changed_ = true; HloComputation* computation = dot->parent(); HloInstruction* new_lhs = lhs; if (!lhs_has_non_contracting_dim) { const Shape& lhs_shape = lhs->shape(); absl::Span<const int64_t> lhs_dimensions = lhs_shape.dimensions(); std::vector<int64_t> new_lhs_dimensions(lhs_dimensions.begin(), lhs_dimensions.end()); new_lhs_dimensions.push_back(1); Shape new_lhs_shape( lhs_shape.element_type(), new_lhs_dimensions, absl::InlinedVector<bool, 4>(new_lhs_dimensions.size(), false), {}); TF_ASSIGN_OR_RETURN( *new_lhs_shape.mutable_layout(), GetLayoutWithNewMinorMostDimension(lhs_shape.layout())); new_lhs = computation->AddInstruction( HloInstruction::CreateBitcast(new_lhs_shape, lhs)); } HloInstruction* new_rhs = rhs; if (!rhs_has_non_contracting_dim) { const Shape& rhs_shape = rhs->shape(); absl::Span<const int64_t> rhs_dimensions = rhs_shape.dimensions(); std::vector<int64_t> new_rhs_dimensions(rhs_dimensions.begin(), rhs_dimensions.end()); new_rhs_dimensions.push_back(1); Shape new_rhs_shape( rhs_shape.element_type(), new_rhs_dimensions, absl::InlinedVector<bool, 4>(new_rhs_dimensions.size(), false), {}); TF_ASSIGN_OR_RETURN( *new_rhs_shape.mutable_layout(), GetLayoutWithNewMinorMostDimension(rhs_shape.layout())); new_rhs = computation->AddInstruction( HloInstruction::CreateBitcast(new_rhs_shape, rhs)); } std::vector<int64_t> new_out_dimensions; new_out_dimensions.reserve(dot->shape().dimensions().size() + 1); for (int64_t dim_size : dot->shape().dimensions()) { new_out_dimensions.push_back(dim_size); } if (!lhs_has_non_contracting_dim) { int 
non_contracting_dim_size = new_out_dimensions.back(); new_out_dimensions[new_out_dimensions.size() - 1] = 1; new_out_dimensions.push_back(non_contracting_dim_size); } else { new_out_dimensions.push_back(1); } Shape new_out_shape( dot->shape().element_type(), new_out_dimensions, absl::InlinedVector<bool, 4>(new_out_dimensions.size(), false), {}); TF_ASSIGN_OR_RETURN( *new_out_shape.mutable_layout(), GetLayoutWithNewMinorMostDimension(dot->shape().layout())); HloInstruction* new_dot = computation->AddInstruction(HloInstruction::CreateDot( new_out_shape, new_lhs, new_rhs, dot->dot_dimension_numbers(), dot->precision_config())); HloInstruction* bitcast = computation->AddInstruction( HloInstruction::CreateBitcast(dot->shape(), new_dot)); return computation->ReplaceInstruction(dot, bitcast); } bool changed() const { return changed_; } private: bool changed_ = false; }; } absl::StatusOr<bool> GemvRewriter::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { GemvRewriterVisitor gemv_rewriter; for (HloComputation* computation : module->MakeNonfusionComputations(execution_threads)) { TF_RETURN_IF_ERROR(computation->Accept(&gemv_rewriter)); } return gemv_rewriter.changed(); } } }
#include "xla/service/gpu/transforms/gemv_rewriter.h" #include <memory> #include <optional> #include <gtest/gtest.h> #include "absl/status/statusor.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/statusor.h" namespace xla::gpu { namespace { class GemvRewriterTest : public HloTestBase {}; TEST_F(GemvRewriterTest, RewriteMatrixVectorMultiplicationToGemm) { const char* hlo = R"( HloModule m ENTRY e { p0 = f32[32,7] parameter(0) p1 = f32[7] parameter(1) ROOT d = f32[32] dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} })"; const char* expected = R"() })"; RunAndFilecheckHloRewrite(hlo, GemvRewriter(), expected); } TEST_F(GemvRewriterTest, RewriteVectorMatrixMultiplicationToGemm) { const char* hlo = R"( HloModule m ENTRY e { p0 = f32[7] parameter(0) p1 = f32[7,32] parameter(1) ROOT d = f32[32] dot(p0, p1), lhs_contracting_dims={0}, rhs_contracting_dims={0} })"; const char* expected = R"() })"; RunAndFilecheckHloRewrite(hlo, GemvRewriter(), expected); } TEST_F(GemvRewriterTest, RewriteMatrixVectorMultiplicationWithBatch) { const char* hlo = R"( HloModule m ENTRY e { p0 = f32[2,5,32,7] parameter(0) p1 = f32[2,5,7] parameter(1) ROOT d = f32[2,5,32] dot(p0, p1), lhs_batch_dims={0,1}, rhs_batch_dims={0,1}, lhs_contracting_dims={3}, rhs_contracting_dims={2} })"; const char* expected = R"() })"; RunAndFilecheckHloRewrite(hlo, GemvRewriter(), expected); } TEST_F(GemvRewriterTest, DotNotRewriteVectorVectorMultiplication) { const char* hlo = R"( HloModule m ENTRY e { p0 = f32[7] parameter(0) p1 = f32[7] parameter(1) ROOT d = f32[] dot(p0, p1), lhs_contracting_dims={0}, rhs_contracting_dims={0} })"; RunAndFilecheckHloRewrite(hlo, GemvRewriter(), std::nullopt); } TEST_F(GemvRewriterTest, DotNotRewriteMatrixMatrixMultiplication) { const char* hlo = R"( HloModule m ENTRY e { p0 = f32[5,7] parameter(0) p1 = f32[7,32] parameter(1) ROOT d = f32[5,32] dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} })"; RunAndFilecheckHloRewrite(hlo, GemvRewriter(), std::nullopt); } TEST_F(GemvRewriterTest, DoNotRewriteDotsWithNonNormalizedLayout) { const char* hlo = R"( HloModule m ENTRY e { p0 = f32[5,32,7]{2,1,0} parameter(0) p1 = f32[5,7]{0,1} parameter(1) ROOT d = f32[5,32]{0,1} dot(p0, p1), lhs_batch_dims={0}, rhs_batch_dims={0}, lhs_contracting_dims={2}, rhs_contracting_dims={1} })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo)); GemvRewriter rewriter; absl::StatusOr<bool> result = this->RunHloPass(&rewriter, module.get()); EXPECT_FALSE(result.ok()); EXPECT_EQ(result.status().message(), "Layout is not normalized."); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/gemv_rewriter.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/gemv_rewriter_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
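GemvRewriter, shown in the record above, canonicalizes matrix-vector and vector-matrix dots into matrix-matrix form: the operand lacking a non-contracting dimension receives a trailing unit dimension via a bitcast, the dot is re-emitted with the widened output shape, and a final bitcast restores the original result shape. Below is a hedged sketch of applying it as a standalone pass; the wrapper name is illustrative only, and the "Layout is not normalized." error exercised by the last test surfaces through the returned StatusOr.

// Sketch: apply GemvRewriter to a module. For the first test case above,
// f32[32,7] dot f32[7] becomes f32[32,7] dot f32[7,1] -> f32[32,1],
// followed by a bitcast back to f32[32].
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/transforms/gemv_rewriter.h"

namespace xla::gpu {

absl::StatusOr<bool> RewriteGemvDots(HloModule* module) {
  GemvRewriter rewriter;
  // Returns false (no change) for vector-vector and matrix-matrix dots, and a
  // non-OK status if an operand layout is not descending/normalized.
  return rewriter.Run(module);
}

}  // namespace xla::gpu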
822ba1e5-6daf-48a0-96e7-6ab374d53667
cpp
tensorflow/tensorflow
sanitize_constant_names
third_party/xla/xla/service/gpu/transforms/sanitize_constant_names.cc
third_party/xla/xla/service/gpu/transforms/sanitize_constant_names_test.cc
#include "xla/service/gpu/transforms/sanitize_constant_names.h" #include <string> #include "absl/container/flat_hash_set.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/llvm_ir/buffer_assignment_util.h" #include "xla/service/name_uniquer.h" #include "tsl/platform/logging.h" namespace xla { namespace gpu { absl::StatusOr<bool> SanitizeConstantNames::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { bool changed = false; NameUniquer instr_name_uniquer("_"); for (HloComputation* computation : module->computations(execution_threads)) { for (HloInstruction* instr : computation->instructions()) { if (instr->opcode() == HloOpcode::kConstant) { continue; } instr_name_uniquer.GetUniqueName(instr->name()); } } for (HloComputation* computation : module->computations(execution_threads)) { for (HloInstruction* instr : computation->instructions()) { if (instr->opcode() != HloOpcode::kConstant) { continue; } std::string sanitized_name = llvm_ir::SanitizeConstantName(*instr); instr->SetAndSanitizeName(sanitized_name); instr->UniquifyName(&instr_name_uniquer); module->instruction_name_uniquer().GetUniqueName(instr->name()); changed = true; } } return changed; } } }
#include "xla/service/gpu/transforms/sanitize_constant_names.h" #include <cstdint> #include <memory> #include <utility> #include "xla/hlo/ir/hlo_instruction.h" #include "xla/literal_util.h" #include "xla/service/pattern_matcher.h" #include "xla/service/pattern_matcher_gmock.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/statusor.h" #include "tsl/platform/test.h" namespace xla { namespace gpu { namespace { namespace m = ::xla::match; using SanitizeConstantNamesTest = HloTestBase; TEST_F(SanitizeConstantNamesTest, InstructionNameWithHyphenSanitized) { const char *const kHloString = R"( HloModule HyphenInInstructionName ENTRY kernelEntry { ROOT equal-to = s32[2]{0} constant({42, 73}) })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHloString)); EXPECT_TRUE(SanitizeConstantNames().Run(module.get()).value()); HloInstruction *root = module->entry_computation()->root_instruction(); EXPECT_EQ(root->name(), "equal_to"); } TEST_F(SanitizeConstantNamesTest, InstructionNameWithDotSanitized) { const char *const kHloString = R"( HloModule HyphenInInstructionName ENTRY kernelEntry { ROOT equal.to = s32[2]{0} constant({42, 73}) })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHloString)); EXPECT_TRUE(SanitizeConstantNames().Run(module.get()).value()); HloInstruction *root = module->entry_computation()->root_instruction(); EXPECT_EQ(root->name(), "equal_to"); } TEST_F(SanitizeConstantNamesTest, NewInstructionNameRegisteredWithModule) { const char *const kHloString = R"( HloModule HyphenInInstructionName ENTRY kernelEntry { ROOT equal.to = s32[2]{0} constant({42, 73}) })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHloString)); EXPECT_TRUE(SanitizeConstantNames().Run(module.get()).value()); HloInstruction *root = module->entry_computation()->root_instruction(); EXPECT_EQ(root->name(), "equal_to"); auto constant_instr = HloInstruction::CreateConstant(LiteralUtil::CreateR0<int32_t>(1)); constant_instr->SetAndSanitizeName("equal_to"); module->entry_computation()->AddInstruction(std::move(constant_instr)); EXPECT_THAT(FindInstruction(module.get(), "equal_to.1"), GmockMatch(m::Constant())); } TEST_F(SanitizeConstantNamesTest, BufferSanitizedNameCollisionResolved) { const char *const kHloString = R"( HloModule BufferSanitizedName ENTRY kernelEntry { equal.to = s32[2]{0} constant({42, 73}) equal-to = s32[2]{0} constant({67, 3}) ROOT equal_to = s32[2]{0} add(equal.to, equal-to) })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHloString)); EXPECT_TRUE(SanitizeConstantNames().Run(module.get()).value()); EXPECT_THAT(FindInstruction(module.get(), "equal_to_1"), GmockMatch(m::Constant())); EXPECT_THAT(FindInstruction(module.get(), "equal_to_2"), GmockMatch(m::Constant())); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/sanitize_constant_names.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/sanitize_constant_names_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
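SanitizeConstantNames, from the record above, rewrites constant instruction names through llvm_ir::SanitizeConstantName and re-uniquifies them against all non-constant instruction names, so names like "equal-to" or "equal.to" become "equal_to" (with suffixes on collision, as the last test asserts). A short usage sketch follows; the wrapper function name is an assumption for illustration.

// Sketch: run SanitizeConstantNames over a module so constant names become
// valid symbols (e.g. "equal-to" / "equal.to" -> "equal_to").
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/transforms/sanitize_constant_names.h"

namespace xla::gpu {

absl::StatusOr<bool> SanitizeModuleConstantNames(HloModule* module) {
  SanitizeConstantNames pass;
  // Reports a change whenever the module contains constant instructions whose
  // names were sanitized and re-uniquified.
  return pass.Run(module);
}

}  // namespace xla::gpu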
c59397db-329b-4522-9467-0a871a153527
cpp
tensorflow/tensorflow
all_gather_dynamic_slice_simplifier
third_party/xla/xla/service/gpu/transforms/all_gather_dynamic_slice_simplifier.cc
third_party/xla/xla/service/gpu/transforms/all_gather_dynamic_slice_simplifier_test.cc
#include "xla/service/gpu/transforms/all_gather_dynamic_slice_simplifier.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/service/collective_opt_utils.h" namespace xla { bool AllGatherDynamicSliceSimplifier::InstructionMatchesPattern( HloInstruction* instruction) { if (instruction->opcode() != HloOpcode::kDynamicSlice) { return false; } HloDynamicSliceInstruction* dynamic_slice = Cast<HloDynamicSliceInstruction>(instruction); HloInstruction* operand = dynamic_slice->mutable_operand(0); bool is_reshape = operand->opcode() == HloOpcode::kReshape; bool is_all_gather = operand->opcode() == HloOpcode::kAllGather; if (!is_reshape && !is_all_gather) { return false; } if (is_reshape && operand->operand(0)->opcode() != HloOpcode::kAllGather) { return false; } const HloModuleConfig& config = instruction->GetModule()->config(); HloAllGatherInstruction* all_gather = is_reshape ? Cast<HloAllGatherInstruction>(operand->mutable_operand(0)) : Cast<HloAllGatherInstruction>(operand); bool match = AllGatherDynamicSliceCancellation( all_gather, config.num_partitions(), config.replica_count(), true, true, 1, HloPredicateIsOp<HloOpcode::kPartitionId>, HloPredicateIsOp<HloOpcode::kReplicaId>, false, true); return match; } absl::StatusOr<HloInstruction*> AllGatherDynamicSliceSimplifier::ExpandInstruction( HloInstruction* instruction) { HloDynamicSliceInstruction* dynamic_slice = Cast<HloDynamicSliceInstruction>(instruction); HloInstruction* operand = dynamic_slice->mutable_operand(0); if (operand->opcode() != HloOpcode::kReshape) { return operand->mutable_operand(0); } HloReshapeInstruction* reshape = Cast<HloReshapeInstruction>(operand); HloAllGatherInstruction* all_gather = Cast<HloAllGatherInstruction>(reshape->mutable_operand(0)); HloInstruction* all_gather_input = all_gather->mutable_operand(0); auto* new_reshape = instruction->parent()->AddInstruction( HloInstruction::CreateReshape(dynamic_slice->shape(), all_gather_input)); return new_reshape; } }
#include "xla/service/gpu/transforms/all_gather_dynamic_slice_simplifier.h" #include <cstdint> #include <memory> #include <vector> #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/tests/hlo_test_base.h" #include "xla/xla_data.pb.h" namespace xla { namespace gpu { namespace { using ::testing::Matcher; namespace op = xla::testing::opcode_matchers; class AllGatherDynamicSliceSimplifierTest : public HloTestBase { public: absl::StatusOr<std::unique_ptr<HloModule>> RunPass( absl::string_view hlo_module, int64_t num_replicas, int64_t num_partitions, bool expect_change) { HloModuleConfig config = GetModuleConfigForTest( num_replicas, num_partitions); config.set_use_spmd_partitioning(num_partitions > 1); TF_ASSIGN_OR_RETURN(auto module, ParseAndReturnVerifiedModule(hlo_module, config)); auto changed = AllGatherDynamicSliceSimplifier().Run(module.get()); if (!changed.ok()) { return changed.status(); } EXPECT_EQ(changed.value(), expect_change); return std::move(module); } }; TEST_F(AllGatherDynamicSliceSimplifierTest, AllPartitions) { absl::string_view hlo_string = R"( HloModule AllGather ENTRY %AllGather { %param = f32[32,8,128]{2,1,0} parameter(0) %ag = f32[256,8,128]{2,1,0} all-gather(%param), replica_groups={{0,1,2,3,4,5,6,7}}, dimensions={0}, channel_id=1, use_global_device_ids=true %pid = u32[] partition-id() %pid_s32 = s32[] convert(%pid) %slice_size = s32[] constant(32) %offset = s32[] multiply(%pid_s32, %slice_size) %zero = s32[] constant(0) ROOT %ds = f32[32,8,128]{2,1,0} dynamic-slice(%ag, %offset, %zero, %zero), dynamic_slice_sizes={32,8,128} } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, 1, 8, true)); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Parameter(0)); } TEST_F(AllGatherDynamicSliceSimplifierTest, AllReplicasWithReshape) { absl::string_view hlo_string = R"( HloModule AllGather ENTRY %AllGather { %param = f32[32,8,128]{2,1,0} parameter(0) %ag = f32[256,8,128]{2,1,0} all-gather(%param), replica_groups={{0,1,2,3,4,5,6,7}}, dimensions={0}, channel_id=1, use_global_device_ids=true %reshape = f32[256,8,64,2]{3,2,1,0} reshape(%ag) %pid = u32[] partition-id() %pid_s32 = s32[] convert(%pid) %slice_size = s32[] constant(32) %offset = s32[] multiply(%pid_s32, %slice_size) %zero = s32[] constant(0) ROOT %ds = f32[32,8,64,2]{3,2,1,0} dynamic-slice(%reshape, %offset, %zero, %zero, %zero), dynamic_slice_sizes={32,8,64,2} } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, 1, 8, true)); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Reshape(op::Parameter(0))); } TEST_F(AllGatherDynamicSliceSimplifierTest, AllPartitionsWithReshapeOnSliceDim) { absl::string_view hlo_string = R"( HloModule AllGather ENTRY %AllGather { %param = f32[32,8,128]{2,1,0} parameter(0) %ag = f32[256,8,128]{2,1,0} all-gather(%param), replica_groups={{0,1,2,3,4,5,6,7}}, dimensions={0}, channel_id=1, use_global_device_ids=true %reshape = f32[2048,128]{1,0} reshape(%ag) %pid = u32[] partition-id() %pid_s32 = s32[] convert(%pid) %slice_size = s32[] constant(256) %offset = s32[] multiply(%pid_s32, %slice_size) %zero = s32[] constant(0) ROOT %ds = f32[256,128]{1,0} dynamic-slice(%reshape, %offset, %zero), dynamic_slice_sizes={256,128} } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, 1, 8, false)); 
EXPECT_THAT(module->entry_computation()->root_instruction(), op::DynamicSlice( op::Reshape(op::AllGather(op::Parameter(0))), op::Multiply(op::Convert(op::PartitionId()), op::Constant()), op::Constant())); } TEST_F(AllGatherDynamicSliceSimplifierTest, NoAllGather) { absl::string_view hlo_string = R"( HloModule NoAllGather ENTRY %NoAllGather { %param = f32[32,8,128]{2,1,0} parameter(0) %pid = u32[] partition-id() %pid_s32 = s32[] convert(%pid) %slice_size = s32[] constant(32) %offset = s32[] multiply(%pid_s32, %slice_size) %zero = s32[] constant(0) ROOT %ds = f32[32,8,128]{2,1,0} dynamic-slice(%param, %offset, %zero, %zero), dynamic_slice_sizes={32,8,128} } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, 1, 1, false)); EXPECT_THAT(module->entry_computation()->root_instruction(), op::DynamicSlice( op::Parameter(0), op::Multiply(op::Convert(op::PartitionId()), op::Constant()), op::Constant(), op::Constant())); } TEST_F(AllGatherDynamicSliceSimplifierTest, IncorrectAllGatherDimension) { absl::string_view hlo_string = R"( HloModule IncorrectAllGatherDimension ENTRY %IncorrectAllGatherDimension { %param = f32[32,8,128]{2,1,0} parameter(0) %ag = f32[32,64,128]{2,1,0} all-gather(%param), replica_groups={}, dimensions={1}, channel_id=1 %pid = u32[] partition-id() %pid_s32 = s32[] convert(%pid) %slice_size = s32[] constant(8) %offset = s32[] multiply(%pid_s32, %slice_size) %zero = s32[] constant(0) ROOT %ds = f32[32,8,128]{2,1,0} dynamic-slice(%ag, %zero, %offset, %zero), dynamic_slice_sizes={32,8,128} } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, 8, 1, false)); EXPECT_THAT(module->entry_computation()->root_instruction(), op::DynamicSlice( op::AllGather(op::Parameter(0)), op::Constant(), op::Multiply(op::Convert(op::PartitionId()), op::Constant()), op::Constant())); } TEST_F(AllGatherDynamicSliceSimplifierTest, AllReplicasWithReshapeMultipleUsers) { absl::string_view hlo_string = R"( HloModule AllGather ENTRY %AllGather { %param = f32[32,8,128]{2,1,0} parameter(0) %ag = f32[256,8,128]{2,1,0} all-gather(%param), replica_groups={{0,1,2,3,4,5,6,7}}, dimensions={0}, channel_id=1, use_global_device_ids=true %reshape = f32[256,8,64,2]{3,2,1,0} reshape(%ag) %pid = u32[] partition-id() %pid_s32 = s32[] convert(%pid) %slice_size = s32[] constant(32) %offset = s32[] multiply(%pid_s32, %slice_size) %zero = s32[] constant(0) %ds = f32[32,8,64,2]{3,2,1,0} dynamic-slice(%reshape, %offset, %zero, %zero, %zero), dynamic_slice_sizes={32,8,64,2} ROOT %tuple = (f32[32,8,64,2]{3,2,1,0}, f32[256,8,128]{2,1,0}) tuple(%ds, %ag) } )"; TF_ASSERT_OK_AND_ASSIGN(auto module, RunPass(hlo_string, 1, 8, true)); EXPECT_THAT(module->entry_computation()->root_instruction(), op::Tuple(op::Reshape(op::Parameter(0)), op::AllGather(op::Parameter(0)))); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/all_gather_dynamic_slice_simplifier.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/all_gather_dynamic_slice_simplifier_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
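An illustrative, unverified variant of the AllPartitions test above: it runs AllGatherDynamicSliceSimplifier directly (without the RunPass helper) on an assumed 4-partition module. The headers, matchers, and config helpers are the ones already used in the record; the HLO text, module name, and test name are assumptions, and the expected cancellation follows the pattern shown in the record's own tests.

#include <memory>

#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/utils/hlo_matchers.h"
#include "xla/service/gpu/transforms/all_gather_dynamic_slice_simplifier.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"

namespace xla {
namespace gpu {
namespace {

namespace op = xla::testing::opcode_matchers;

using AllGatherDynamicSliceSimplifierSketchTest = HloTestBase;

TEST_F(AllGatherDynamicSliceSimplifierSketchTest, CancelsPerPartitionSlice) {
  // Sketch: each partition dynamic-slices out exactly its own 32-row shard of
  // the all-gather result, so the all-gather/dynamic-slice pair should cancel
  // back to the parameter.
  const char* const kHloString = R"(
HloModule m

ENTRY main {
  %param = f32[32,8,128]{2,1,0} parameter(0)
  %ag = f32[128,8,128]{2,1,0} all-gather(%param), replica_groups={{0,1,2,3}}, dimensions={0}, channel_id=1, use_global_device_ids=true
  %pid = u32[] partition-id()
  %pid_s32 = s32[] convert(%pid)
  %slice_size = s32[] constant(32)
  %offset = s32[] multiply(%pid_s32, %slice_size)
  %zero = s32[] constant(0)
  ROOT %ds = f32[32,8,128]{2,1,0} dynamic-slice(%ag, %offset, %zero, %zero), dynamic_slice_sizes={32,8,128}
})";
  HloModuleConfig config =
      GetModuleConfigForTest(/*replica_count=*/1, /*num_partitions=*/4);
  config.set_use_spmd_partitioning(true);
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloString, config));
  TF_ASSERT_OK_AND_ASSIGN(bool changed,
                          AllGatherDynamicSliceSimplifier().Run(module.get()));
  EXPECT_TRUE(changed);
  EXPECT_THAT(module->entry_computation()->root_instruction(),
              op::Parameter(0));
}

}  // namespace
}  // namespace gpu
}  // namespace xla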
7244c624-0159-4253-9a5a-0c37c152e4d2
cpp
tensorflow/tensorflow
fusion_wrapper
third_party/xla/xla/service/gpu/transforms/fusion_wrapper.cc
third_party/xla/xla/service/gpu/transforms/fusion_wrapper_test.cc
#include "xla/service/gpu/transforms/fusion_wrapper.h" #include <functional> #include "absl/container/flat_hash_set.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/gpu/gpu_fusible.h" #include "tsl/platform/errors.h" namespace xla { namespace gpu { absl::StatusOr<bool> FusionWrapper::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { auto instructions = module->entry_computation()->MakeInstructionPostOrder(); bool changed = false; std::function<absl::Status(HloInstruction*)> handle_instruction; handle_instruction = [&](HloInstruction* instruction) -> absl::Status { switch (instruction->opcode()) { case HloOpcode::kConditional: case HloOpcode::kWhile: for (auto* computation : instruction->called_computations()) { for (auto* inner_instruction : computation->MakeInstructionPostOrder()) { TF_RETURN_IF_ERROR(handle_instruction(inner_instruction)); } } break; case HloOpcode::kAbs: case HloOpcode::kAdd: case HloOpcode::kAnd: case HloOpcode::kAtan2: case HloOpcode::kBitcastConvert: case HloOpcode::kBroadcast: case HloOpcode::kCeil: case HloOpcode::kCbrt: case HloOpcode::kClamp: case HloOpcode::kClz: case HloOpcode::kCompare: case HloOpcode::kComplex: case HloOpcode::kConcatenate: case HloOpcode::kConvolution: case HloOpcode::kConvert: case HloOpcode::kCopy: case HloOpcode::kCos: case HloOpcode::kDivide: case HloOpcode::kDot: case HloOpcode::kDynamicSlice: case HloOpcode::kDynamicUpdateSlice: case HloOpcode::kErf: case HloOpcode::kExp: case HloOpcode::kExpm1: case HloOpcode::kFloor: case HloOpcode::kGather: case HloOpcode::kImag: case HloOpcode::kIota: case HloOpcode::kIsFinite: case HloOpcode::kLog: case HloOpcode::kLog1p: case HloOpcode::kMap: case HloOpcode::kMaximum: case HloOpcode::kMinimum: case HloOpcode::kMultiply: case HloOpcode::kNegate: case HloOpcode::kNot: case HloOpcode::kOr: case HloOpcode::kPad: case HloOpcode::kPopulationCount: case HloOpcode::kPower: case HloOpcode::kReal: case HloOpcode::kReshape: case HloOpcode::kReduce: case HloOpcode::kReducePrecision: case HloOpcode::kReduceWindow: case HloOpcode::kRemainder: case HloOpcode::kReverse: case HloOpcode::kRoundNearestAfz: case HloOpcode::kRoundNearestEven: case HloOpcode::kRsqrt: case HloOpcode::kScatter: case HloOpcode::kSelect: case HloOpcode::kShiftLeft: case HloOpcode::kShiftRightLogical: case HloOpcode::kShiftRightArithmetic: case HloOpcode::kSign: case HloOpcode::kSin: case HloOpcode::kSlice: case HloOpcode::kSqrt: case HloOpcode::kSubtract: case HloOpcode::kStochasticConvert: case HloOpcode::kTan: case HloOpcode::kTanh: case HloOpcode::kTranspose: case HloOpcode::kXor: { auto* computation = instruction->parent(); auto* fusion_instruction = computation->AddInstruction(HloInstruction::CreateFusion( instruction->shape(), ChooseFusionKind(*instruction, *instruction), instruction)); const absl::string_view wrapped_opcode = HloOpcodeString(instruction->opcode()); module->SetAndUniquifyInstrName( fusion_instruction, absl::StrCat("wrapped_", wrapped_opcode)); module->SetAndUniquifyComputationName( fusion_instruction->fused_instructions_computation(), absl::StrCat("wrapped_", wrapped_opcode, "_computation")); if (module->has_schedule()) { module->schedule().replace_instruction(computation, instruction, fusion_instruction); } TF_RETURN_IF_ERROR( fusion_instruction->CopyAllControlDepsFrom(instruction)); 
TF_RETURN_IF_ERROR(instruction->DropAllControlDeps()); TF_RETURN_IF_ERROR(instruction->ReplaceAllUsesWith(fusion_instruction)); TF_RETURN_IF_ERROR(computation->RemoveInstruction(instruction)); changed = true; break; } default: break; } return absl::OkStatus(); }; for (auto* instruction : instructions) { TF_RETURN_IF_ERROR(handle_instruction(instruction)); } return changed; } } }
#include "xla/service/gpu/transforms/fusion_wrapper.h" #include <optional> #include <gtest/gtest.h> #include "xla/tests/hlo_test_base.h" namespace xla { namespace gpu { namespace { class FusionWrapperTest : public HloTestBase {}; TEST_F(FusionWrapperTest, ConvolutionWorks) { RunAndFilecheckHloRewrite(R"(HloModule TestModule ENTRY TestComputation { input = f32[1,10,1,10,5,20]{5,4,3,2,1,0} parameter(0) kernel = f32[20,1,2,1,4,15]{5,4,3,2,1,0} parameter(1) ROOT conv = f32[15,1,9,1,7,5]{5,4,3,2,1,0} convolution(input, kernel), dim_labels=0123bf_i0123o->f0123b, window={size=1x2x1x4} })", FusionWrapper(), R"( } TEST_F(FusionWrapperTest, SimpleOp) { RunAndFilecheckHloRewrite(R"( HloModule TestModule ENTRY TestComputation { p0 = f16[30,41] parameter(0) p1 = f16[30,41] parameter(1) ROOT result = f16[60, 41] concatenate(p0, p1), dimensions={0} })", FusionWrapper(), R"( } TEST_F(FusionWrapperTest, Scatter) { RunAndFilecheckHloRewrite(R"( HloModule ScatterIntoScalar update_s32 { lhs = s32[] parameter(0) ROOT rhs = s32[] parameter(1) } ENTRY main { parameter.1 = s32[] parameter(0) parameter.2 = s32[0]{0} parameter(1) parameter.3 = s32[] parameter(2) ROOT scatter_ScatterIntoScalar = s32[] scatter(parameter.1, parameter.2, parameter.3), update_window_dims={}, inserted_window_dims={}, scatter_dims_to_operand_dims={}, index_vector_dim=0, to_apply=update_s32 })", FusionWrapper(), R"( } TEST_F(FusionWrapperTest, ControlDependency) { RunAndFilecheckHloRewrite(R"( HloModule TestModule fusion { ROOT param = f32[] parameter(0) } ENTRY main { param = f32[] parameter(0) fusion = f32[] fusion(param), kind=kLoop, calls=fusion constant_one = f32[] constant(1) ROOT add = f32[] add(param, constant_one), control-predecessors={fusion} })", FusionWrapper(), R"( } TEST_F(FusionWrapperTest, While) { RunAndFilecheckHloRewrite(R"( HloModule While %body { %parameter.5 = (f32[5]{0}) parameter(0) %constant_8 = f32[] constant(0) %broadcast.9 = f32[5]{0} broadcast(f32[] %constant_8), dimensions={} ROOT %tuple.2 = (f32[5]{0}) tuple(f32[5]{0} %broadcast.9) } %cond { %parameter.12 = (f32[5]{0}) parameter(0) ROOT %constant_1 = pred[] constant(false) } ENTRY %main (parameter.1: f32[5]) -> (f32[5]) { %parameter.1 = f32[5]{0} parameter(0) %copy.3 = f32[5]{0} copy(f32[5]{0} %parameter.1) %tuple = (f32[5]{0}) tuple(f32[5]{0} %copy.3) ROOT %while.19 = (f32[5]{0}) while((f32[5]{0}) %tuple), condition=%cond, body=%body })", FusionWrapper(), R"( } TEST_F(FusionWrapperTest, WhileInFusion) { RunAndFilecheckHloRewrite(R"( HloModule While %body { %parameter.5 = (f32[5]{0}) parameter(0) %constant_8 = f32[] constant(0) %broadcast.9 = f32[5]{0} broadcast(f32[] %constant_8), dimensions={} ROOT %tuple.2 = (f32[5]{0}) tuple(f32[5]{0} %broadcast.9) } %cond { %parameter.12 = (f32[5]{0}) parameter(0) ROOT %constant_1 = pred[] constant(false) } %fusion { %parameter.1 = f32[5]{0} parameter(0) %copy.3 = f32[5]{0} copy(f32[5]{0} %parameter.1) %tuple = (f32[5]{0}) tuple(f32[5]{0} %copy.3) ROOT %while.19 = (f32[5]{0}) while((f32[5]{0}) %tuple), condition=%cond, body=%body } ENTRY %main (parameter.1: f32[5]) -> (f32[5]) { %parameter.1 = f32[5]{0} parameter(0) ROOT %fusion = (f32[5]{0}) fusion(f32[5]{0} %parameter.1), kind=kLoop, calls=%fusion })", FusionWrapper(), std::nullopt); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/fusion_wrapper.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/fusion_wrapper_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
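A hedged, self-contained sketch of what FusionWrapper does to a single unfused elementwise op, using only APIs that appear in the record (default-constructed FusionWrapper, HloTestBase, ParseAndReturnVerifiedModule). The module text and test name are made up for illustration, and the expectations reflect the wrapping logic in the pass source above rather than the FileCheck patterns used by the record's tests.

#include <memory>

#include <gtest/gtest.h>
#include "xla/hlo/ir/hlo_instruction.h"
#include "xla/hlo/ir/hlo_opcode.h"
#include "xla/service/gpu/transforms/fusion_wrapper.h"
#include "xla/tests/hlo_test_base.h"
#include "tsl/platform/statusor.h"

namespace xla {
namespace gpu {
namespace {

using FusionWrapperSketchTest = HloTestBase;

TEST_F(FusionWrapperSketchTest, WrapsSingleElementwiseOp) {
  // Illustrative sketch: an unfused ROOT add should be replaced by a fusion
  // instruction (named "wrapped_add" by the pass) whose fused computation has
  // the original add as its root.
  const char* const kHloString = R"(
HloModule m

ENTRY main {
  p0 = f32[8] parameter(0)
  p1 = f32[8] parameter(1)
  ROOT add = f32[8] add(p0, p1)
})";
  TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module,
                          ParseAndReturnVerifiedModule(kHloString));
  TF_ASSERT_OK_AND_ASSIGN(bool changed, FusionWrapper().Run(module.get()));
  EXPECT_TRUE(changed);
  const HloInstruction* root = module->entry_computation()->root_instruction();
  EXPECT_EQ(root->opcode(), HloOpcode::kFusion);
  EXPECT_EQ(root->fused_expression_root()->opcode(), HloOpcode::kAdd);
}

}  // namespace
}  // namespace gpu
}  // namespace xla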
351585fa-6996-47ca-985c-e882662a3ca9
cpp
tensorflow/tensorflow
double_buffer_loop_unrolling
third_party/xla/xla/service/gpu/transforms/double_buffer_loop_unrolling.cc
third_party/xla/xla/service/gpu/transforms/double_buffer_loop_unrolling_test.cc
#include "xla/service/gpu/transforms/double_buffer_loop_unrolling.h" #include <algorithm> #include <cmath> #include <cstdint> #include <iterator> #include <optional> #include <string> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_clone_context.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instruction_utils.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_query.h" #include "xla/service/collective_ops_utils.h" #include "xla/service/flatten_call_graph.h" #include "xla/service/hlo_parser.h" #include "xla/status_macros.h" #include "xla/util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/status.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { void SetChannelIdForNewCollective(HloInstruction* new_instr, const HloModule* module) { absl::flat_hash_map<int64_t, int64_t> old_to_new_channel_id_map; absl::flat_hash_map<int64_t, HloComputation*> channel_id_comp_map; if (new_instr->IsAsynchronous() && hlo_query::IsCollectiveCommunicationOp( new_instr->async_wrapped_opcode())) { HloInstruction* wrapped_instr = DynCast<HloAsyncInstruction>(new_instr)->async_wrapped_instruction(); int64_t old_channel_id = *wrapped_instr->channel_id(); int64_t new_channel_id = old_to_new_channel_id_map[old_channel_id]; if (old_to_new_channel_id_map.find(old_channel_id) == old_to_new_channel_id_map.end()) { new_channel_id = hlo_query::NextChannelId(*module); VLOG(2) << "Generated new channel id " << new_channel_id; old_to_new_channel_id_map[old_channel_id] = new_channel_id; } VLOG(2) << "Setting channel id to " << new_channel_id; wrapped_instr->set_channel_id(new_channel_id); if (channel_id_comp_map.find(new_channel_id) == channel_id_comp_map.end()) { channel_id_comp_map[new_channel_id] = new_instr->async_wrapped_computation(); } else { channel_id_comp_map[new_channel_id]->AddAsyncStart(new_instr); } } else if (hlo_query::IsCollectiveCommunicationOp(new_instr->opcode()) || hlo_query::IsAsyncCollectiveStartOp(new_instr)) { new_instr->set_channel_id(hlo_query::NextChannelId(*module)); } } using Interval = std::pair<int64_t, int64_t>; absl::StatusOr<std::vector<Interval>> ParseVectorOfPairs( absl::string_view str) { TF_ASSIGN_OR_RETURN(std::vector<ReplicaGroup> replica_groups, ParseReplicaGroupsOnly(str)); std::vector<Interval> res; res.reserve(replica_groups.size()); for (const ReplicaGroup& replica_group : replica_groups) { TF_RET_CHECK(replica_group.replica_ids_size() == 2); int64_t a = replica_group.replica_ids(0); int64_t b = replica_group.replica_ids(1); res.emplace_back(a, b); } return res; } absl::Status SetSendRecvValidationForPeeledInstr(HloInstruction* new_instr, HloInstruction* old_instr) { TF_RET_CHECK( new_instr->opcode() == old_instr->opcode() && "cloned instruction and original instruction have different opcodes"); if (!HloPredicateIsOp<HloOpcode::kCollectivePermute, HloOpcode::kCollectivePermuteStart, HloOpcode::kSend, HloOpcode::kRecv>(old_instr)) { return absl::OkStatus(); } const auto& attribute_map = new_instr->frontend_attributes().map(); if 
(!attribute_map.contains(kSendRecvValidationAttr)) { return absl::OkStatus(); } VLOG(3) << "Original send-recv iterations: " << attribute_map.at(kSendRecvValidationAttr); TF_ASSIGN_OR_RETURN( auto send_recv_validation_attr, ParseVectorOfPairs(attribute_map.at(kSendRecvValidationAttr))); uint64_t n_pairs = send_recv_validation_attr.size(); if (n_pairs == 0) { return absl::OkStatus(); } std::vector<Interval> send_recv_validation_attr_updated(n_pairs, {1, 0}); for (std::uint64_t i = 0; i < send_recv_validation_attr.size(); i++) { if (send_recv_validation_attr[i].first <= 0 && send_recv_validation_attr[i].second >= 0) { send_recv_validation_attr_updated[i] = {0, 0}; } } hlo_instruction_utils::AddOrUpdateVectorOfPairsAsAttribute( new_instr, kSendRecvValidationAttr, send_recv_validation_attr_updated); return absl::OkStatus(); } absl::Status SetSendRecvValidation(HloInstruction* cp1, HloInstruction* cp2, bool is_peeled) { TF_RET_CHECK( cp2->opcode() == cp1->opcode() && "cloned instruction and original instruction have different opcodes"); if (!HloPredicateIsOp<HloOpcode::kCollectivePermute, HloOpcode::kCollectivePermuteStart, HloOpcode::kSend, HloOpcode::kRecv>(cp1)) { return absl::OkStatus(); } const auto& attribute_map = cp2->frontend_attributes().map(); if (!attribute_map.contains(kSendRecvValidationAttr)) { return absl::OkStatus(); } VLOG(3) << "Original send-recv iterations: " << attribute_map.at(kSendRecvValidationAttr); TF_ASSIGN_OR_RETURN( auto send_recv_validation_attr, ParseVectorOfPairs(attribute_map.at(kSendRecvValidationAttr))); if (send_recv_validation_attr.size() == 0) { return absl::OkStatus(); } std::vector<Interval> send_recv_iterations_new_instr1, send_recv_iterations_new_instr2; send_recv_iterations_new_instr1.reserve(send_recv_validation_attr.size()); send_recv_iterations_new_instr2.reserve(send_recv_validation_attr.size()); for (const Interval& pair : send_recv_validation_attr) { int64_t a = pair.first; int64_t b = pair.second; if (is_peeled) { send_recv_iterations_new_instr1.emplace_back( std::floor(a / 2.0), std::max(0.0, std::floor((b - 1) / 2.0))); send_recv_iterations_new_instr2.emplace_back( std::max(0.0, std::floor((a - 1) / 2.0)), std::max(0.0, std::floor((b - 2) / 2.0))); } else { send_recv_iterations_new_instr1.emplace_back(std::floor((a + 1) / 2.0), std::floor(b / 2.0)); send_recv_iterations_new_instr2.emplace_back( std::floor(a / 2.0), std::max(0.0, std::floor((b - 1) / 2.0))); } } hlo_instruction_utils::AddOrUpdateVectorOfPairsAsAttribute( cp1, kSendRecvValidationAttr, send_recv_iterations_new_instr1); hlo_instruction_utils::AddOrUpdateVectorOfPairsAsAttribute( cp2, kSendRecvValidationAttr, send_recv_iterations_new_instr2); VLOG(3) << "Updated send-recv iterations for " << cp1->name() << " : " << cp1->frontend_attributes().map().at(kSendRecvValidationAttr); VLOG(3) << "Updated send-recv iterations for " << cp2->name() << " : " << cp2->frontend_attributes().map().at(kSendRecvValidationAttr); return absl::OkStatus(); } absl::Status HandleControlDependencies( const HloComputation* while_body, const absl::flat_hash_map<HloInstruction*, HloInstruction*>& old_to_new_map, HloInstruction::InstructionVector* old_loop_roots, HloInstruction* input_parameter, const absl::flat_hash_set<HloInstruction*>& skip_control_dep_injection) { for (HloInstruction* old_instr : while_body->MakeInstructionPostOrder()) { if (old_to_new_map.find(old_instr) != old_to_new_map.end()) { HloInstruction* new_instr = old_to_new_map.at(old_instr); VLOG(2) << "Processing control predecessors for " 
<< new_instr->ToString(); std::vector<HloInstruction*> new_control_pred; new_control_pred.reserve(old_instr->control_predecessors().size()); for (HloInstruction* pred : old_instr->control_predecessors()) { if (!old_to_new_map.contains(pred)) { continue; } new_control_pred.push_back(old_to_new_map.at(pred)); } TF_RETURN_IF_ERROR(new_instr->DropAllControlDeps()); for (HloInstruction* new_pred : new_control_pred) { TF_RETURN_IF_ERROR(new_pred->AddControlDependencyTo(new_instr)); VLOG(2) << "Adding " << new_pred->ToString() << " to control dependency of " << new_instr->ToString(); } } } for (HloInstruction* input_consumer : input_parameter->users()) { for (HloInstruction* old_input : input_consumer->users()) { if (old_to_new_map.find(old_input) != old_to_new_map.end()) { HloInstruction* new_input = old_to_new_map.at(old_input); if (skip_control_dep_injection.find(old_input) == skip_control_dep_injection.end() && !IsCollective(old_input)) { for (HloInstruction* old_root : *old_loop_roots) { TF_RETURN_IF_ERROR(old_root->AddControlDependencyTo(new_input)); } } } } } return absl::OkStatus(); } absl::StatusOr<bool> FullyUnroll(HloInstruction* while_instr, HloModule* module) { HloComputation* while_body = while_instr->while_body(); bool changed = false; VLOG(2) << "Processing root " << while_body->root_instruction()->ToString(); auto loop_roots = while_body->root_instruction()->mutable_operands(); HloInstruction* input_parameter = while_body->parameter_instruction(0); VLOG(2) << "Processing input parameter " << input_parameter->ToString(); absl::flat_hash_map<HloInstruction*, HloInstruction*> old_to_new_map; absl::flat_hash_set<HloInstruction*> skip_control_dep_injection; std::string clone_suffix = "full_unroll_clone"; TF_ASSIGN_OR_RETURN(WhileLoopBackendConfig config, while_instr->backend_config<WhileLoopBackendConfig>()); std::vector<HloInstruction*> ops_to_clone; ops_to_clone.reserve(while_body->MakeInstructionPostOrder().size()); HloInstruction* old_input_parameter = input_parameter; HloInstruction* new_input_parameter = while_body->root_instruction(); absl::flat_hash_set<HloInstruction*> seen_ops; for (HloInstruction* old_instr : while_body->MakeInstructionPostOrder()) { if (seen_ops.contains(old_instr)) { continue; } ops_to_clone.push_back(old_instr); seen_ops.insert(old_instr); } int n = config.known_trip_count().n(); while (--n) { std::vector<HloInstruction*> new_ops_to_clone; old_to_new_map[old_input_parameter] = new_input_parameter; for (HloInstruction* old_instr : ops_to_clone) { if (old_to_new_map.contains(old_instr)) { continue; } VLOG(2) << "Cloning instruction " << old_instr->ToString(); std::vector<HloInstruction*> new_operands; for (HloInstruction* old_operand : old_instr->mutable_operands()) { new_operands.push_back(old_to_new_map[old_operand]); } HloInstruction* new_instr = while_body->AddInstruction(old_instr->CloneWithNewOperands( old_instr->shape(), new_operands, clone_suffix)); if (old_instr->IsElementwiseBinary() && old_instr->HasConstantOperand()) { skip_control_dep_injection.insert(old_instr); } SetChannelIdForNewCollective(new_instr, module); old_to_new_map[old_instr] = new_instr; new_ops_to_clone.push_back(new_instr); VLOG(2) << "Added instruction " << new_instr->ToString(); } while_body->set_root_instruction( old_to_new_map[while_body->root_instruction()]); VLOG(2) << "Replaced with new root " << while_body->root_instruction()->ToString(); TF_RETURN_IF_ERROR(HandleControlDependencies( while_body, old_to_new_map, &loop_roots, old_input_parameter, 
skip_control_dep_injection)); old_to_new_map.clear(); skip_control_dep_injection.clear(); loop_roots = while_body->root_instruction()->mutable_operands(); old_input_parameter = new_input_parameter; new_input_parameter = while_body->root_instruction(); ops_to_clone = std::move(new_ops_to_clone); changed = true; } WhileLoopBackendConfig new_config; new_config.mutable_known_trip_count()->set_n(1); TF_RETURN_IF_ERROR(while_instr->set_backend_config(new_config)); return changed; } absl::Status PeelInstructionsForOddTripCount(HloModule* module, HloInstruction* while_instr) { std::string suffix = "peeled_double_buffer"; absl::flat_hash_map<HloInstruction*, HloInstruction*> old_to_new_map; HloComputation* while_body = while_instr->while_body(); HloInstruction* input_parameter = while_body->parameter_instruction(0); HloInstruction* input_tuple = while_instr->mutable_operand(0); auto old_loop_roots = while_body->root_instruction()->mutable_operands(); HloComputation* parent_comp = while_instr->parent(); old_to_new_map[input_parameter] = input_tuple; for (HloInstruction* old_instr : while_body->MakeInstructionPostOrder()) { if (old_to_new_map.find(old_instr) != old_to_new_map.end()) { continue; } VLOG(2) << "Peeling instruction " << old_instr->ToString(); std::vector<HloInstruction*> new_operands(old_instr->operand_count()); for (int64_t i = 0; i < old_instr->operand_count(); i++) { new_operands[i] = old_to_new_map[old_instr->mutable_operand(i)]; } HloInstruction* new_instr = parent_comp->AddInstruction(old_instr->CloneWithNewOperands( old_instr->shape(), new_operands, suffix)); SetChannelIdForNewCollective(new_instr, module); TF_CHECK_OK(SetSendRecvValidationForPeeledInstr(new_instr, old_instr)); old_to_new_map[old_instr] = new_instr; VLOG(2) << "Added instruction " << new_instr->ToString() << " to parent computation."; } std::vector<HloInstruction*> new_roots; for (HloInstruction* instr : old_loop_roots) { new_roots.push_back(old_to_new_map[instr]); } TF_RETURN_IF_ERROR(while_instr->ReplaceOperandWith( 0, old_to_new_map[while_body->root_instruction()])); VLOG(2) << "Replaced with new input tuple " << while_instr->operand(0)->ToString(); for (HloInstruction* old_instr : while_body->MakeInstructionPostOrder()) { if (old_to_new_map.find(old_instr) != old_to_new_map.end()) { HloInstruction* new_instr = old_to_new_map[old_instr]; VLOG(2) << "Processing control predecessors for peeled instruction " << new_instr->ToString(); std::vector<HloInstruction*> new_control_pred( old_instr->control_predecessors().size()); for (HloInstruction* pred : old_instr->control_predecessors()) { new_control_pred.push_back(old_to_new_map[pred]); } TF_RETURN_IF_ERROR(new_instr->DropAllControlDeps()); for (HloInstruction* new_pred : new_control_pred) { TF_RETURN_IF_ERROR(new_pred->AddControlDependencyTo(new_instr)); VLOG(2) << "Adding " << new_pred->ToString() << " to control dependency of peeled instruction: " << new_instr->ToString(); } } } return absl::OkStatus(); } absl::StatusOr<bool> DoubleBufferingUnroll(HloInstruction* while_instr, HloModule* module) { TF_ASSIGN_OR_RETURN(auto config, while_instr->backend_config<WhileLoopBackendConfig>()); CHECK(config.has_known_trip_count()) << "Only loops with known trip count are supported."; int64_t exact_trip_count = config.known_trip_count().n(); VLOG(2) << "Processing while loop " << while_instr->ToString() << " with trip count: " << exact_trip_count; HloComputation* while_body = while_instr->while_body(); VLOG(2) << "Processing root " << 
while_body->root_instruction()->ToString(); auto old_loop_roots = while_body->root_instruction()->mutable_operands(); HloInstruction* input_parameter = while_body->parameter_instruction(0); VLOG(2) << "Processing input parameter " << input_parameter->ToString(); absl::flat_hash_map<HloInstruction*, HloInstruction*> old_to_new_map; absl::flat_hash_set<HloInstruction*> skip_control_dep_injection; bool peel_one_iteration = exact_trip_count % 2; if (peel_one_iteration) { VLOG(2) << "Found loops with odd trip count, 1 iteration will be peeled " "outside of the main body."; TF_RETURN_IF_ERROR(PeelInstructionsForOddTripCount(module, while_instr)); exact_trip_count -= 1; } std::string suffix = "double_buffer_clone"; old_to_new_map[input_parameter] = while_body->root_instruction(); for (HloInstruction* old_instr : while_body->MakeInstructionPostOrder()) { if (old_to_new_map.find(old_instr) != old_to_new_map.end()) { continue; } VLOG(2) << "Cloning instruction " << old_instr->ToString(); std::vector<HloInstruction*> new_operands; for (HloInstruction* old_operand : old_instr->mutable_operands()) { new_operands.push_back(old_to_new_map[old_operand]); } HloInstruction* new_instr = while_body->AddInstruction(old_instr->CloneWithNewOperands( old_instr->shape(), new_operands, suffix)); if (old_instr->IsElementwiseBinary() && old_instr->HasConstantOperand()) { skip_control_dep_injection.insert(old_instr); } SetChannelIdForNewCollective(new_instr, module); TF_CHECK_OK(SetSendRecvValidation(old_instr, new_instr, peel_one_iteration)); old_to_new_map[old_instr] = new_instr; VLOG(2) << "Added instruction " << new_instr->ToString(); } while_body->set_root_instruction( old_to_new_map[while_body->root_instruction()]); VLOG(2) << "Replaced with new root " << while_body->root_instruction()->ToString(); TF_RETURN_IF_ERROR(HandleControlDependencies(while_body, old_to_new_map, &old_loop_roots, input_parameter, skip_control_dep_injection)); WhileLoopBackendConfig new_config; new_config.mutable_known_trip_count()->set_n(exact_trip_count / 2); TF_RETURN_IF_ERROR(while_instr->set_backend_config(new_config)); return true; } absl::StatusOr<bool> AutoUnroll(HloInstruction* while_instr, HloModule* module) { CHECK_EQ(while_instr->opcode(), HloOpcode::kWhile); bool any_collective_present = absl::c_any_of( while_instr->while_body()->MakeInstructionPostOrder(), [](HloInstruction* instr) { return hlo_query::IsCollectiveCommunicationOp(instr->opcode()); }); if (any_collective_present) { return DoubleBufferingUnroll(while_instr, module); } return false; } } absl::StatusOr<bool> DoubleBufferLoopUnrolling::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { bool changed = false; std::vector<HloInstruction*> while_instrs; for (auto comp : module->MakeNonfusionComputations()) { absl::c_copy_if(comp->instructions(), std::back_inserter(while_instrs), HloPredicateIsOp<HloOpcode::kWhile>); } VLOG(2) << "Processing " << while_instrs.size() << " while loops."; for (HloInstruction* while_instr : while_instrs) { TF_ASSIGN_OR_RETURN(WhileLoopBackendConfig config, while_instr->backend_config<WhileLoopBackendConfig>()); if (!config.has_known_trip_count()) { VLOG(2) << while_instr->ToString() << " doesn't have exact trip count, skipping loop unrolling."; continue; } if (config.known_trip_count().n() == 1) { VLOG(2) << while_instr->ToString() << " has an iteration count of one, skipping unrolling."; continue; } if (unroll_strategy_ == UnrollStrategy::kFullUnroll) { TF_ASSIGN_OR_RETURN(changed, 
FullyUnroll(while_instr, module)); } else if (unroll_strategy_ == UnrollStrategy::kDoubleBuffer) { TF_ASSIGN_OR_RETURN(changed, DoubleBufferingUnroll(while_instr, module)); } else if (unroll_strategy_ == UnrollStrategy::kAuto) { TF_ASSIGN_OR_RETURN(changed, AutoUnroll(while_instr, module)); } else { LOG(FATAL) << absl::StrCat("Unhandled unrolling strategy: ", unroll_strategy_); } } VLOG(2) << "LoopDoubleBufferTransformer output: " << module->ToString(); if (changed) { TF_RETURN_IF_ERROR( FlattenCallGraph().Run(module, execution_threads).status()); } return changed; } } }
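Before the unit tests that follow, a tiny self-contained sketch (plain C++, not XLA code) of just the trip-count bookkeeping performed by DoubleBufferingUnroll above: an odd known trip count peels one iteration into the parent computation, and the remaining count is halved because the unrolled body holds two copies of the original body. The helper name and struct below are invented for illustration.

#include <cassert>
#include <cstdint>

// Invented names, for illustration only.
struct UnrollResult {
  bool peeled_one_iteration;  // true when the original trip count was odd
  int64_t new_trip_count;     // value written back into known_trip_count
};

UnrollResult DoubleBufferTripCount(int64_t known_trip_count) {
  UnrollResult result;
  result.peeled_one_iteration = (known_trip_count % 2) != 0;
  if (result.peeled_one_iteration) {
    known_trip_count -= 1;  // one iteration now runs outside the loop
  }
  // The loop body contains two copies of the original body, so the loop
  // executes half as many times.
  result.new_trip_count = known_trip_count / 2;
  return result;
}

int main() {
  // Matches the tests below: trip count 10 -> 5 with no peeling,
  // trip count 11 -> one peeled iteration plus a loop of 5.
  UnrollResult even = DoubleBufferTripCount(10);
  assert(!even.peeled_one_iteration && even.new_trip_count == 5);
  UnrollResult odd = DoubleBufferTripCount(11);
  assert(odd.peeled_one_iteration && odd.new_trip_count == 5);
  return 0;
}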
#include "xla/service/gpu/transforms/double_buffer_loop_unrolling.h" #include <cstdint> #include <memory> #include <optional> #include "absl/container/flat_hash_set.h" #include "absl/log/log.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/pass/hlo_pass_pipeline.h" #include "xla/hlo/utils/hlo_query.h" #include "xla/service/tuple_simplifier.h" #include "xla/test.h" #include "xla/tests/filecheck.h" #include "xla/tests/hlo_test_base.h" #include "xla/xla.pb.h" #include "xla/xla_data.pb.h" #include "tsl/platform/status_matchers.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { using ::tsl::testing::IsOkAndHolds; int64_t CountInstructions(HloComputation& computation, HloOpcode opcode) { int64_t count = 0; hlo_query::ForEachInstructionWithOpcode( computation, opcode, [&count](HloInstruction* instr) { count++; }); return count; } int64_t CountInstructions(HloModule& module, HloOpcode opcode) { int64_t count = 0; hlo_query::ForEachInstructionWithOpcode( module, opcode, [&count](HloInstruction* instr) { count++; }); return count; } using GpuLoopDoubleBufferTransformerTest = HloTestBase; TEST_F(GpuLoopDoubleBufferTransformerTest, AutoUnrollLoopWhenCollectivesArePresent) { absl::string_view kModuleString = R"( HloModule m condition { input_tuple = (f32[], s32[]) parameter(0) cond = s32[] get-tuple-element(input_tuple), index=1 trip_count = s32[] constant(10) ROOT done = pred[] compare(cond, trip_count), direction=LT } ar_add { Arg_1 = f32[] parameter(1) Arg_0 = f32[] parameter(0) ROOT add_ar = f32[] add(Arg_1, Arg_0) } body { input_tuple = (f32[], s32[]) parameter(0) param_0 = f32[] get-tuple-element(input_tuple), index=0 cond = s32[] get-tuple-element(input_tuple), index=1 all-reduce-start = f32[] all-reduce-start(param_0), channel_id=8, replica_groups={{0}}, to_apply=ar_add, backend_config="{\"is_sync\":false}" one = s32[] constant(1) all-reduce-done = f32[] all-reduce-done(all-reduce-start) cond_plus_1 = s32[] add(cond, one) ROOT output_tuple = (f32[], s32[]) tuple(all-reduce-done, cond_plus_1) } ENTRY main { param_0 = f32[] parameter(0) param_2 = s32[] constant(0) tuple = (f32[], s32[]) tuple(param_0, param_2) ROOT while = (f32[], s32[]) while(tuple), condition=condition, body=body, backend_config={"known_trip_count":{"n":"10"}} })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module, ParseAndReturnVerifiedModule(kModuleString)); HloPassPipeline pipeline("double-buffering-pipeline"); DoubleBufferLoopUnrolling unroller( DoubleBufferLoopUnrolling::UnrollStrategy::kAuto); TF_ASSERT_OK_AND_ASSIGN(bool changed, unroller.Run(module.get())); EXPECT_TRUE(changed); HloInstruction* while_instruction = hlo_query::GetFirstInstructionWithOpcode( *module->entry_computation(), HloOpcode::kWhile); TF_ASSERT_OK_AND_ASSIGN( WhileLoopBackendConfig config, while_instruction->backend_config<WhileLoopBackendConfig>()); EXPECT_EQ(config.known_trip_count().n(), 5); EXPECT_EQ(CountInstructions((*while_instruction->while_body()), HloOpcode::kAllReduceStart), 2); } TEST_F(GpuLoopDoubleBufferTransformerTest, DoNotAutoUnrollLoopWhenCollectivesAreNotPresent) { absl::string_view kModuleString = R"( HloModule m condition { input_tuple = (s32[]) parameter(0) cond = s32[] get-tuple-element(input_tuple), index=0 trip_count = s32[] constant(10) ROOT done = pred[] compare(cond, trip_count), direction=LT } body { input_tuple = (s32[]) 
parameter(0) cond = s32[] get-tuple-element(input_tuple), index=0 one = s32[] constant(1) cond_plus_1 = s32[] add(cond, one) ROOT output_tuple = (s32[]) tuple(cond_plus_1) } ENTRY main { param_0 = s32[] constant(0) tuple = (s32[]) tuple(param_0) ROOT while = (s32[]) while(tuple), condition=condition, body=body, backend_config={"known_trip_count":{"n":"10"}} })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module, ParseAndReturnVerifiedModule(kModuleString)); DoubleBufferLoopUnrolling unroller( DoubleBufferLoopUnrolling::UnrollStrategy::kAuto); TF_ASSERT_OK_AND_ASSIGN(bool changed, unroller.Run(module.get())); EXPECT_FALSE(changed); HloInstruction* while_instruction = hlo_query::GetFirstInstructionWithOpcode( *module->entry_computation(), HloOpcode::kWhile); TF_ASSERT_OK_AND_ASSIGN( WhileLoopBackendConfig config, while_instruction->backend_config<WhileLoopBackendConfig>()); EXPECT_EQ(config.known_trip_count().n(), 10); } TEST_F(GpuLoopDoubleBufferTransformerTest, FullUnrollOddTripCountTest) { const char* const kModuleString = R"( HloModule all_gather_overlapping condition { input_tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) parameter(0) cond = s32[] get-tuple-element(input_tuple), index=3 trip_count = s32[] constant(10) ROOT done = pred[] compare(cond, trip_count), direction=LT } body { input_tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) parameter(0) param_0 = f32[1,128] get-tuple-element(input_tuple), index=0 param_1 = f32[2,128] get-tuple-element(input_tuple), index=2 cond = s32[] get-tuple-element(input_tuple), index=3 c0 = f32[] constant(0) splat_c0 = f32[1,128] broadcast(c0), dimensions={} add = f32[1,128] add(splat_c0, param_0) all-gather-start = (f32[1,128], f32[2,128]) all-gather-start(add), channel_id=1337, replica_groups={{0,1}}, dimensions={0}, use_global_device_ids=true c1_s32 = s32[] constant(1) c0_s32 = s32[] constant(0) one = s32[] constant(1) cond_plus_1 = s32[] add(cond, one) dynamic-slice = f32[1,128] dynamic-slice(param_1, c1_s32, c0_s32), dynamic_slice_sizes={1,128} all-gather-done = f32[2,128] all-gather-done(all-gather-start) ROOT output_tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) tuple(param_0, dynamic-slice, all-gather-done, cond_plus_1) } ENTRY main { param_0 = f32[1,128] parameter(0) param_1 = f32[2,128] parameter(1) param_2 = s32[] constant(0) tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) tuple(param_0, param_0, param_1, param_2) ROOT while = (f32[1,128], f32[1,128], f32[2,128], s32[]) while(tuple), condition=condition, body=body, backend_config={"known_trip_count":{"n":"11"}} })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module, ParseAndReturnVerifiedModule(kModuleString)); DoubleBufferLoopUnrolling double_buffer( DoubleBufferLoopUnrolling::UnrollStrategy::kFullUnroll); TupleSimplifier tuple_simp; bool changed; TF_ASSERT_OK_AND_ASSIGN(changed, double_buffer.Run(module.get())); EXPECT_TRUE(changed); TF_ASSERT_OK_AND_ASSIGN(changed, tuple_simp.Run(module.get())); EXPECT_TRUE(changed); HloInstruction* while_instruction = hlo_query::GetFirstInstructionWithOpcode( *module->entry_computation(), HloOpcode::kWhile); TF_ASSERT_OK_AND_ASSIGN( WhileLoopBackendConfig config, while_instruction->backend_config<WhileLoopBackendConfig>()); int64_t exact_trip_count = config.known_trip_count().n(); EXPECT_EQ(exact_trip_count, 1); EXPECT_EQ(CountInstructions((*while_instruction->while_body()), HloOpcode::kAllGatherStart), 11); EXPECT_EQ(CountInstructions((*module), HloOpcode::kAllGatherStart), 11); } 
TEST_F(GpuLoopDoubleBufferTransformerTest, FullUnrollEvenTripCountTest) { const char* const kModuleString = R"( HloModule all_gather_overlapping condition { input_tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) parameter(0) cond = s32[] get-tuple-element(input_tuple), index=3 trip_count = s32[] constant(10) ROOT done = pred[] compare(cond, trip_count), direction=LT } body { input_tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) parameter(0) param_0 = f32[1,128] get-tuple-element(input_tuple), index=0 param_1 = f32[2,128] get-tuple-element(input_tuple), index=2 cond = s32[] get-tuple-element(input_tuple), index=3 c0 = f32[] constant(0) splat_c0 = f32[1,128] broadcast(c0), dimensions={} add = f32[1,128] add(splat_c0, param_0) all-gather-start = (f32[1,128], f32[2,128]) all-gather-start(add), channel_id=1337, replica_groups={{0,1}}, dimensions={0}, use_global_device_ids=true c1_s32 = s32[] constant(1) c0_s32 = s32[] constant(0) one = s32[] constant(1) cond_plus_1 = s32[] add(cond, one) dynamic-slice = f32[1,128] dynamic-slice(param_1, c1_s32, c0_s32), dynamic_slice_sizes={1,128} all-gather-done = f32[2,128] all-gather-done(all-gather-start) ROOT output_tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) tuple(param_0, dynamic-slice, all-gather-done, cond_plus_1) } ENTRY main { param_0 = f32[1,128] parameter(0) param_1 = f32[2,128] parameter(1) param_2 = s32[] constant(0) tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) tuple(param_0, param_0, param_1, param_2) ROOT while = (f32[1,128], f32[1,128], f32[2,128], s32[]) while(tuple), condition=condition, body=body, backend_config={"known_trip_count":{"n":"10"}} })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module, ParseAndReturnVerifiedModule(kModuleString)); DoubleBufferLoopUnrolling double_buffer( DoubleBufferLoopUnrolling::UnrollStrategy::kFullUnroll); TupleSimplifier tuple_simp; bool changed; TF_ASSERT_OK_AND_ASSIGN(changed, double_buffer.Run(module.get())); EXPECT_TRUE(changed); TF_ASSERT_OK_AND_ASSIGN(changed, tuple_simp.Run(module.get())); EXPECT_TRUE(changed); HloInstruction* while_instruction; for (auto instr : module->entry_computation()->instructions()) { if (instr->opcode() == HloOpcode::kWhile) { while_instruction = instr; } } TF_ASSERT_OK_AND_ASSIGN( WhileLoopBackendConfig config, while_instruction->backend_config<WhileLoopBackendConfig>()); int64_t exact_trip_count = config.known_trip_count().n(); EXPECT_EQ(exact_trip_count, 1); EXPECT_EQ(CountInstructions((*while_instruction->while_body()), HloOpcode::kAllGatherStart), 10); EXPECT_EQ(CountInstructions((*module), HloOpcode::kAllGatherStart), 10); } TEST_F(GpuLoopDoubleBufferTransformerTest, UnrolledLoopEvenTripCount) { const char* const kModuleString = R"( HloModule all_gather_overlapping condition { input_tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) parameter(0) cond = s32[] get-tuple-element(input_tuple), index=3 trip_count = s32[] constant(10) ROOT done = pred[] compare(cond, trip_count), direction=LT } body { input_tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) parameter(0) param_0 = f32[1,128] get-tuple-element(input_tuple), index=0 param_1 = f32[2,128] get-tuple-element(input_tuple), index=2 cond = s32[] get-tuple-element(input_tuple), index=3 c0 = f32[] constant(0) splat_c0 = f32[1,128] broadcast(c0), dimensions={} add = f32[1,128] add(splat_c0, param_0) all-gather-start = (f32[1,128], f32[2,128]) all-gather-start(add), channel_id=1337, replica_groups={{0,1}}, dimensions={0}, use_global_device_ids=true c1_s32 = s32[] constant(1) c0_s32 = 
s32[] constant(0) one = s32[] constant(1) cond_plus_1 = s32[] add(cond, one) dynamic-slice = f32[1,128] dynamic-slice(param_1, c1_s32, c0_s32), dynamic_slice_sizes={1,128} all-gather-done = f32[2,128] all-gather-done(all-gather-start) ROOT output_tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) tuple(param_0, dynamic-slice, all-gather-done, cond_plus_1) } ENTRY main { param_0 = f32[1,128] parameter(0) param_1 = f32[2,128] parameter(1) param_2 = s32[] constant(0) tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) tuple(param_0, param_0, param_1, param_2) ROOT while = (f32[1,128], f32[1,128], f32[2,128], s32[]) while(tuple), condition=condition, body=body, backend_config={"known_trip_count":{"n":"10"}} })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module, ParseAndReturnVerifiedModule(kModuleString)); DoubleBufferLoopUnrolling double_buffer; TupleSimplifier tuple_simp; bool changed; TF_ASSERT_OK_AND_ASSIGN(changed, double_buffer.Run(module.get())); EXPECT_TRUE(changed); TF_ASSERT_OK_AND_ASSIGN(changed, tuple_simp.Run(module.get())); EXPECT_TRUE(changed); HloInstruction* while_instruction = hlo_query::GetFirstInstructionWithOpcode( *module->entry_computation(), HloOpcode::kWhile); TF_ASSERT_OK_AND_ASSIGN( WhileLoopBackendConfig config, while_instruction->backend_config<WhileLoopBackendConfig>()); int64_t exact_trip_count = config.known_trip_count().n(); EXPECT_EQ(exact_trip_count, 5); EXPECT_EQ(CountInstructions((*while_instruction->while_body()), HloOpcode::kAllGatherStart), 2); EXPECT_EQ(CountInstructions((*module), HloOpcode::kAllGatherStart), 2); } TEST_F(GpuLoopDoubleBufferTransformerTest, UnrolledLoopOddTripCount) { const char* const kModuleString = R"( HloModule all_gather_overlapping condition { input_tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) parameter(0) cond = s32[] get-tuple-element(input_tuple), index=3 trip_count = s32[] constant(10) ROOT done = pred[] compare(cond, trip_count), direction=LT } body { input_tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) parameter(0) param_0 = f32[1,128] get-tuple-element(input_tuple), index=0 param_1 = f32[2,128] get-tuple-element(input_tuple), index=2 cond = s32[] get-tuple-element(input_tuple), index=3 c0 = f32[] constant(0) splat_c0 = f32[1,128] broadcast(c0), dimensions={} add = f32[1,128] add(splat_c0, param_0) all-gather-start = (f32[1,128], f32[2,128]) all-gather-start(add), channel_id=1337, replica_groups={{0,1}}, dimensions={0}, use_global_device_ids=true c1_s32 = s32[] constant(1) c0_s32 = s32[] constant(0) one = s32[] constant(1) cond_plus_1 = s32[] add(cond, one) dynamic-slice = f32[1,128] dynamic-slice(param_1, c1_s32, c0_s32), dynamic_slice_sizes={1,128} all-gather-done = f32[2,128] all-gather-done(all-gather-start) ROOT output_tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) tuple(param_0, dynamic-slice, all-gather-done, cond_plus_1) } ENTRY main { param_0 = f32[1,128] parameter(0) param_1 = f32[2,128] parameter(1) param_2 = s32[] constant(0) tuple = (f32[1,128], f32[1,128], f32[2,128], s32[]) tuple(param_0, param_0, param_1, param_2) ROOT while = (f32[1,128], f32[1,128], f32[2,128], s32[]) while(tuple), condition=condition, body=body, backend_config={"known_trip_count":{"n":"11"}} })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module, ParseAndReturnVerifiedModule(kModuleString)); DoubleBufferLoopUnrolling double_buffer; TupleSimplifier tuple_simp; EXPECT_THAT(double_buffer.Run(module.get()), IsOkAndHolds(true)); EXPECT_THAT(tuple_simp.Run(module.get()), IsOkAndHolds(true)); 
HloInstruction* while_instruction = hlo_query::GetFirstInstructionWithOpcode( *module->entry_computation(), HloOpcode::kWhile); TF_ASSERT_OK_AND_ASSIGN( WhileLoopBackendConfig config, while_instruction->backend_config<WhileLoopBackendConfig>()); int64_t exact_trip_count = config.known_trip_count().n(); EXPECT_EQ(exact_trip_count, 5); EXPECT_EQ(CountInstructions((*while_instruction->while_body()), HloOpcode::kAllGatherStart), 2); EXPECT_EQ(CountInstructions((*module), HloOpcode::kAllGatherStart), 3); EXPECT_EQ(while_instruction->operand(0)->operand(2)->opcode(), HloOpcode::kAllGatherDone); } TEST_F(GpuLoopDoubleBufferTransformerTest, UnrolledLoopNoControlDepsForConstantAdd) { const char* const kModuleString = R"( HloModule loop_unrolling_no_deps condition { input_tuple = (f32[], s32[]) parameter(0) cond = s32[] get-tuple-element(input_tuple), index=1 trip_count = s32[] constant(10) ROOT done = pred[] compare(cond, trip_count), direction=LT } body { input_tuple = (f32[], s32[]) parameter(0) param_0 = f32[] get-tuple-element(input_tuple), index=0 cond = s32[] get-tuple-element(input_tuple), index=1 c2 = f32[] constant(2) add = f32[] add(c2, param_0) one = s32[] constant(1) cond_plus_1 = s32[] add(cond, one) ROOT output_tuple = (f32[], s32[]) tuple(add, cond_plus_1) } ENTRY main { param_0 = f32[] parameter(0) param_2 = s32[] constant(0) tuple = (f32[], s32[]) tuple(param_0, param_2) ROOT while = (f32[], s32[]) while(tuple), condition=condition, body=body, backend_config={"known_trip_count":{"n":"11"}} })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module, ParseAndReturnVerifiedModule(kModuleString)); DoubleBufferLoopUnrolling double_buffer; TupleSimplifier tuple_simp; EXPECT_THAT(double_buffer.Run(module.get()), IsOkAndHolds(true)); EXPECT_THAT(tuple_simp.Run(module.get()), IsOkAndHolds(true)); HloInstruction* while_instruction = hlo_query::GetFirstInstructionWithOpcode( *module->entry_computation(), HloOpcode::kWhile); TF_ASSERT_OK_AND_ASSIGN( WhileLoopBackendConfig config, while_instruction->backend_config<WhileLoopBackendConfig>()); int64_t exact_trip_count = config.known_trip_count().n(); EXPECT_EQ(exact_trip_count, 5); EXPECT_EQ( CountInstructions((*while_instruction->while_body()), HloOpcode::kAdd), 4); EXPECT_EQ(while_instruction->while_body() ->root_instruction() ->operand(0) ->control_predecessors() .size(), 0); } TEST_F(GpuLoopDoubleBufferTransformerTest, UnrolledLoopNoControlDepsForCollective) { const char* const kModuleString = R"( HloModule loop_unrolling_no_deps condition { input_tuple = (f32[], s32[]) parameter(0) cond = s32[] get-tuple-element(input_tuple), index=1 trip_count = s32[] constant(10) ROOT done = pred[] compare(cond, trip_count), direction=LT } ar_add { Arg_1 = f32[] parameter(1) Arg_0 = f32[] parameter(0) ROOT add_ar = f32[] add(Arg_1, Arg_0) } body { input_tuple = (f32[], s32[]) parameter(0) param_0 = f32[] get-tuple-element(input_tuple), index=0 cond = s32[] get-tuple-element(input_tuple), index=1 all-reduce-start = f32[] all-reduce-start(param_0), channel_id=8, replica_groups={{0}}, to_apply=ar_add, backend_config="{\"is_sync\":false}" one = s32[] constant(1) all-reduce-done = f32[] all-reduce-done(all-reduce-start) cond_plus_1 = s32[] add(cond, one) ROOT output_tuple = (f32[], s32[]) tuple(all-reduce-done, cond_plus_1) } ENTRY main { param_0 = f32[] parameter(0) param_2 = s32[] constant(0) tuple = (f32[], s32[]) tuple(param_0, param_2) ROOT while = (f32[], s32[]) while(tuple), condition=condition, body=body, 
backend_config={"known_trip_count":{"n":"10"}} })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module, ParseAndReturnVerifiedModule(kModuleString)); DoubleBufferLoopUnrolling double_buffer; TupleSimplifier tuple_simp; EXPECT_THAT(double_buffer.Run(module.get()), IsOkAndHolds(true)); EXPECT_THAT(tuple_simp.Run(module.get()), IsOkAndHolds(true)); HloInstruction* while_instruction = hlo_query::GetFirstInstructionWithOpcode( *module->entry_computation(), HloOpcode::kWhile); TF_ASSERT_OK_AND_ASSIGN( WhileLoopBackendConfig config, while_instruction->backend_config<WhileLoopBackendConfig>()); int64_t exact_trip_count = config.known_trip_count().n(); EXPECT_EQ(exact_trip_count, 5); EXPECT_EQ(CountInstructions((*while_instruction->while_body()), HloOpcode::kAllReduceStart), 2); absl::flat_hash_set<int64_t> channel_ids; hlo_query::ForEachInstructionWithOpcode( *while_instruction->while_body(), HloOpcode::kAllReduceStart, [&channel_ids](HloInstruction* ar) { EXPECT_EQ(ar->control_predecessors().size(), 0); channel_ids.insert(*(ar->channel_id())); }); EXPECT_EQ(channel_ids.size(), 2); } TEST_F(GpuLoopDoubleBufferTransformerTest, FullyUnrolledLoopNoControlDepsForCollective) { const char* const kModuleString = R"( HloModule loop_unrolling_no_deps condition { input_tuple = (f32[], s32[]) parameter(0) cond = s32[] get-tuple-element(input_tuple), index=1 trip_count = s32[] constant(10) ROOT done = pred[] compare(cond, trip_count), direction=LT } ar_add { Arg_1 = f32[] parameter(1) Arg_0 = f32[] parameter(0) ROOT add_ar = f32[] add(Arg_1, Arg_0) } body { input_tuple = (f32[], s32[]) parameter(0) param_0 = f32[] get-tuple-element(input_tuple), index=0 cond = s32[] get-tuple-element(input_tuple), index=1 all-reduce-start = f32[] all-reduce-start(param_0), channel_id=8, replica_groups={{0}}, to_apply=ar_add, backend_config="{\"is_sync\":false}" one = s32[] constant(1) all-reduce-done = f32[] all-reduce-done(all-reduce-start) cond_plus_1 = s32[] add(cond, one) ROOT output_tuple = (f32[], s32[]) tuple(all-reduce-done, cond_plus_1) } ENTRY main { param_0 = f32[] parameter(0) param_2 = s32[] constant(0) tuple = (f32[], s32[]) tuple(param_0, param_2) ROOT while = (f32[], s32[]) while(tuple), condition=condition, body=body, backend_config={"known_trip_count":{"n":"10"}} })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module, ParseAndReturnVerifiedModule(kModuleString)); DoubleBufferLoopUnrolling double_buffer( DoubleBufferLoopUnrolling::UnrollStrategy::kFullUnroll); TupleSimplifier tuple_simp; EXPECT_THAT(double_buffer.Run(module.get()), IsOkAndHolds(true)); EXPECT_THAT(tuple_simp.Run(module.get()), IsOkAndHolds(true)); HloInstruction* while_instruction = hlo_query::GetFirstInstructionWithOpcode( *module->entry_computation(), HloOpcode::kWhile); TF_ASSERT_OK_AND_ASSIGN( WhileLoopBackendConfig config, while_instruction->backend_config<WhileLoopBackendConfig>()); int64_t exact_trip_count = config.known_trip_count().n(); EXPECT_EQ(exact_trip_count, 1); EXPECT_EQ(CountInstructions((*while_instruction->while_body()), HloOpcode::kAllReduceStart), 10); absl::flat_hash_set<int64_t> channel_ids; hlo_query::ForEachInstructionWithOpcode( *while_instruction->while_body(), HloOpcode::kAllReduceStart, [&channel_ids](HloInstruction* ar) { EXPECT_EQ(ar->control_predecessors().size(), 0); channel_ids.insert(*(ar->channel_id())); }); EXPECT_EQ(channel_ids.size(), 10); } TEST_F(GpuLoopDoubleBufferTransformerTest, NestedWhileLoopRemainsFlattened) { const char* const kModuleString = R"( HloModule 
loop_unrolling_nested_while_loop_remains_flattened condition_nested { input_tuple = (s32[]) parameter(0) cond = s32[] get-tuple-element(input_tuple), index=0 trip_count = s32[] constant(10) ROOT done = pred[] compare(cond, trip_count), direction=LT } body_nested { input_tuple = (s32[]) parameter(0) cond = s32[] get-tuple-element(input_tuple), index=0 one = s32[] constant(1) cond_plus_1 = s32[] add(cond, one) ROOT output = (s32[]) tuple(cond_plus_1) } condition { input_tuple = (s32[]) parameter(0) cond = s32[] get-tuple-element(input_tuple), index=0 trip_count = s32[] constant(10) ROOT done = pred[] compare(cond, trip_count), direction=LT } body { input_tuple = (s32[]) parameter(0) ROOT output = (s32[]) while(input_tuple), condition=condition_nested, body=body_nested } ENTRY main { param_0 = (s32[]) parameter(0) ROOT while = (s32[]) while(param_0), condition=condition, body=body, backend_config={"known_trip_count":{"n":"10"}} })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module, ParseAndReturnVerifiedModule(kModuleString)); DoubleBufferLoopUnrolling double_buffer; EXPECT_THAT(double_buffer.Run(module.get()), IsOkAndHolds(true)); absl::flat_hash_set<const HloComputation*> while_loops_callees; hlo_query::ForEachInstructionWithOpcode( *module, HloOpcode::kWhile, [&while_loops_callees](HloInstruction* instr) { EXPECT_TRUE( while_loops_callees.insert(instr->while_condition()).second); EXPECT_TRUE(while_loops_callees.insert(instr->while_body()).second); }); EXPECT_EQ(while_loops_callees.size(), 6); } TEST_F(GpuLoopDoubleBufferTransformerTest, NestedWhileLoopRemainsFlattenedOddTripCount) { const char* const kModuleString = R"( HloModule loop_unrolling_nested_while_loop_remains_flattened condition_nested { input_tuple = (s32[]) parameter(0) cond = s32[] get-tuple-element(input_tuple), index=0 trip_count = s32[] constant(10) ROOT done = pred[] compare(cond, trip_count), direction=LT } body_nested { input_tuple = (s32[]) parameter(0) cond = s32[] get-tuple-element(input_tuple), index=0 one = s32[] constant(1) cond_plus_1 = s32[] add(cond, one) ROOT output = (s32[]) tuple(cond_plus_1) } condition { input_tuple = (s32[]) parameter(0) cond = s32[] get-tuple-element(input_tuple), index=0 trip_count = s32[] constant(10) ROOT done = pred[] compare(cond, trip_count), direction=LT } body { input_tuple = (s32[]) parameter(0) ROOT output = (s32[]) while(input_tuple), condition=condition_nested, body=body_nested } ENTRY main { param_0 = (s32[]) parameter(0) ROOT while = (s32[]) while(param_0), condition=condition, body=body, backend_config={"known_trip_count":{"n":"11"}} })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module, ParseAndReturnVerifiedModule(kModuleString)); DoubleBufferLoopUnrolling double_buffer; EXPECT_THAT(double_buffer.Run(module.get()), IsOkAndHolds(true)); absl::flat_hash_set<const HloComputation*> while_loops_callees; hlo_query::ForEachInstructionWithOpcode( *module, HloOpcode::kWhile, [&while_loops_callees](HloInstruction* instr) { EXPECT_TRUE( while_loops_callees.insert(instr->while_condition()).second); EXPECT_TRUE(while_loops_callees.insert(instr->while_body()).second); }); EXPECT_EQ(while_loops_callees.size(), 8); } TEST_F(GpuLoopDoubleBufferTransformerTest, NestedWhileLoopRemainsFlattenedWhenFullyUnrolled) { const char* const kModuleString = R"( HloModule loop_unrolling_nested_while_loop_remains_flattened condition_nested { input_tuple = (s32[]) parameter(0) cond = s32[] get-tuple-element(input_tuple), index=0 trip_count = s32[] constant(10) ROOT done = 
pred[] compare(cond, trip_count), direction=LT } body_nested { input_tuple = (s32[]) parameter(0) cond = s32[] get-tuple-element(input_tuple), index=0 one = s32[] constant(1) cond_plus_1 = s32[] add(cond, one) ROOT output = (s32[]) tuple(cond_plus_1) } condition { input_tuple = (s32[]) parameter(0) cond = s32[] get-tuple-element(input_tuple), index=0 trip_count = s32[] constant(10) ROOT done = pred[] compare(cond, trip_count), direction=LT } body { input_tuple = (s32[]) parameter(0) ROOT output = (s32[]) while(input_tuple), condition=condition_nested, body=body_nested } ENTRY main { param_0 = (s32[]) parameter(0) ROOT while = (s32[]) while(param_0), condition=condition, body=body, backend_config={"known_trip_count":{"n":"10"}} })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module, ParseAndReturnVerifiedModule(kModuleString)); DoubleBufferLoopUnrolling double_buffer( DoubleBufferLoopUnrolling::UnrollStrategy::kFullUnroll); EXPECT_THAT(double_buffer.Run(module.get()), IsOkAndHolds(true)); absl::flat_hash_set<const HloComputation*> while_loops_callees; hlo_query::ForEachInstructionWithOpcode( *module, HloOpcode::kWhile, [&while_loops_callees](HloInstruction* instr) { EXPECT_TRUE( while_loops_callees.insert(instr->while_condition()).second); EXPECT_TRUE(while_loops_callees.insert(instr->while_body()).second); }); hlo_query::ForEachInstructionWithOpcode( *module->entry_computation(), HloOpcode::kWhile, [](HloInstruction* instr) { TF_ASSERT_OK_AND_ASSIGN( WhileLoopBackendConfig config, instr->backend_config<WhileLoopBackendConfig>()); int64_t exact_trip_count = config.known_trip_count().n(); EXPECT_EQ(exact_trip_count, 1); }); EXPECT_EQ(while_loops_callees.size(), 22); } TEST_F(GpuLoopDoubleBufferTransformerTest, NestedWhileLoopAreUnrolled) { const char* const kModuleString = R"( HloModule loop_unrolling_nested_are_unrolled condition_nested { input_tuple = (s32[]) parameter(0) cond = s32[] get-tuple-element(input_tuple), index=0 trip_count = s32[] constant(10) ROOT done = pred[] compare(cond, trip_count), direction=LT } body_nested { input_tuple = (s32[]) parameter(0) cond = s32[] get-tuple-element(input_tuple), index=0 one = s32[] constant(1) cond_plus_1 = s32[] add(cond, one) ROOT output = (s32[]) tuple(cond_plus_1) } condition { input_tuple = (s32[]) parameter(0) cond = s32[] get-tuple-element(input_tuple), index=0 trip_count = s32[] constant(10) ROOT done = pred[] compare(cond, trip_count), direction=LT } body { input_tuple = (s32[]) parameter(0) ROOT output = (s32[]) while(input_tuple), condition=condition_nested, body=body_nested, backend_config={"known_trip_count":{"n":"11"}} } ENTRY main { param_0 = (s32[]) parameter(0) ROOT while = (s32[]) while(param_0), condition=condition, body=body, backend_config={"known_trip_count":{"n":"11"}} })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module, ParseAndReturnVerifiedModule(kModuleString)); DoubleBufferLoopUnrolling double_buffer; EXPECT_THAT(double_buffer.Run(module.get()), IsOkAndHolds(true)); int64_t num_whiles = 0; hlo_query::ForEachInstructionWithOpcode( *module, HloOpcode::kWhile, [&num_whiles](HloInstruction* instr) { EXPECT_EQ(instr->backend_config<WhileLoopBackendConfig>() ->known_trip_count() .n(), 5); ++num_whiles; }); EXPECT_EQ(num_whiles, 4); } TEST_F(GpuLoopDoubleBufferTransformerTest, NestedWhileLoopAreFullyUnrolled) { const char* const kModuleString = R"( HloModule loop_unrolling_nested_are_unrolled condition_nested { input_tuple = (s32[]) parameter(0) cond = s32[] get-tuple-element(input_tuple), 
index=0 trip_count = s32[] constant(10) ROOT done = pred[] compare(cond, trip_count), direction=LT } body_nested { input_tuple = (s32[]) parameter(0) cond = s32[] get-tuple-element(input_tuple), index=0 one = s32[] constant(1) cond_plus_1 = s32[] add(cond, one) ROOT output = (s32[]) tuple(cond_plus_1) } condition { input_tuple = (s32[]) parameter(0) cond = s32[] get-tuple-element(input_tuple), index=0 trip_count = s32[] constant(10) ROOT done = pred[] compare(cond, trip_count), direction=LT } body { input_tuple = (s32[]) parameter(0) ROOT output = (s32[]) while(input_tuple), condition=condition_nested, body=body_nested, backend_config={"known_trip_count":{"n":"11"}} } ENTRY main { param_0 = (s32[]) parameter(0) ROOT while = (s32[]) while(param_0), condition=condition, body=body, backend_config={"known_trip_count":{"n":"11"}} })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module, ParseAndReturnVerifiedModule(kModuleString)); DoubleBufferLoopUnrolling double_buffer( DoubleBufferLoopUnrolling::UnrollStrategy::kFullUnroll); EXPECT_THAT(double_buffer.Run(module.get()), IsOkAndHolds(true)); int64_t num_whiles = 0; hlo_query::ForEachInstructionWithOpcode( *module, HloOpcode::kWhile, [&num_whiles](HloInstruction* instr) { EXPECT_EQ(instr->backend_config<WhileLoopBackendConfig>() ->known_trip_count() .n(), 1); ++num_whiles; }); EXPECT_EQ(num_whiles, 12); } TEST_F(GpuLoopDoubleBufferTransformerTest, WhileLoopWithCollectivePermute) { const char* kModuleString = R"( HloModule loop_unrolling_no_deps condition { input_tuple = (f32[], s32[]) parameter(0) cond = s32[] get-tuple-element(input_tuple), index=1 trip_count = s32[] constant(10) ROOT done = pred[] compare(cond, trip_count), direction=LT } ar_add { Arg_1 = f32[] parameter(1) Arg_0 = f32[] parameter(0) ROOT add_ar = f32[] add(Arg_1, Arg_0) } body { input_tuple = (f32[], s32[]) parameter(0) param_0 = f32[] get-tuple-element(input_tuple), index=0 cond = s32[] get-tuple-element(input_tuple), index=1 collective-permute = f32[] collective-permute(param_0), channel_id=1, source_target_pairs={{0,1},{1,2},{2,3},{3,0}}, frontend_attributes={_xla_send_recv_validation="{{0,6},{1,7},{2,8},{3,9}}"} one = s32[] constant(1) cond_plus_1 = s32[] add(cond, one) ROOT output_tuple = (f32[], s32[]) tuple(collective-permute, cond_plus_1) } ENTRY main { param_0 = f32[] parameter(0) param_2 = s32[] constant(0) tuple = (f32[], s32[]) tuple(param_0, param_2) ROOT while = (f32[], s32[]) while(tuple), condition=condition, body=body, backend_config={"known_trip_count":{"n":"10"}} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module, ParseAndReturnVerifiedModule(kModuleString)); DoubleBufferLoopUnrolling double_buffer( DoubleBufferLoopUnrolling::UnrollStrategy::kDoubleBuffer); EXPECT_THAT(double_buffer.Run(module.get()), IsOkAndHolds(true)); VLOG(1) << module->ToString(); EXPECT_TRUE(*RunFileCheck(module->ToString(), R"( )")); } TEST_F(GpuLoopDoubleBufferTransformerTest, WhileLoopWithCollectivePermutePeeled) { const char* kModuleString = R"( HloModule loop_unrolling_no_deps condition { input_tuple = (f32[], s32[]) parameter(0) cond = s32[] get-tuple-element(input_tuple), index=1 trip_count = s32[] constant(15) ROOT done = pred[] compare(cond, trip_count), direction=LT } ar_add { Arg_1 = f32[] parameter(1) Arg_0 = f32[] parameter(0) ROOT add_ar = f32[] add(Arg_1, Arg_0) } body { input_tuple = (f32[], s32[]) parameter(0) param_0 = f32[] get-tuple-element(input_tuple), index=0 cond = s32[] get-tuple-element(input_tuple), index=1 collective-permute = 
f32[] collective-permute(param_0), channel_id=1, source_target_pairs={{0,1},{1,2},{2,3},{3,4},{4,5},{5,6},{6,7},{7,0}}, frontend_attributes={_xla_send_recv_validation="{{0,7},{1,8},{2,9},{3,10},{4,11},{5,12},{6,13},{7,14}}"} one = s32[] constant(1) cond_plus_1 = s32[] add(cond, one) ROOT output_tuple = (f32[], s32[]) tuple(collective-permute, cond_plus_1) } ENTRY main { param_0 = f32[] parameter(0) param_2 = s32[] constant(0) tuple = (f32[], s32[]) tuple(param_0, param_2) ROOT while = (f32[], s32[]) while(tuple), condition=condition, body=body, backend_config={"known_trip_count":{"n":"15"}} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module, ParseAndReturnVerifiedModule(kModuleString)); DoubleBufferLoopUnrolling double_buffer( DoubleBufferLoopUnrolling::UnrollStrategy::kDoubleBuffer); EXPECT_THAT(double_buffer.Run(module.get()), IsOkAndHolds(true)); VLOG(1) << module->ToString(); EXPECT_TRUE(*RunFileCheck(module->ToString(), R"( )")); } TEST_F(GpuLoopDoubleBufferTransformerTest, WhileLoopWithCollectivePermuteBackwardCycle) { const char* kModuleString = R"( HloModule loop_unrolling_no_deps condition { input_tuple = (f32[], s32[]) parameter(0) cond = s32[] get-tuple-element(input_tuple), index=1 trip_count = s32[] constant(14) ROOT done = pred[] compare(cond, trip_count), direction=LT } ar_add { Arg_1 = f32[] parameter(1) Arg_0 = f32[] parameter(0) ROOT add_ar = f32[] add(Arg_1, Arg_0) } body { input_tuple = (f32[], s32[]) parameter(0) param_0 = f32[] get-tuple-element(input_tuple), index=0 cond = s32[] get-tuple-element(input_tuple), index=1 collective-permute = f32[] collective-permute(param_0), channel_id=1, source_target_pairs={{0,7},{1,0},{2,1},{3,2},{4,3},{5,4},{6,5},{7,6}}, frontend_attributes={_xla_send_recv_validation="{{7,13},{6,12},{5,11},{4,10},{3,9},{2,8},{1,7},{0,6}}"} one = s32[] constant(1) cond_plus_1 = s32[] add(cond, one) ROOT output_tuple = (f32[], s32[]) tuple(collective-permute, cond_plus_1) } ENTRY main { param_0 = f32[] parameter(0) param_2 = s32[] constant(0) tuple = (f32[], s32[]) tuple(param_0, param_2) ROOT while = (f32[], s32[]) while(tuple), condition=condition, body=body, backend_config={"known_trip_count":{"n":"14"}} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module, ParseAndReturnVerifiedModule(kModuleString)); DoubleBufferLoopUnrolling double_buffer( DoubleBufferLoopUnrolling::UnrollStrategy::kDoubleBuffer); EXPECT_THAT(double_buffer.Run(module.get()), IsOkAndHolds(true)); EXPECT_TRUE(*RunFileCheck(module->ToString(), R"( )")); } TEST_F(GpuLoopDoubleBufferTransformerTest, WhileLoopWithCollectivePermuteBackwardCyclePeeled) { const char* kModuleString = R"( HloModule loop_unrolling_no_deps condition { input_tuple = (f32[], s32[]) parameter(0) cond = s32[] get-tuple-element(input_tuple), index=1 trip_count = s32[] constant(15) ROOT done = pred[] compare(cond, trip_count), direction=LT } ar_add { Arg_1 = f32[] parameter(1) Arg_0 = f32[] parameter(0) ROOT add_ar = f32[] add(Arg_1, Arg_0) } body { input_tuple = (f32[], s32[]) parameter(0) param_0 = f32[] get-tuple-element(input_tuple), index=0 cond = s32[] get-tuple-element(input_tuple), index=1 collective-permute = f32[] collective-permute(param_0), channel_id=1, source_target_pairs={{0,7},{1,0},{2,1},{3,2},{4,3},{5,4},{6,5},{7,6}}, frontend_attributes={_xla_send_recv_validation="{{7,14},{6,13},{5,12},{4,11},{3,10},{2,9},{1,8},{0,7}}"} one = s32[] constant(1) cond_plus_1 = s32[] add(cond, one) ROOT output_tuple = (f32[], s32[]) tuple(collective-permute, cond_plus_1) } ENTRY 
main { param_0 = f32[] parameter(0) param_2 = s32[] constant(0) tuple = (f32[], s32[]) tuple(param_0, param_2) ROOT while = (f32[], s32[]) while(tuple), condition=condition, body=body, backend_config={"known_trip_count":{"n":"15"}} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module, ParseAndReturnVerifiedModule(kModuleString)); DoubleBufferLoopUnrolling double_buffer( DoubleBufferLoopUnrolling::UnrollStrategy::kDoubleBuffer); EXPECT_THAT(double_buffer.Run(module.get()), IsOkAndHolds(true)); EXPECT_TRUE(*RunFileCheck(module->ToString(), R"( )")); } TEST_F(GpuLoopDoubleBufferTransformerTest, WhileLoopWithCollectivePermuteStartDone) { const char* kModuleString = R"( HloModule loop_unrolling_no_deps condition { input_tuple = (f32[], s32[]) parameter(0) cond = s32[] get-tuple-element(input_tuple), index=1 trip_count = s32[] constant(10) ROOT done = pred[] compare(cond, trip_count), direction=LT } ar_add { Arg_1 = f32[] parameter(1) Arg_0 = f32[] parameter(0) ROOT add_ar = f32[] add(Arg_1, Arg_0) } body { input_tuple = (f32[], s32[]) parameter(0) param_0 = f32[] get-tuple-element(input_tuple), index=0 cond = s32[] get-tuple-element(input_tuple), index=1 collective-permute-start = (f32[], f32[], u32[], u32[]) collective-permute-start(param_0), channel_id=1, source_target_pairs={{0,1},{1,2},{2,3},{3,0}}, frontend_attributes={_xla_send_recv_validation="{{0,6},{1,7},{2,8},{3,9}}"} collective-permute = f32[] collective-permute-done(collective-permute-start) one = s32[] constant(1) cond_plus_1 = s32[] add(cond, one) ROOT output_tuple = (f32[], s32[]) tuple(collective-permute, cond_plus_1) } ENTRY main { param_0 = f32[] parameter(0) param_2 = s32[] constant(0) tuple = (f32[], s32[]) tuple(param_0, param_2) ROOT while = (f32[], s32[]) while(tuple), condition=condition, body=body, backend_config={"known_trip_count":{"n":"10"}} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module, ParseAndReturnVerifiedModule(kModuleString)); DoubleBufferLoopUnrolling double_buffer( DoubleBufferLoopUnrolling::UnrollStrategy::kDoubleBuffer); EXPECT_THAT(double_buffer.Run(module.get()), IsOkAndHolds(true)); EXPECT_TRUE(*RunFileCheck(module->ToString(), R"( )")); } TEST_F(GpuLoopDoubleBufferTransformerTest, WhileLoopWithRecvDone) { const char* kModuleString = R"( HloModule loop_unrolling_no_deps condition { input_tuple = (f32[], s32[]) parameter(0) cond = s32[] get-tuple-element(input_tuple), index=1 trip_count = s32[] constant(10) ROOT done = pred[] compare(cond, trip_count), direction=LT } ar_add { Arg_1 = f32[] parameter(1) Arg_0 = f32[] parameter(0) ROOT add_ar = f32[] add(Arg_1, Arg_0) } body { input_tuple = (f32[], s32[]) parameter(0) param_0 = f32[] get-tuple-element(input_tuple), index=0 cond = s32[] get-tuple-element(input_tuple), index=1 after-all.0 = token[] after-all() recv.0 = (f32[], u32[], token[]) recv(after-all.0), channel_id=1, frontend_attributes={ _xla_send_recv_source_target_pairs="{{0,1},{1,2},{2,3},{3,0}}", _xla_send_recv_pipeline="0", _xla_send_recv_validation="{{0,6},{1,7},{2,8},{3,9}}" } recv-done.0 = (f32[], token[]) recv-done(recv.0), channel_id=1, frontend_attributes={ _xla_send_recv_pipeline="0" } recv-data = f32[] get-tuple-element(recv-done.0), index=0 one = s32[] constant(1) cond_plus_1 = s32[] add(cond, one) ROOT output_tuple = (f32[], s32[]) tuple(recv-data, cond_plus_1) } ENTRY main { param_0 = f32[] parameter(0) param_2 = s32[] constant(0) tuple = (f32[], s32[]) tuple(param_0, param_2) ROOT while = (f32[], s32[]) while(tuple), condition=condition, 
body=body, backend_config={"known_trip_count":{"n":"10"}} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module, ParseAndReturnVerifiedModule(kModuleString)); DoubleBufferLoopUnrolling double_buffer( DoubleBufferLoopUnrolling::UnrollStrategy::kDoubleBuffer); EXPECT_THAT(double_buffer.Run(module.get()), IsOkAndHolds(true)); EXPECT_TRUE(*RunFileCheck(module->ToString(), R"( )")); } TEST_F(GpuLoopDoubleBufferTransformerTest, WhileLoopWithSendDone) { const char* kModuleString = R"( HloModule loop_unrolling_no_deps condition { input_tuple = (f32[], s32[]) parameter(0) cond = s32[] get-tuple-element(input_tuple), index=1 trip_count = s32[] constant(10) ROOT done = pred[] compare(cond, trip_count), direction=LT } ar_add { Arg_1 = f32[] parameter(1) Arg_0 = f32[] parameter(0) ROOT add_ar = f32[] add(Arg_1, Arg_0) } body { input_tuple = (f32[], s32[]) parameter(0) param_0 = f32[] get-tuple-element(input_tuple), index=0 cond = s32[] get-tuple-element(input_tuple), index=1 after-all.0 = token[] after-all() send.0 = (f32[], u32[], token[]) send(param_0, after-all.0), channel_id=1, frontend_attributes={ _xla_send_recv_source_target_pairs="{{0,1},{1,2},{2,3},{3,0}}", _xla_send_recv_pipeline="0", _xla_send_recv_validation="{{0,6},{1,7},{2,8},{3,9}}" } send-done.0 = token[] send-done(send.0), channel_id=1, frontend_attributes={ _xla_send_recv_pipeline="0" } one = s32[] constant(1) cond_plus_1 = s32[] add(cond, one) ROOT output_tuple = (f32[], s32[]) tuple(param_0, cond_plus_1) } ENTRY main { param_0 = f32[] parameter(0) param_2 = s32[] constant(0) tuple = (f32[], s32[]) tuple(param_0, param_2) ROOT while = (f32[], s32[]) while(tuple), condition=condition, body=body, backend_config={"known_trip_count":{"n":"10"}} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module, ParseAndReturnVerifiedModule(kModuleString)); DoubleBufferLoopUnrolling double_buffer( DoubleBufferLoopUnrolling::UnrollStrategy::kDoubleBuffer); EXPECT_THAT(double_buffer.Run(module.get()), IsOkAndHolds(true)); EXPECT_TRUE(*RunFileCheck(module->ToString(), R"( )")); } TEST_F(GpuLoopDoubleBufferTransformerTest, WhileLoopWithTripCount1ShouldBeSkipped) { const char* const kModuleString = R"( HloModule loop_unrolling_skipped condition_nested { input_tuple = (s32[]) parameter(0) cond = s32[] get-tuple-element(input_tuple), index=0 trip_count = s32[] constant(0) ROOT done = pred[] compare(cond, trip_count), direction=LT } body_nested { input_tuple = (s32[]) parameter(0) cond = s32[] get-tuple-element(input_tuple), index=0 one = s32[] constant(1) cond_plus_1 = s32[] add(cond, one) ROOT output = (s32[]) tuple(cond_plus_1) } condition { input_tuple = (s32[]) parameter(0) cond = s32[] get-tuple-element(input_tuple), index=0 trip_count = s32[] constant(0) ROOT done = pred[] compare(cond, trip_count), direction=LT } body { input_tuple = (s32[]) parameter(0) ROOT output = (s32[]) while(input_tuple), condition=condition_nested, body=body_nested, backend_config={"known_trip_count":{"n":"1"}} } ENTRY main { param_0 = (s32[]) parameter(0) ROOT while = (s32[]) while(param_0), condition=condition, body=body, backend_config={"known_trip_count":{"n":"1"}} })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<xla::HloModule> module, ParseAndReturnVerifiedModule(kModuleString)); DoubleBufferLoopUnrolling double_buffer( DoubleBufferLoopUnrolling::UnrollStrategy::kFullUnroll); EXPECT_THAT(double_buffer.Run(module.get()), IsOkAndHolds(false)); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/double_buffer_loop_unrolling.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/double_buffer_loop_unrolling_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
eae41410-587c-4d36-bdfa-6cbf3503e709
cpp
tensorflow/tensorflow
scheduling_instruction_annotator
third_party/xla/xla/service/gpu/transforms/scheduling_instruction_annotator.cc
third_party/xla/xla/service/gpu/transforms/scheduling_instruction_annotator_test.cc
#include "xla/service/gpu/transforms/scheduling_instruction_annotator.h" #include <string> #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "tsl/platform/statusor.h" namespace xla::gpu { namespace { absl::StatusOr<bool> AnnotateSchedulingInstructionNames( HloComputation& computation) { bool changed = false; for (HloInstruction* inst : computation.instructions()) { if (!inst->metadata().scheduling_name().empty()) { continue; } if (inst->opcode() == HloOpcode::kConstant) { continue; } inst->set_metadata_scheduling_name(inst->name()); changed = true; } return changed; } } absl::StatusOr<bool> SchedulingInstructionAnnotator::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { CHECK(module->has_schedule()) << "The pass is supposed to run in the beginning of post-scheduling!"; bool changed = false; for (HloComputation* computation : module->MakeComputationPostOrder(execution_threads)) { TF_ASSIGN_OR_RETURN(bool result, AnnotateSchedulingInstructionNames(*computation)); changed |= result; } return changed; } }
#include "xla/service/gpu/transforms/scheduling_instruction_annotator.h" #include <memory> #include <gtest/gtest.h> #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/tests/filecheck.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/statusor.h" namespace xla::gpu { namespace { using SchedulingInstructionAnnotatorTest = HloTestBase; TEST_F(SchedulingInstructionAnnotatorTest, AnnotatesAllInstructionsWithTheirRespectiveNames) { constexpr absl::string_view kHloString = R"( HloModule module, is_scheduled=true ENTRY entry { p0 = f32[1] parameter(0) p1 = f32[1] parameter(1) add0 = f32[1] add(p0,p1) ROOT exp0 = f32[1] exponential(add0) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHloString)); SchedulingInstructionAnnotator pass; TF_ASSERT_OK_AND_ASSIGN(bool changed, pass.Run(module.get())); ASSERT_TRUE(changed); for (const auto* comp : module->computations()) { for (const auto* instruction : comp->instructions()) { EXPECT_EQ(instruction->name(), instruction->metadata().scheduling_name()); } } constexpr absl::string_view kExpected = R"( )"; TF_ASSERT_OK_AND_ASSIGN( bool filecheck_matches, RunFileCheck( module->ToString(HloPrintOptions().set_print_operand_shape(false)), kExpected)); EXPECT_TRUE(filecheck_matches); } TEST_F(SchedulingInstructionAnnotatorTest, SkipsAnnotatingConstants) { constexpr absl::string_view kHloString = R"( HloModule module, is_scheduled=true ENTRY entry { p0 = f32[1] parameter(0) c1 = f32[1] constant(42) ROOT add0 = f32[1] add(p0, c1) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHloString)); SchedulingInstructionAnnotator pass; TF_ASSERT_OK_AND_ASSIGN(bool changed, pass.Run(module.get())); ASSERT_TRUE(changed); constexpr absl::string_view kExpected = R"( )"; TF_ASSERT_OK_AND_ASSIGN( bool filecheck_matches, RunFileCheck( module->ToString(HloPrintOptions().set_print_operand_shape(false)), kExpected)); EXPECT_TRUE(filecheck_matches); } TEST_F(SchedulingInstructionAnnotatorTest, DoesNotAnnotateAllInstructionsWithTheirRespectiveNames) { constexpr absl::string_view kHloString = R"( HloModule module, is_scheduled=true ENTRY entry { p0 = f32[1] parameter(0), metadata={scheduling_name="p0"} p1 = f32[1] parameter(1), metadata={scheduling_name="p1"} add0 = f32[1] add(p0,p1), metadata={scheduling_name="add0"} ROOT exp0 = f32[1] exponential(add0), metadata={scheduling_name="exp0"} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(kHloString)); SchedulingInstructionAnnotator pass; TF_ASSERT_OK_AND_ASSIGN(bool changed, pass.Run(module.get())); EXPECT_FALSE(changed); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/scheduling_instruction_annotator.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/scheduling_instruction_annotator_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
744b13d6-0162-45d3-85b9-beec62d260a6
cpp
tensorflow/tensorflow
copy_fusion
third_party/xla/xla/service/gpu/transforms/copy_fusion.cc
third_party/xla/xla/service/gpu/transforms/copy_fusion_test.cc
#include "xla/service/gpu/transforms/copy_fusion.h" #include <cstdint> #include <queue> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_set.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/gpu/gpu_fusible.h" #include "xla/service/gpu/hlo_traversal.h" #include "xla/service/gpu/ir_emission_utils.h" #include "xla/service/gpu/reduction_utils.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" namespace xla { namespace gpu { bool OnlyElementwiseOpsReachableFromParams(HloComputation* fused_computation) { std::queue<const HloInstruction*> q; absl::flat_hash_set<const HloInstruction*> visited; for (auto param : fused_computation->parameter_instructions()) { q.push(param); visited.insert(param); } while (!q.empty()) { const HloInstruction* hlo = q.front(); q.pop(); for (auto user : hlo->users()) { if ((!user->IsElementwiseOnOperand(user->operand_index(hlo)) || user->opcode() == HloOpcode::kCopy) && user->opcode() != HloOpcode::kBitcast && user->opcode() != HloOpcode::kTuple) { return false; } if (visited.insert(user).second) { q.push(user); } } } return true; } absl::StatusOr<bool> CopyFusion::DoCopyFusion(HloComputation* computation) { bool changed = false; std::vector<HloInstruction*> defs_before_uses = computation->MakeInstructionPostOrder(); for (HloInstruction* hlo : defs_before_uses) { if (hlo->opcode() != HloOpcode::kFusion) { continue; } std::vector<HloInstruction*> copies; std::vector<HloInstruction*> other_users; HloComputation* fused_computation = hlo->fused_instructions_computation(); if (!OnlyElementwiseOpsReachableFromParams(fused_computation)) { continue; } HloInstruction* root = fused_computation->root_instruction(); if (IsReductionFromOrToContiguousDimensions(*root) || root->opcode() == HloOpcode::kScatter || (hlo->IsMultiOutputFusion() && absl::c_all_of(root->operands(), [](const HloInstruction* slice) { return slice->opcode() == HloOpcode::kSlice; }))) { continue; } for (auto user : hlo->users()) { HloInstruction* copy_user = user; if (copy_user->opcode() == HloOpcode::kGetTupleElement && copy_user->user_count() == 1) { if (IsReductionFromOrToContiguousDimensions( *(root->operand(copy_user->tuple_index())))) { other_users.push_back(user); continue; } copy_user = copy_user->users()[0]; } if (copy_user->opcode() == HloOpcode::kBitcast && copy_user->user_count() == 1) { copy_user = copy_user->users()[0]; } if (copy_user->opcode() == HloOpcode::kCopy && copy_user->shape() == copy_user->operand(0)->shape() && !copy_user->shape().IsTuple() && !copy_user->HasControlDependencies()) { copies.push_back(copy_user); } else { other_users.push_back(user); } } if (copies.empty()) { continue; } auto fusion_adaptor = HloFusionAdaptor::ForComputation(fused_computation); auto dynamic_update_slices = GetOutputDefiningDynamicUpdateSlices(fusion_adaptor->GetRoots()); if (!dynamic_update_slices.empty() && (root->opcode() != HloOpcode::kTuple || dynamic_update_slices.size() == root->shape().tuple_shapes_size())) { continue; } changed = true; HloInstruction::InstructionVector tuple_elements; int64_t num_outputs = hlo->IsMultiOutputFusion() ? 
root->operand_count() : int64_t{1}; tuple_elements.reserve(copies.size() + num_outputs); if (hlo->IsMultiOutputFusion()) { for (HloInstruction* operand : root->operands()) { tuple_elements.push_back(operand); } } else { tuple_elements.push_back(root); } for (auto copy : copies) { HloInstruction* user = copy; std::vector<HloInstruction*> operand_chain; operand_chain.push_back(user); while (user->operand(0) != hlo) { user = user->mutable_operand(0); operand_chain.push_back(user); } HloInstruction* clone_operand = root; if (hlo->IsMultiOutputFusion()) { clone_operand = root->mutable_operand(user->tuple_index()); CHECK_EQ(operand_chain.back()->opcode(), HloOpcode::kGetTupleElement); operand_chain.pop_back(); } for (int64_t i = operand_chain.size() - 1; i >= 0; --i) { HloInstruction* user = operand_chain[i]; clone_operand = fused_computation->AddInstruction( user->CloneWithNewOperands(user->shape(), {clone_operand})); } tuple_elements.push_back(clone_operand); } HloInstruction* new_root = fused_computation->AddInstruction( HloInstruction::CreateTuple(tuple_elements)); fused_computation->set_root_instruction(new_root, true); *hlo->mutable_shape() = new_root->shape(); if (root->opcode() == HloOpcode::kTuple) { TF_RETURN_IF_ERROR(fused_computation->RemoveInstruction(root)); } else { auto get_tuple_element_root = computation->AddInstruction( HloInstruction::CreateGetTupleElement(hlo, 0)); TF_RETURN_IF_ERROR(hlo->ReplaceAllUsesWithDifferentShape( other_users, get_tuple_element_root)); } for (int64_t i = 0; i < copies.size(); ++i) { auto get_tuple_element = computation->AddInstruction( HloInstruction::CreateGetTupleElement(hlo, num_outputs + i)); TF_RETURN_IF_ERROR( computation->ReplaceInstruction(copies[i], get_tuple_element)); } } return changed; } absl::StatusOr<bool> CopyFusion::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { return DoCopyFusion(module->entry_computation()); } } }
#include "xla/service/gpu/transforms/copy_fusion.h" #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/strings/str_cat.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/service/pattern_matcher.h" #include "xla/service/pattern_matcher_gmock.h" #include "xla/tests/hlo_test_base.h" namespace xla { namespace gpu { namespace m = ::xla::match; class CopyFusionTest : public HloTestBase { public: CopyFusion cf_; }; const char kModulePrefix[] = R"( HloModule test_module scalar_add_computation { scalar_lhs.0 = f32[] parameter(0) scalar_rhs.0 = f32[] parameter(1) ROOT add.0 = f32[] add(scalar_lhs.0, scalar_rhs.0) } scalar_mul_computation { scalar_lhs.1 = f32[] parameter(0) scalar_rhs.1 = f32[] parameter(1) ROOT mul.1 = f32[] multiply(scalar_lhs.1, scalar_rhs.1) })"; TEST_F(CopyFusionTest, CopyFusionTransposeOfBroadcastedConstantTwoCopies) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_computation { two = f32[] constant(2.0) broadcast = f32[16,32]{1,0} broadcast(two), dimensions={} s.1 = f32[16,32]{1,0} sqrt(broadcast) ROOT c.1 = f32[32,16]{1,0} transpose(s.1), dimensions={1,0} } ENTRY main { fusion = f32[32,16]{1,0} fusion(), kind=kInput, calls=fused_computation copy.1 = f32[32,16]{1,0} copy(fusion) copy.2 = f32[32,16]{1,0} copy(fusion) ROOT t = (f32[32,16]{1,0}, f32[32,16]{1,0}) tuple(copy.2, copy.1) })")) .value(); ASSERT_TRUE(cf_.Run(module.get()).value()); SCOPED_TRACE(module->ToString()); const HloInstruction* root = module->entry_computation()->root_instruction(); const HloInstruction* fusion = nullptr; ASSERT_THAT(root, GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)), m::GetTupleElement()))); EXPECT_THAT(fusion->fused_expression_root(), GmockMatch(m::Tuple(m::Transpose(), m::Copy(), m::Copy()))); } TEST_F(CopyFusionTest, CopyFusionTransposeTwoCopies) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_computation { param_0.1 = f32[16,32]{1,0} parameter(0) s.1 = f32[16,32]{1,0} sqrt(param_0.1) ROOT c.1 = f32[32,16]{1,0} transpose(s.1), dimensions={1,0} } ENTRY main { p = f32[16,32]{1,0} parameter(0) fusion = f32[32,16]{1,0} fusion(p), kind=kInput, calls=fused_computation copy.1 = f32[32,16]{1,0} copy(fusion) copy.2 = f32[32,16]{1,0} copy(fusion) ROOT t = (f32[32,16]{1,0}, f32[32,16]{1,0}) tuple(copy.2, copy.1) })")) .value(); ASSERT_FALSE(cf_.Run(module.get()).value()); } TEST_F(CopyFusionTest, CopyFusionNegateAndTwoCopies) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_computation { p1.1 = f32[128,512,28,28]{3,2,1,0} parameter(0) mul = f32[128,512,28,28]{3,2,1,0} multiply(p1.1, p1.1) ROOT neg = f32[128,512,28,28]{3,2,1,0} negate(mul) } ENTRY entry { p0 = f32[128,512,28,28]{3,2,1,0} parameter(0) fusion = f32[128,512,28,28]{3,2,1,0} fusion(p0), kind=kInput, calls=fused_computation copy.1 = f32[128,512,28,28]{3,2,1,0} copy(fusion) copy.2 = f32[128,512,28,28]{3,2,1,0} copy(fusion) ROOT root = (f32[128,512,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0}) tuple(copy.1, copy.2) })")) .value(); ASSERT_TRUE(cf_.Run(module.get()).value()); SCOPED_TRACE(module->ToString()); const HloInstruction* root = module->entry_computation()->root_instruction(); const HloInstruction* fusion = nullptr; ASSERT_THAT(root, GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)), m::GetTupleElement()))); EXPECT_THAT(fusion->fused_expression_root(), GmockMatch(m::Tuple(m::Negate(), m::Copy(), m::Copy()))); } TEST_F(CopyFusionTest, CopyFusionShouldNotRunWithReduce) { auto module = 
ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_computation { p1.1 = f32[128,512,28,28]{3,2,1,0} parameter(1) mul = f32[128,512,28,28]{3,2,1,0} multiply(p1.1, p1.1) const.1 = f32[] parameter(0) ROOT reduce.1 = f32[512]{0} reduce(mul, const.1), dimensions={0,2,3}, to_apply=scalar_add_computation } ENTRY entry { p0 = f32[] parameter(0) p1 = f32[128,512,28,28]{3,2,1,0} parameter(1) fusion = f32[512] fusion(p0, p1), kind=kInput, calls=fused_computation copy.1 = f32[512]{0} copy(fusion) copy.2 = f32[512]{0} copy(fusion) ROOT root = (f32[512]{0}, f32[512]{0}) tuple(copy.1, copy.2) })")) .value(); ASSERT_FALSE(cf_.Run(module.get()).value()); } TEST_F(CopyFusionTest, CopyFusionShouldRunWithUncopiedReduce) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_computation { two = f32[] constant(2.0) broadcast = f32[128,512,28,28]{3,2,1,0} broadcast(two) mul = f32[128,512,28,28]{3,2,1,0} multiply(broadcast, broadcast) const = f32[] constant(0.0) reduce = f32[512]{0} reduce(mul, const), dimensions={0,2,3}, to_apply=scalar_add_computation ROOT tuple = (f32[128,512,28,28]{3,2,1,0}, f32[512]{0}) tuple(mul, reduce) } ENTRY entry { fusion = (f32[128,512,28,28]{3,2,1,0}, f32[512]) fusion(), kind=kInput, calls=fused_computation gte = f32[128,512,28,28]{3,2,1,0} get-tuple-element(fusion), index=0 gte.2 = f32[512]{0} get-tuple-element(fusion), index=1 copy.1 = f32[128,512,28,28]{3,2,1,0} copy(gte) ROOT root = (f32[128,512,28,28]{3,2,1,0}, f32[512]{0}) tuple(copy.1, gte.2) })")) .value(); ASSERT_TRUE(cf_.Run(module.get()).value()); SCOPED_TRACE(module->ToString()); const HloInstruction* root = module->entry_computation()->root_instruction(); const HloInstruction* fusion = nullptr; ASSERT_THAT(root, GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)), m::GetTupleElement()))); EXPECT_THAT(fusion->fused_expression_root(), GmockMatch(m::Tuple(m::Multiply(), m::Reduce(), m::Copy()))); } TEST_F(CopyFusionTest, CopyFusionShouldNotFuseForSliceMultioutputFusion) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_computation { p1 = f32[128,512,28,28]{3,2,1,0} parameter(0) mul = f32[128,512,28,28]{3,2,1,0} multiply(p1, p1) slice1 = f32[128,100,28,28]{3,2,1,0} slice(mul), slice={[0:128],[0:100],[0:28],[0:28]} slice2 = f32[128,200,28,28]{3,2,1,0} slice(mul), slice={[0:128],[50:250],[0:28],[0:28]} ROOT tuple = (f32[128,100,28,28]{3,2,1,0}, f32[128,200,28,28]{3,2,1,0}) tuple(slice1, slice2) } ENTRY entry { p1 = f32[128,512,28,28]{3,2,1,0} parameter(0) ROOT fusion = (f32[128,100,28,28]{3,2,1,0}, f32[128,200,28,28]{3,2,1,0}) fusion(p1), kind=kInput, calls=fused_computation })")) .value(); ASSERT_FALSE(cf_.Run(module.get()).value()); } TEST_F(CopyFusionTest, CopyFusionShouldNotRunWithScatter) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_computation { p0 = f32[50,49,48,47,46]{4,3,2,1,0} parameter(0) scatter_indices = s64[10,9,8,7,5]{4,3,2,1,0} parameter(1) updates = f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0} parameter(2) input_tensor = f32[50,49,48,47,46]{4,3,2,1,0} negate(p0) ROOT %scatter = f32[50,49,48,47,46]{4,3,2,1,0} scatter(input_tensor, scatter_indices, updates), update_window_dims={4,5,6,7,8}, inserted_window_dims={}, scatter_dims_to_operand_dims={0,1,2,3,4}, index_vector_dim=4, to_apply=scalar_add_computation } ENTRY entry { param.0 = f32[50,49,48,47,46]{4,3,2,1,0} parameter(0) param.1 = s64[10,9,8,7,5]{4,3,2,1,0} parameter(1) param.2 = f32[10,9,8,7,30,29,28,27,26]{8,7,6,5,4,3,2,1,0} 
parameter(2) fusion = f32[50,49,48,47,46]{4,3,2,1,0} fusion(param.0, param.1, param.2), kind=kInput, calls=fused_computation ROOT copy = f32[50,49,48,47,46]{4,3,2,1,0} copy(fusion) })")) .value(); ASSERT_FALSE(cf_.Run(module.get()).value()); } TEST_F(CopyFusionTest, CopyFusionShouldNotRunOutsideEntryComputation) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_computation.549 { param_0.8511 = bf16[15,1,2,2048,48,128]{3,5,4,2,1,0} parameter(0) bitcast.52601 = bf16[15,1,2,48,128,2048]{5,4,3,2,1,0} bitcast(param_0.8511) slice = bf16[15,1,2,48,128,1]{5,4,3,2,1,0} slice(bitcast.52601), slice={[0:15:1], [0:1:1], [0:2:1], [0:48:1], [0:128:1], [0:1:1]} bitcast = bf16[15,1,2,48,128]{4,3,2,1,0} bitcast(slice) ROOT broadcast = bf16[15,1,2,48,128,2048]{5,4,3,2,1,0} broadcast(bitcast), dimensions={0,1,2,3,4} } condition { constant_6915 = s32[] constant(15) param.218 = (bf16[15,1,2,2048,48,128]{3,5,4,2,1,0}, s32[]) parameter(0) get-tuple-element.3714 = s32[] get-tuple-element(param.218), index=1 ROOT compare.1738 = pred[] compare(get-tuple-element.3714, constant_6915), direction=LT } body { tuple_param = (bf16[15,1,2,2048,48,128]{3,5,4,2,1,0}, s32[]) parameter(0) param_0 = bf16[15,1,2,2048,48,128]{3,5,4,2,1,0} get-tuple-element(tuple_param), index=0 param_1 = s32[] get-tuple-element(tuple_param), index=1 fusion.549 = bf16[15,1,2,48,128,2048]{5,4,3,2,1,0} fusion(param_0), kind=kLoop, calls=fused_computation.549 bitcast = bf16[15,1,2,2048,48,128]{3,5,4,2,1,0} bitcast(fusion.549) copy = bf16[15,1,2,2048,48,128]{3,5,4,2,1,0} copy(bitcast) constant_one = s32[] constant(1) add = s32[] add(param_1, constant_one), control-predecessors={fusion.549} ROOT tuple = (bf16[15,1,2,2048,48,128]{3,5,4,2,1,0}, s32[]) tuple(copy, add) } ENTRY main { param_0 = bf16[15,1,2,2048,48,128]{3,5,4,2,1,0} parameter(0) zero = s32[] constant(0) copy.0 = bf16[15,1,2,2048,48,128]{3,5,4,2,1,0} copy(param_0) copy.1 = s32[] copy(zero) tuple = tuple(copy.0, copy.1) ROOT while = (bf16[15,1,2,2048,48,128]{3,5,4,2,1,0}, s32[]) while(tuple), condition=condition, body=body, backend_config="{\"known_trip_count\":{\"n\":\"15\"}}" })")) .value(); ASSERT_FALSE(cf_.Run(module.get()).value()); } TEST_F(CopyFusionTest, CopyFusionShouldNotRunWithDynamicUpdateSliceInplace) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_computation { p.0 = f16[50,96,1024]{2,1,0} parameter(0) p.1 = f16[1,96,1024]{2,1,0} parameter(1) c.0 = s32[3]{0} constant({0, 0, 0}) ROOT %dynamic-update-slice = f16[50,96,1024]{2,1,0} dynamic-update-slice(p.0, p.1, c.0) } ENTRY entry { p0 = f16[50,96,1024]{2,1,0} parameter(0) p1 = f16[1,96,1024]{2,1,0} parameter(1) fusion = f16[50,96,1024]{2,1,0} fusion(p0, p1), kind=kInput, calls=fused_computation copy.1 = f16[50,96,1024]{2,1,0} copy(fusion) copy.2 = f16[50,96,1024]{2,1,0} copy(fusion) ROOT root = (f16[50,96,1024]{2,1,0}, f16[50,96,1024]{2,1,0}) tuple(copy.1, copy.2) })")) .value(); ASSERT_FALSE(cf_.Run(module.get()).value()); } TEST_F(CopyFusionTest, CopyFusionWithDynamicUpdateSliceNotInplace) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_computation { one = f32[] constant(1.0) zero = f32[] constant(0.0) p.0 = f16[50,96,1024]{2,1,0} broadcast(one), dimensions={} p.1 = f16[1,96,1024]{2,1,0} broadcast(zero), dimensions={} c.0 = s32[3]{0} constant({0, 0, 0}) dynamic-update-slice = f16[50,96,1024]{2,1,0} dynamic-update-slice(p.0, p.1, c.0) neg = f16[50,96,1024]{2,1,0} negate(dynamic-update-slice) ROOT tuple = (f16[50,96,1024]{2,1,0}, 
f16[50,96,1024]{2,1,0}) tuple(dynamic-update-slice, neg) } ENTRY entry { fusion = (f16[50,96,1024]{2,1,0}, f16[50,96,1024]{2,1,0}) fusion(), kind=kInput, calls=fused_computation gte.0 = f16[50,96,1024]{2,1,0} get-tuple-element(fusion), index=0 gte.1 = f16[50,96,1024]{2,1,0} get-tuple-element(fusion), index=1 bitcast = f16[1,50,96,1024]{3,2,1,0} bitcast(gte.0) copy = f16[1,50,96,1024]{3,2,1,0} copy(bitcast) ROOT root = (f16[1,50,96,1024]{3,2,1,0}, f16[50,96,1024]{2,1,0}) tuple(copy, gte.1) })")) .value(); ASSERT_TRUE(cf_.Run(module.get()).value()); SCOPED_TRACE(module->ToString()); const HloInstruction* root = module->entry_computation()->root_instruction(); const HloInstruction* fusion = nullptr; ASSERT_THAT(root, GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)), m::GetTupleElement()))); EXPECT_THAT( fusion->fused_expression_root(), GmockMatch(m::Tuple(m::DynamicUpdateSlice(), m::Negate(), m::Copy()))); } TEST_F(CopyFusionTest, CopyFusionTransposeAndThreeCopies) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_computation { two = f32[] constant(2.0) param_0.1 = f32[16,32]{1,0} broadcast(two), dimensions={} s.1 = f32[16,32]{1,0} sqrt(param_0.1) ROOT c.1 = f32[32,16]{1,0} transpose(s.1), dimensions={1,0} } ENTRY entry { fusion = f32[32,16]{1,0} fusion(), kind=kInput, calls=fused_computation copy.1 = f32[32,16]{1,0} copy(fusion) copy.2 = f32[32,16]{1,0} copy(fusion) copy.3 = f32[32,16]{1,0} copy(fusion) ROOT root = (f32[32,16]{1,0}, f32[32,16]{1,0}, f32[32,16]{1,0}) tuple(copy.1, copy.2, copy.3) })")) .value(); ASSERT_TRUE(cf_.Run(module.get()).value()); SCOPED_TRACE(module->ToString()); const HloInstruction* root = module->entry_computation()->root_instruction(); const HloInstruction* fusion = nullptr; ASSERT_THAT(root, GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)), m::GetTupleElement(), m::GetTupleElement()))); EXPECT_THAT( fusion->fused_expression_root(), GmockMatch(m::Tuple(m::Transpose(), m::Copy(), m::Copy(), m::Copy()))); } TEST_F(CopyFusionTest, CopyFusionRunWithOnlyOneCopy) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_computation { p1.1 = f32[128,512,28,28]{3,2,1,0} parameter(0) mul = f32[128,512,28,28]{3,2,1,0} multiply(p1.1, p1.1) ROOT neg = f32[128,512,28,28]{3,2,1,0} negate(mul) } ENTRY entry { p0 = f32[128,512,28,28]{3,2,1,0} parameter(0) fusion = f32[128,512,28,28]{3,2,1,0} fusion(p0), kind=kInput, calls=fused_computation ROOT copy.1 = f32[128,512,28,28]{3,2,1,0} copy(fusion) })")) .value(); ASSERT_TRUE(cf_.Run(module.get()).value()); SCOPED_TRACE(module->ToString()); const HloInstruction* root = module->entry_computation()->root_instruction(); const HloInstruction* fusion = nullptr; ASSERT_THAT(root, GmockMatch(m::GetTupleElement(m::Fusion(&fusion)))); EXPECT_THAT(fusion->fused_expression_root(), GmockMatch(m::Tuple(m::Negate(), m::Copy()))); } TEST_F(CopyFusionTest, CopyFusionNegateAndTwoCopiesAndTransposeCopy) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_computation { p1.1 = f32[128,512,28,28]{3,2,1,0} parameter(0) mul = f32[128,512,28,28]{3,2,1,0} multiply(p1.1, p1.1) ROOT neg = f32[128,512,28,28]{3,2,1,0} negate(mul) } ENTRY entry { p0 = f32[128,512,28,28]{3,2,1,0} parameter(0) fusion = f32[128,512,28,28]{3,2,1,0} fusion(p0), kind=kInput, calls=fused_computation copy.1 = f32[128,512,28,28]{3,2,1,0} copy(fusion) transpose = f32[128,512,28,28]{2,3,0,1} copy(fusion) bitcast = f32[512,128,28,28]{3,2,1,0} bitcast(transpose) copy.2 = 
f32[128,512,28,28]{3,2,1,0} copy(fusion) ROOT root = (f32[128,512,28,28]{3,2,1,0}, f32[512,128,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0}) tuple(copy.1, bitcast, copy.2) })")) .value(); ASSERT_TRUE(cf_.Run(module.get()).value()); SCOPED_TRACE(module->ToString()); const HloInstruction* root = module->entry_computation()->root_instruction(); const HloInstruction* fusion = nullptr; ASSERT_THAT(root, GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)), m::Bitcast(), m::GetTupleElement()))); EXPECT_THAT(fusion->fused_expression_root(), GmockMatch(m::Tuple(m::Negate(), m::Copy(), m::Copy()))); } TEST_F(CopyFusionTest, CopyFusionRunWithOnlyOneNonTransposeCopy) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_computation { p1.1 = f32[128,512,28,28]{3,2,1,0} parameter(0) mul = f32[128,512,28,28]{3,2,1,0} multiply(p1.1, p1.1) ROOT neg = f32[128,512,28,28]{3,2,1,0} negate(mul) } ENTRY entry { p0 = f32[128,512,28,28]{3,2,1,0} parameter(0) fusion = f32[128,512,28,28]{3,2,1,0} fusion(p0), kind=kInput, calls=fused_computation copy.1 = f32[128,512,28,28]{3,2,1,0} copy(fusion) transpose.1 = f32[128,512,28,28]{2,3,0,1} copy(fusion) bitcast.1 = f32[512,128,28,28]{3,2,1,0} bitcast(transpose.1) transpose.2 = f32[128,512,28,28]{2,3,0,1} copy(fusion) bitcast.2 = f32[512,128,28,28]{3,2,1,0} bitcast(transpose.2) ROOT root = (f32[128,512,28,28]{3,2,1,0}, f32[512,128,28,28]{3,2,1,0}, f32[512,128,28,28]{3,2,1,0}) tuple(copy.1, bitcast.1, bitcast.2) })")) .value(); ASSERT_TRUE(cf_.Run(module.get()).value()); SCOPED_TRACE(module->ToString()); const HloInstruction* root = module->entry_computation()->root_instruction(); const HloInstruction* fusion = nullptr; ASSERT_THAT(root, GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)), m::Bitcast(), m::Bitcast()))); EXPECT_THAT(fusion->fused_expression_root(), GmockMatch(m::Tuple(m::Negate(), m::Copy()))); } TEST_F(CopyFusionTest, CopyFusionSkipTupleCopies) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_computation { p1.1 = f32[128,512,28,28]{3,2,1,0} parameter(0) mul = f32[128,512,28,28]{3,2,1,0} multiply(p1.1, p1.1) neg.1 = f32[128,512,28,28]{3,2,1,0} negate(mul) neg.2 = f32[128,512,28,28]{3,2,1,0} negate(mul) ROOT tuple = (f32[128,512,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0}) tuple(neg.1, neg.2) } ENTRY entry { p0 = f32[128,512,28,28]{3,2,1,0} parameter(0) fusion = (f32[128,512,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0}) fusion(p0), kind=kInput, calls=fused_computation copy.1 = (f32[128,512,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0}) copy(fusion) copy.2 = (f32[128,512,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0}) copy(fusion) ROOT root = ((f32[128,512,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0}),(f32[128,512,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0})) tuple(copy.1, copy.2) })")) .value(); ASSERT_FALSE(cf_.Run(module.get()).value()); } TEST_F(CopyFusionTest, CopyFusionTupleAndGetTuple) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_computation { p1.1 = f32[128,512,28,28]{3,2,1,0} parameter(0) mul = f32[128,512,28,28]{3,2,1,0} multiply(p1.1, p1.1) neg.1 = f32[128,512,28,28]{3,2,1,0} negate(mul) neg.2 = f32[128,512,28,28]{3,2,1,0} negate(mul) ROOT tuple = (f32[128,512,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0}) tuple(neg.1, neg.2) } ENTRY entry { p0 = f32[128,512,28,28]{3,2,1,0} parameter(0) fusion = (f32[128,512,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0}) fusion(p0), kind=kInput, calls=fused_computation gte.1 = f32[128,512,28,28]{3,2,1,0} 
get-tuple-element(fusion), index=0 gte.2 = f32[128,512,28,28]{3,2,1,0} get-tuple-element(fusion), index=1 copy.1 = f32[128,512,28,28]{3,2,1,0} copy(gte.1) copy.2 = f32[128,512,28,28]{3,2,1,0} copy(gte.2) ROOT root = (f32[128,512,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0}) tuple(copy.1, copy.2) })")) .value(); ASSERT_TRUE(cf_.Run(module.get()).value()); SCOPED_TRACE(module->ToString()); const HloInstruction* root = module->entry_computation()->root_instruction(); const HloInstruction* fusion = nullptr; ASSERT_THAT(root, GmockMatch(m::Tuple(m::GetTupleElement(m::Fusion(&fusion)), m::GetTupleElement()))); EXPECT_THAT( fusion->fused_expression_root(), GmockMatch(m::Tuple(m::Negate(), m::Negate(), m::Copy(), m::Copy()))); } TEST_F(CopyFusionTest, CopyFusionWithFusionReturningTupleAndOtherUser) { auto module = ParseAndReturnVerifiedModule(absl::StrCat(kModulePrefix, R"( fused_computation { p1.1 = f32[128,512,28,28]{3,2,1,0} parameter(0) mul = f32[128,512,28,28]{3,2,1,0} multiply(p1.1, p1.1) neg.1 = f32[128,512,28,28]{3,2,1,0} negate(mul) neg.2 = f32[128,512,28,28]{3,2,1,0} negate(mul) ROOT tuple = (f32[128,512,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0}) tuple(neg.1, neg.2) } ENTRY entry { p0 = f32[128,512,28,28]{3,2,1,0} parameter(0) fusion = (f32[128,512,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0}) fusion(p0), kind=kInput, calls=fused_computation gte.1 = f32[128,512,28,28]{3,2,1,0} get-tuple-element(fusion), index=0 gte.2 = f32[128,512,28,28]{3,2,1,0} get-tuple-element(fusion), index=1 copy.1 = f32[128,512,28,28]{3,2,1,0} copy(gte.1) copy.2 = f32[128,512,28,28]{3,2,1,0} copy(gte.2) transpose = f32[128,512,28,28]{2,3,0,1} copy(gte.1) bitcast = f32[512,128,28,28]{3,2,1,0} bitcast(transpose) ROOT root = (f32[128,512,28,28]{3,2,1,0}, f32[512,128,28,28]{3,2,1,0}, f32[128,512,28,28]{3,2,1,0}) tuple(copy.1, bitcast, copy.2) })")) .value(); ASSERT_TRUE(cf_.Run(module.get()).value()); SCOPED_TRACE(module->ToString()); const HloInstruction* root = module->entry_computation()->root_instruction(); const HloInstruction* fusion = nullptr; ASSERT_THAT(root, GmockMatch(m::Tuple(m::Copy(), m::Bitcast(), m::GetTupleElement(m::Fusion(&fusion))))); EXPECT_THAT(fusion->fused_expression_root(), GmockMatch(m::Tuple(m::Negate(), m::Negate(), m::Copy()))); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/copy_fusion.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/copy_fusion_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
9b2eab20-5371-4a38-8c1d-c212f158648d
cpp
tensorflow/tensorflow
alias_passthrough_params
third_party/xla/xla/service/gpu/transforms/alias_passthrough_params.cc
third_party/xla/xla/service/gpu/transforms/alias_passthrough_params_test.cc
#include "xla/service/gpu/transforms/alias_passthrough_params.h" #include <cstdint> #include "absl/container/flat_hash_set.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/shape_util.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" namespace xla { namespace gpu { absl::StatusOr<bool> AliasPassthroughParams::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { const HloInstruction* root = module->entry_computation()->root_instruction(); if (module->entry_computation()->num_parameters() == 0 || root->opcode() != HloOpcode::kTuple) { return false; } bool changed = false; absl::flat_hash_set<int64_t> used_params; for (int64_t i = 0; i < root->operand_count(); ++i) { if (root->operand(i)->opcode() == HloOpcode::kParameter && used_params.count(root->operand(i)->parameter_number()) == 0) { VLOG(2) << "Parameter " << root->operand(i)->parameter_number() << " with shape " << root->operand(i)->shape().ToString() << " in module " << module->name() << " is passed-through to root tuple element " << i << ": " << root->shape().ToString(); if (module->input_output_alias_config().OutputHasAlias({i}) || module->input_output_alias_config().ParameterHasAlias( root->operand(i)->parameter_number(), {})) { VLOG(2) << "Skip setting the above pass-through alias as an alias may" << " have been set up for alising resource update."; continue; } TF_RETURN_IF_ERROR(module->input_output_alias_config().SetUpAlias( {i}, root->operand(i)->parameter_number(), {})); used_params.insert(root->operand(i)->parameter_number()); changed = true; } } return changed; } } }
#include "xla/service/gpu/transforms/alias_passthrough_params.h" #include "xla/tests/hlo_test_base.h" #include "xla/tsl/lib/core/status_test_util.h" #include "tsl/platform/test.h" namespace xla { namespace gpu { class AliasPassthroughParamsTest : public HloTestBase {}; TEST_F(AliasPassthroughParamsTest, AliasPassThroughParams) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { p0 = f16[2048,1024] parameter(0) p1 = f16[2048,1024] parameter(1) sum = f16[2048,1024] add(p0, p1) ROOT root = (f16[2048,1024], f16[2048,1024], f16[2048,1024]) tuple(p0, sum, p1) })") .value(); EXPECT_TRUE(AliasPassthroughParams().Run(module.get()).value()); const auto& alias_config = module->input_output_alias_config(); EXPECT_EQ(0, alias_config.GetAliasedParameter({0})->parameter_number); EXPECT_FALSE(alias_config.OutputHasAlias({1})); EXPECT_EQ(1, alias_config.GetAliasedParameter({2})->parameter_number); } TEST_F(AliasPassthroughParamsTest, DoNotAliasPassThroughParamsMoreThanOnce) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { p0 = f16[2048,1024] parameter(0) ROOT root = (f16[2048,1024], f16[2048,1024]) tuple(p0, p0) })") .value(); EXPECT_TRUE(AliasPassthroughParams().Run(module.get()).value()); const auto& alias_config = module->input_output_alias_config(); EXPECT_EQ(0, alias_config.GetAliasedParameter({0})->parameter_number); EXPECT_FALSE(alias_config.OutputHasAlias({1})); } TEST_F(AliasPassthroughParamsTest, PresetAliases) { auto module = ParseAndReturnVerifiedModule(R"( HloModule TestModule ENTRY TestComputation { p0 = f16[2048,1024] parameter(0) p1 = f16[2048,1024] parameter(1) sum = f16[2048,1024] add(p0, p1) ROOT root = (f16[2048,1024], f16[2048,1024], f16[2048,1024]) tuple(p0, sum, p1) })") .value(); auto& preset_alias = module->input_output_alias_config(); TF_EXPECT_OK(preset_alias.SetUpAlias({1}, 0, {})); EXPECT_TRUE(AliasPassthroughParams().Run(module.get()).value()); const auto& alias_result = module->input_output_alias_config(); EXPECT_EQ(1, alias_result.GetAliasedParameter({2})->parameter_number); EXPECT_FALSE(alias_result.OutputHasAlias({0})); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/alias_passthrough_params.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/alias_passthrough_params_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
cee74dcb-bf1f-4997-86e7-5f7c7ff793fa
cpp
tensorflow/tensorflow
sort_rewriter
third_party/xla/xla/service/gpu/transforms/sort_rewriter.cc
third_party/xla/xla/service/gpu/transforms/sort_rewriter_test.cc
#include "xla/service/gpu/transforms/sort_rewriter.h" #include <algorithm> #include <cstdint> #include <memory> #include <optional> #include <utility> #include <vector> #include "absl/container/flat_hash_set.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/comparison_util.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/gpu/cublas_cudnn.h" #include "xla/service/gpu/runtime/cub_sort_thunk.h" #include "xla/service/stable_sort_expander.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { struct SortComputationAnalysis { int key_operand; bool descending; }; std::pair<int64_t, int64_t> ParametersFromCmpOperands( const HloCompareInstruction* cmp_op) { if (cmp_op == nullptr) { return std::pair<int64_t, int64_t>(-1, -1); } const HloParameterInstruction* param0 = DynCast<HloParameterInstruction>(cmp_op->operand(0)); const HloParameterInstruction* param1 = DynCast<HloParameterInstruction>(cmp_op->operand(1)); return (param0 && param1) ? std::make_pair(param0->parameter_number(), param1->parameter_number()) : std::pair<int64_t, int64_t>(-1, -1); } std::optional<SortComputationAnalysis> AnalyzeCompareOp( const HloInstruction* maybe_compare_op) { const HloCompareInstruction* compare = DynCast<HloCompareInstruction>(maybe_compare_op); if (compare == nullptr || compare->direction() == ComparisonDirection::kEq || compare->direction() == ComparisonDirection::kNe) { return std::nullopt; } auto [index0, index1] = ParametersFromCmpOperands(compare); if (index0 == -1 || index1 == -1) { return std::nullopt; } int first_index = std::min(index0, index1); if (first_index % 2 != 0 || std::max(index0, index1) != first_index + 1) { return std::nullopt; } bool descending = compare->direction() == ComparisonDirection::kGt || compare->direction() == ComparisonDirection::kGe; bool reverse = first_index != index0; return SortComputationAnalysis{first_index / 2, descending != reverse}; } std::optional<SortComputationAnalysis> AnalyzeComplexSortComputation( const HloSortInstruction& sort_op) { auto computation = sort_op.called_computations().front(); if (computation->num_parameters() != 4) { return std::nullopt; } int64_t iota_operand_index = StableSortExpander::IotaOperandIndexForStableSort(sort_op); if (iota_operand_index < 0) { return std::nullopt; } auto root = computation->root_instruction(); if (root->opcode() != HloOpcode::kSelect) { return std::nullopt; } auto iota_cmp = DynCast<HloCompareInstruction>(root->operand(1)); auto [iotap0, iotap1] = ParametersFromCmpOperands(iota_cmp); if (iota_cmp == nullptr || iota_cmp->direction() != ComparisonDirection::kLt || iotap0 != iota_operand_index * 2 || iotap1 != iota_operand_index * 2 + 1) { return std::nullopt; } auto eq_cmp = DynCast<HloCompareInstruction>(root->operand(0)); if (eq_cmp == nullptr || eq_cmp->direction() != ComparisonDirection::kEq) { return std::nullopt; } auto [p0, p1] = ParametersFromCmpOperands(eq_cmp); if (p0 < 0 || p1 < 0) { auto cmp = DynCast<HloCompareInstruction>(eq_cmp->operand(0)); auto cmp_reverse = DynCast<HloCompareInstruction>(eq_cmp->operand(1)); auto [a, b] = ParametersFromCmpOperands(cmp); auto [p, q] = 
ParametersFromCmpOperands(cmp_reverse); if (cmp == nullptr || cmp_reverse == nullptr || a < 0 || b < 0 || a != q || b != p || cmp->direction() != cmp_reverse->direction() || cmp->direction() == Comparison::Direction::kEq || cmp->direction() == Comparison::Direction::kNe) { return std::nullopt; } } return AnalyzeCompareOp(root->operand(2)); } std::optional<SortComputationAnalysis> AnalyzeSortOp( const HloSortInstruction& sort_op) { auto computation = sort_op.called_computations().front(); auto result = AnalyzeCompareOp(computation->root_instruction()); if (!result.has_value()) { result = AnalyzeComplexSortComputation(sort_op); } return result; } absl::StatusOr<std::unique_ptr<CubSortRunnerInterface>> CreateRunner( HloSortInstruction* sort_op, const SortComputationAnalysis& sort_config) { int value_index = 1 - sort_config.key_operand; return CubSortRunnerInterface::Create( sort_op->operand(sort_config.key_operand)->shape().element_type(), sort_op->operand_count() == 2 ? std::optional(sort_op->operand(value_index)->shape().element_type()) : std::nullopt); } bool IsCubCompatibleSort(HloSortInstruction* sort_op) { VLOG(1) << "Sort instruction: " << sort_op->name(); if (sort_op->operand_count() != 1 && sort_op->operand_count() != 2) { VLOG(2) << "Unsupported operand count: " << sort_op->operand_count(); return false; } const Shape& operand_shape = sort_op->operand(0)->shape(); if (sort_op->sort_dimension() != operand_shape.rank() - 1) { VLOG(2) << "Sort dimension should be the minor one"; return false; } if (Product(operand_shape.dimensions()) < SortRewriter::SortSizeThreshold()) { VLOG(2) << "Tensor shape size is too small to see an improvement"; return false; } auto sort_config = AnalyzeSortOp(*sort_op); if (!sort_config.has_value()) { VLOG(2) << "Only simple compare computations are supported"; return false; } if (!CreateRunner(sort_op, *sort_config).ok()) { VLOG(2) << "Unsupported operand types (no compiled CUB kernels)"; return false; } VLOG(2) << "Sort operation is compatible"; return true; } HloInstruction* UnpackResultPair(HloSortInstruction* sort_op, HloInstruction* custom_call, bool swap) { HloComputation* parent = sort_op->parent(); HloInstruction* gte0 = parent->AddInstruction(HloInstruction::CreateGetTupleElement( sort_op->operand(0)->shape(), custom_call, swap ? 1 : 0)); HloInstruction* gte1 = parent->AddInstruction(HloInstruction::CreateGetTupleElement( sort_op->operand(1)->shape(), custom_call, swap ? 
0 : 1)); return parent->AddInstruction(HloInstruction::CreateTuple({gte0, gte1})); } } absl::StatusOr<bool> SortRewriter::RunOnInstruction( HloSortInstruction* sort_op) { SortComputationAnalysis sort_config = AnalyzeSortOp(*sort_op).value(); const Shape& operand_shape = sort_op->operand(0)->shape(); int64_t batch_size = Product(operand_shape.dimensions()) / operand_shape.dimensions(sort_op->sort_dimension()); TF_ASSIGN_OR_RETURN(auto runner, CreateRunner(sort_op, sort_config)); TF_ASSIGN_OR_RETURN( int64_t scratch_size, runner->GetScratchSize(Product(operand_shape.dimensions()), batch_size)); if (batch_size > 1) { scratch_size += sizeof(int) - scratch_size % sizeof(int); scratch_size += (batch_size + 1) * sizeof(int); } HloInstruction* keys = sort_op->mutable_operand(0); HloInstruction* values = nullptr; if (sort_op->operand_count() == 2) { values = sort_op->mutable_operand(1); if (sort_config.key_operand == 1) { std::swap(keys, values); } } std::vector<Shape> shapes{keys->shape()}; std::vector<HloInstruction*> operands{keys}; if (values != nullptr) { shapes.push_back(values->shape()); operands.push_back(values); } shapes.push_back(ShapeUtil::MakeShape(U8, {scratch_size})); Shape call_shape = ShapeUtil::MakeTupleShape(absl::MakeSpan(shapes)); HloInstruction* custom_call = sort_op->parent()->AddInstruction(HloInstruction::CreateCustomCall( call_shape, absl::MakeSpan(operands), kCubDeviceRadixSortTarget)); xla::SortOptions backend_config; backend_config.set_descending(sort_config.descending); TF_RETURN_IF_ERROR(custom_call->set_backend_config(backend_config)); HloInstruction* replacement; if (sort_op->operand_count() == 1) { replacement = sort_op->parent()->AddInstruction(HloInstruction::CreateGetTupleElement( sort_op->shape(), custom_call, 0)); } else { replacement = UnpackResultPair(sort_op, custom_call, sort_config.key_operand == 1); } TF_RETURN_IF_ERROR( sort_op->parent()->ReplaceInstruction(sort_op, replacement)); return true; } absl::StatusOr<bool> SortRewriter::RunOnComputation( HloComputation* computation) { std::vector<HloSortInstruction*> sort_ops; for (auto* inst : computation->instructions()) { HloSortInstruction* sort = DynCast<HloSortInstruction>(inst); if (sort != nullptr && IsCubCompatibleSort(sort)) { sort_ops.push_back(sort); } } bool changed = false; for (auto* sort : sort_ops) { TF_ASSIGN_OR_RETURN(bool result, RunOnInstruction(sort)); changed |= result; } return changed; } absl::StatusOr<bool> SortRewriter::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { XLA_VLOG_LINES(2, "SortRewriter::Run(), before:\n" + module->ToString()); bool changed = false; for (HloComputation* computation : module->MakeNonfusionComputations(execution_threads)) { TF_ASSIGN_OR_RETURN(bool result, RunOnComputation(computation)); changed |= result; } XLA_VLOG_LINES(2, "SortRewriter::Run(), after:\n" + module->ToString()); return changed; } } }
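A minimal usage sketch, not part of the sort_rewriter.cc source above: it drives SortRewriter over textual HLO the same way the unit test below does, just outside the test fixture. The HLO parser entry point ParseAndReturnUnverifiedModule and its include path are assumptions here; the pass interface itself is the one exercised by the tests.

// Hedged sketch: rewrite eligible kSort ops into the CUB radix-sort custom call.
// Assumes the HLO parser is reachable via xla/service/hlo_parser.h; adjust the
// include if the header has moved in your checkout.
#include <memory>
#include <string>

#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/transforms/sort_rewriter.h"
#include "xla/service/hlo_parser.h"
#include "tsl/platform/statusor.h"

absl::StatusOr<bool> RewriteSortsToCubCalls(const std::string& hlo_text) {
  // Parse the textual HLO into a module owned by this function.
  TF_ASSIGN_OR_RETURN(std::unique_ptr<xla::HloModule> module,
                      xla::ParseAndReturnUnverifiedModule(hlo_text));
  // Returns true iff at least one compatible sort was replaced with the
  // kCubDeviceRadixSortTarget custom call.
  return xla::gpu::SortRewriter().Run(module.get());
}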
#include "xla/service/gpu/transforms/sort_rewriter.h" #include <utility> #include <gtest/gtest.h> #include "xla/error_spec.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/service/gpu/cublas_cudnn.h" #include "xla/service/pattern_matcher.h" #include "xla/service/pattern_matcher_gmock.h" #include "xla/tests/hlo_test_base.h" #include "xla/xla_data.pb.h" #include "tsl/platform/statusor.h" #include "tsl/platform/test.h" namespace xla { namespace gpu { namespace { namespace m = ::xla::match; class SortRewriterTest : public HloTestBase { public: void SetUp() override { HloTestBase::SetUp(); SortRewriter::SetSortSizeThresholdForTestingOnly(1000); } bool RunModuleAndPass(HloModule* module) { auto cloned = module->Clone(); bool changed = SortRewriter().Run(module).value(); if (changed) { EXPECT_TRUE(RunAndCompare(std::move(cloned), ErrorSpec{0, 0})); } return changed; } void ExpectDirection(const HloInstruction* instruction, bool descending) { auto config = instruction->backend_config<xla::SortOptions>(); EXPECT_EQ(config->descending(), descending); } }; TEST_F(SortRewriterTest, SortKeysLessThan) { constexpr char kHlo[] = R"( HloModule TestModule %compare { %lhs = f32[] parameter(0) %rhs = f32[] parameter(1) ROOT %lt = pred[] compare(%lhs, %rhs), direction=LT } ENTRY %main { %input = f32[1000] parameter(0) ROOT %sort = f32[1000] sort(%input), dimensions={0}, to_apply=%compare })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo)); EXPECT_TRUE(RunModuleAndPass(module.get())); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch(m::GetTupleElement( m::CustomCall({kCubDeviceRadixSortTarget}, m::Parameter()), 0))); ExpectDirection(module->entry_computation()->root_instruction()->operand(0), false); } TEST_F(SortRewriterTest, SortKeysGreaterThan) { constexpr char kHlo[] = R"( HloModule TestModule %compare { %lhs = f32[] parameter(0) %rhs = f32[] parameter(1) ROOT %gt = pred[] compare(%lhs, %rhs), direction=GT } ENTRY %main { %input = f32[1000] parameter(0) ROOT %sort = f32[1000] sort(%input), dimensions={0}, to_apply=%compare })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo)); EXPECT_TRUE(RunModuleAndPass(module.get())); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch(m::GetTupleElement( m::CustomCall({kCubDeviceRadixSortTarget}, m::Parameter()), 0))); ExpectDirection(module->entry_computation()->root_instruction()->operand(0), true); } TEST_F(SortRewriterTest, SortKeysGreaterThanSwapped) { constexpr char kHlo[] = R"( HloModule TestModule %compare { %lhs = f32[] parameter(1) %rhs = f32[] parameter(0) ROOT %gt = pred[] compare(%lhs, %rhs), direction=GT } ENTRY %main { %input = f32[1000] parameter(0) ROOT %sort = f32[1000] sort(%input), dimensions={0}, to_apply=%compare })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo)); EXPECT_TRUE(RunModuleAndPass(module.get())); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch(m::GetTupleElement( m::CustomCall({kCubDeviceRadixSortTarget}, m::Parameter()), 0))); ExpectDirection(module->entry_computation()->root_instruction()->operand(0), false); } TEST_F(SortRewriterTest, SortPairs) { constexpr char kHlo[] = R"( HloModule TestModule %compare { %lhs_key = u32[] parameter(0) %rhs_key = u32[] parameter(1) %lhs_value = f32[] parameter(2) %rhs_value = f32[] parameter(3) ROOT %lt = pred[] compare(%lhs_key, %rhs_key), direction=LT } ENTRY %main { %input_keys = u32[1000] parameter(0) 
%input_values = f32[1000] parameter(1) ROOT %sort = (u32[1000], f32[1000]) sort(%input_keys, %input_values), dimensions={0}, to_apply=%compare })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo)); EXPECT_TRUE(RunModuleAndPass(module.get())); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Tuple(m::GetTupleElement(m::CustomCall(), 0), m::GetTupleElement(m::CustomCall(), 1)))); } TEST_F(SortRewriterTest, SortPairsSwapped) { constexpr char kHlo[] = R"( HloModule TestModule %compare { %lhs_value = f32[] parameter(0) %rhs_value = f32[] parameter(1) %lhs_key = u32[] parameter(2) %rhs_key = u32[] parameter(3) ROOT %lt = pred[] compare(%lhs_key, %rhs_key), direction=LT } ENTRY %main { %input_values = f32[1000] parameter(0) %input_keys = u32[1000] parameter(1) ROOT %sort = (f32[1000], u32[1000]) sort(%input_values, %input_keys), dimensions={0}, to_apply=%compare })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo)); EXPECT_TRUE(RunModuleAndPass(module.get())); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Tuple(m::GetTupleElement(m::CustomCall(), 1), m::GetTupleElement(m::CustomCall(), 0)))); } TEST_F(SortRewriterTest, NoRewriteManyTensors) { constexpr char kHlo[] = R"( HloModule TestModule %compare { %lhs = f32[] parameter(0) %rhs = f32[] parameter(1) %unused1 = f64[] parameter(2) %unused2 = f64[] parameter(3) %unused3 = u64[] parameter(4) %unused4 = u64[] parameter(5) ROOT %lt = pred[] compare(%lhs, %rhs), direction=LT } ENTRY %main { %input1 = f32[1000] parameter(0) %input2 = f64[1000] parameter(1) %input3 = u64[1000] parameter(2) ROOT %sort = (f32[1000], f64[1000], u64[1000]) sort(%input1, %input2, %input3), dimensions={0}, to_apply=%compare })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo)); EXPECT_FALSE(RunModuleAndPass(module.get())); } TEST_F(SortRewriterTest, NoRewriteNonMinorSortDimension) { constexpr char kHlo[] = R"( HloModule TestModule %compare { %lhs = f32[] parameter(0) %rhs = f32[] parameter(1) ROOT %lt = pred[] compare(%lhs, %rhs), direction=LT } ENTRY %main { %input = f32[1000,4] parameter(0) ROOT %sort = f32[1000,4] sort(%input), dimensions={0}, to_apply=%compare })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo)); EXPECT_FALSE(RunModuleAndPass(module.get())); } TEST_F(SortRewriterTest, NoRewriteUnsupportedType) { constexpr char kHlo[] = R"( HloModule TestModule %compare { %lhs = pred[] parameter(0) %rhs = pred[] parameter(1) ROOT %lt = pred[] compare(%lhs, %rhs), direction=LT } ENTRY %main { %input = pred[1000] parameter(0) ROOT %sort = pred[1000] sort(%input), dimensions={0}, to_apply=%compare })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo)); EXPECT_FALSE(RunModuleAndPass(module.get())); } TEST_F(SortRewriterTest, NoRewriteComplexComparer) { constexpr char kHlo[] = R"( HloModule TestModule %compare { %lhs = f32[] parameter(0) %lhs_scaled = f32[] multiply(%lhs, f32[] constant(2)) %rhs = f32[] parameter(1) ROOT %lt = pred[] compare(%lhs_scaled, %rhs), direction=LT } ENTRY %main { %input = f32[1000] parameter(0) ROOT %sort = f32[1000] sort(%input), dimensions={0}, to_apply=%compare })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo)); EXPECT_FALSE(RunModuleAndPass(module.get())); } TEST_F(SortRewriterTest, NoRewriteMixedKeysValues) { constexpr char kHlo[] = R"( HloModule TestModule %compare { %lhs_key = u32[] parameter(0) %rhs_key = u32[] parameter(1) %lhs_value = u32[] 
parameter(2) %rhs_value = u32[] parameter(3) ROOT %mixed = pred[] compare(%rhs_key, %lhs_value), direction=LT } ENTRY %main { %input_keys = u32[1000] parameter(0) %input_values = u32[1000] parameter(1) ROOT %sort = (u32[1000], u32[1000]) sort(%input_keys, %input_values), dimensions={0}, to_apply=%compare })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo)); EXPECT_FALSE(RunModuleAndPass(module.get())); } TEST_F(SortRewriterTest, NoRewriteSmallSize) { constexpr char kHlo[] = R"( HloModule TestModule %compare { %lhs = f32[] parameter(0) %rhs = f32[] parameter(1) ROOT %lt = pred[] compare(%lhs, %rhs), direction=LT } ENTRY %main { %input = f32[100] parameter(0) ROOT %sort = f32[100] sort(%input), dimensions={0}, to_apply=%compare })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo)); EXPECT_FALSE(RunModuleAndPass(module.get())); } TEST_F(SortRewriterTest, SortWithBatchDim) { constexpr char kHlo[] = R"( HloModule TestModule %compare { %lhs = f32[] parameter(0) %rhs = f32[] parameter(1) ROOT %lt = pred[] compare(%lhs, %rhs), direction=LT } ENTRY %main { %input = f32[10,100] parameter(0) ROOT %sort = f32[10,100] sort(%input), dimensions={1}, to_apply=%compare })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo)); EXPECT_TRUE(RunModuleAndPass(module.get())); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch(m::GetTupleElement( m::CustomCall({kCubDeviceRadixSortTarget}, m::Parameter()), 0))); ExpectDirection(module->entry_computation()->root_instruction()->operand(0), false); } TEST_F(SortRewriterTest, SortWithMultipleBatchDims) { constexpr char kHlo[] = R"( HloModule TestModule %compare { %lhs = f32[] parameter(0) %rhs = f32[] parameter(1) ROOT %lt = pred[] compare(%lhs, %rhs), direction=LT } ENTRY %main { %input = f32[10,10,10] parameter(0) ROOT %sort = f32[10,10,10] sort(%input), dimensions={2}, to_apply=%compare })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo)); EXPECT_TRUE(RunModuleAndPass(module.get())); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch(m::GetTupleElement( m::CustomCall({kCubDeviceRadixSortTarget}, m::Parameter()), 0))); ExpectDirection(module->entry_computation()->root_instruction()->operand(0), false); } TEST_F(SortRewriterTest, SortPairsIotaComparerSimple) { constexpr char kHlo[] = R"( HloModule TestModule %compare { %lhs = u16[] parameter(0) %rhs = u16[] parameter(1) %lhs_index = s32[] parameter(2) %rhs_index = s32[] parameter(3) cmp_indices = pred[] compare(%lhs_index, %rhs_index), direction=LT cmp_lr = pred[] compare(%lhs, %rhs), direction=GT cmp_eq = pred[] compare(%lhs, %rhs), direction=EQ ROOT %lt = pred[] select(cmp_eq, cmp_indices, cmp_lr) } ENTRY %main { %inputs = u16[1000] parameter(0) %iota = s32[1000] iota(), iota_dimension=0 ROOT %sort = (u16[1000], s32[1000]) sort(%inputs, %iota), dimensions={0}, to_apply=%compare })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo)); EXPECT_TRUE(RunModuleAndPass(module.get())); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Tuple(m::GetTupleElement(m::CustomCall(), 0), m::GetTupleElement(m::CustomCall(), 1)))); } TEST_F(SortRewriterTest, SortPairsIotaComparerLikeStableSortExpander) { constexpr char kHlo[] = R"( HloModule TestModule %compare { %lhs = u16[] parameter(0) %rhs = u16[] parameter(1) %lhs_index = s32[] parameter(2) %rhs_index = s32[] parameter(3) cmp_indices = pred[] compare(%lhs_index, %rhs_index), direction=LT cmp_lr 
= pred[] compare(%lhs, %rhs), direction=GT cmp_rl = pred[] compare(%rhs, %lhs), direction=GT cmp_eq = pred[] compare(cmp_lr, cmp_rl), direction=EQ ROOT %lt = pred[] select(cmp_eq, cmp_indices, cmp_lr) } ENTRY %main { %inputs = u16[1000] parameter(0) %iota = s32[1000] iota(), iota_dimension=0 ROOT %sort = (u16[1000], s32[1000]) sort(%inputs, %iota), dimensions={0}, to_apply=%compare })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(kHlo)); EXPECT_TRUE(RunModuleAndPass(module.get())); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Tuple(m::GetTupleElement(m::CustomCall(), 0), m::GetTupleElement(m::CustomCall(), 1)))); } TEST_F(SortRewriterTest, SortSizeThresholdIsSet) { EXPECT_EQ(SortRewriter::SortSizeThreshold(), 1000); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/sort_rewriter.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/sort_rewriter_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
e1b70a27-1ffd-49b8-993f-ba87bd51d44b
cpp
tensorflow/tensorflow
dot_operand_converter
third_party/xla/xla/service/gpu/transforms/dot_operand_converter.cc
third_party/xla/xla/service/gpu/transforms/dot_operand_converter_test.cc
#include "xla/service/gpu/transforms/dot_operand_converter.h" #include "absl/status/statusor.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/shape_util.h" #include "tsl/platform/errors.h" namespace xla::gpu { bool DotOperandConverter::InstructionMatchesPattern( HloInstruction* instruction) { if (instruction->opcode() != HloOpcode::kDot) { return false; } HloInstruction* lhs = instruction->mutable_operand(0); HloInstruction* rhs = instruction->mutable_operand(1); PrimitiveType lhs_type = lhs->shape().element_type(); PrimitiveType rhs_type = rhs->shape().element_type(); if (lhs_type == rhs_type) { return false; } absl::flat_hash_set<PrimitiveType> non_converting = {F8E4M3FN, F8E5M2}; if (non_converting.contains(lhs_type) && non_converting.contains(rhs_type)) { return false; } PrimitiveType desired_type = ShapeUtil::HigherPrecisionElementType(lhs->shape(), rhs->shape()); return desired_type == lhs_type || desired_type == rhs_type; } absl::StatusOr<HloInstruction*> DotOperandConverter::ExpandInstruction( HloInstruction* instruction) { HloInstruction* lhs = instruction->mutable_operand(0); HloInstruction* rhs = instruction->mutable_operand(1); PrimitiveType desired_type = ShapeUtil::HigherPrecisionElementType(lhs->shape(), rhs->shape()); int operand_index = desired_type == lhs->shape().element_type() ? 1 : 0; HloInstruction* inst_to_replace = desired_type == lhs->shape().element_type() ? rhs : lhs; auto upcast_shape = inst_to_replace->shape(); upcast_shape.set_element_type(desired_type); auto* convert_inst = instruction->AddInstruction( HloInstruction::CreateConvert(upcast_shape, inst_to_replace)); TF_RETURN_IF_ERROR(instruction->ReplaceOperandWithDifferentShape( operand_index, convert_inst)); return nullptr; } }
#include "xla/service/gpu/transforms/dot_operand_converter.h" #include <memory> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/strings/string_view.h" #include "absl/strings/substitute.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/utils/hlo_matchers.h" #include "xla/primitive_util.h" #include "xla/tests/hlo_test_base.h" #include "xla/xla_data.pb.h" #include "tsl/platform/statusor.h" namespace xla::gpu { namespace { namespace op = ::xla::testing::opcode_matchers; class DotOperandConverterTest : public HloTestBase { public: void TestConvert(bool left_less_precise, PrimitiveType lhs_type, PrimitiveType rhs_type, PrimitiveType result_type) { absl::string_view module_tmpl = R"( HloModule module ENTRY main { p0 = $0[2,3]{1,0} parameter(0) p1 = $1[3,2]{1,0} parameter(1) ROOT dot = $2[2,2]{1,0} dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} })"; auto module_string = absl::Substitute( module_tmpl, primitive_util::LowercasePrimitiveTypeName(lhs_type), primitive_util::LowercasePrimitiveTypeName(rhs_type), primitive_util::LowercasePrimitiveTypeName(result_type)); TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_string)); TF_ASSERT_OK_AND_ASSIGN(bool upcasted, DotOperandConverter().Run(module.get())); EXPECT_TRUE(upcasted); if (left_less_precise) { auto original_lhs = op::Parameter(0); auto upcasted_lhs = AllOf(op::Convert(original_lhs), op::Shape(absl::Substitute( "$0[2,3]{1,0}", primitive_util::LowercasePrimitiveTypeName(rhs_type)))); EXPECT_THAT( module->entry_computation()->root_instruction(), AllOf(op::Dot(upcasted_lhs, op::Parameter(1)), op::Shape(absl::Substitute( "$0[2,2]{1,0}", primitive_util::LowercasePrimitiveTypeName(result_type))))); } else { auto original_rhs = op::Parameter(1); auto upcasted_rhs = AllOf(op::Convert(original_rhs), op::Shape(absl::Substitute( "$0[3,2]{1,0}", primitive_util::LowercasePrimitiveTypeName(lhs_type)))); EXPECT_THAT( module->entry_computation()->root_instruction(), AllOf(op::Dot(op::Parameter(0), upcasted_rhs), op::Shape(absl::Substitute( "$0[2,2]{1,0}", primitive_util::LowercasePrimitiveTypeName(result_type))))); } } }; TEST_F(DotOperandConverterTest, ConvertsLeftAndRight) { TestConvert(true, S8, BF16, F32); TestConvert(false, BF16, S8, F32); } TEST_F(DotOperandConverterTest, NoConvertHappensWithSameTypes) { absl::string_view module_string = R"( HloModule module ENTRY main { p0 = s8[2,3]{1,0} parameter(0) p1 = s8[3,2]{1,0} parameter(1) ROOT dot = bf16[2,2]{1,0} dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_string)); TF_ASSERT_OK_AND_ASSIGN(bool upcasted, DotOperandConverter().Run(module.get())); EXPECT_FALSE(upcasted); } TEST_F(DotOperandConverterTest, NoConvertFromF8toF8) { absl::string_view module_string = R"( HloModule module ENTRY main { p0 = f8e4m3fn[2,3]{1,0} parameter(0) p1 = f8e5m2[3,2]{1,0} parameter(1) ROOT dot = bf16[2,2]{1,0} dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(module_string)); TF_ASSERT_OK_AND_ASSIGN(bool upcasted, DotOperandConverter().Run(module.get())); EXPECT_FALSE(upcasted); } TEST_F(DotOperandConverterTest, CompilerOptimizesUsingDotOperandConverter) { absl::string_view module_string = R"( HloModule module ENTRY main { p0 = s8[2,3]{1,0} parameter(0) p1 = bf16[3,2]{1,0} parameter(1) ROOT dot = bf16[2,2]{1,0} dot(p0, p1), 
lhs_contracting_dims={1}, rhs_contracting_dims={0} })"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, GetOptimizedModule(module_string)); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/dot_operand_converter.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/dot_operand_converter_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
b3cf634f-2abf-4078-a994-6116125692a0
cpp
tensorflow/tensorflow
collective_select_folder
third_party/xla/xla/service/gpu/transforms/collective_select_folder.cc
third_party/xla/xla/service/gpu/transforms/collective_select_folder_test.cc
#include "xla/service/gpu/transforms/collective_select_folder.h" #include <cstdint> #include <optional> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "xla/comparison_util.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { namespace { using SourceTargetPair = std::pair<int64_t, int64_t>; using SourceTargetPairs = std::vector<SourceTargetPair>; struct SelectPredInfo { int64_t constant; Comparison::Direction direction; HloOpcode device_id_type; HloInstruction* true_operand; HloInstruction* false_operand; }; std::optional<SelectPredInfo> GetPredSelectInfo(HloInstruction* select) { if (select->opcode() != HloOpcode::kSelect) { return std::nullopt; } const HloInstruction* compare_candidate = select->operand(0); if (compare_candidate->opcode() != HloOpcode::kCompare) { compare_candidate = compare_candidate->operand(0); } if (compare_candidate->opcode() != HloOpcode::kCompare) { return std::nullopt; } const HloCompareInstruction* compare = DynCast<HloCompareInstruction>(compare_candidate); if ((compare->operand(0)->opcode() != HloOpcode::kReplicaId && compare->operand(0)->opcode() != HloOpcode::kPartitionId) || compare->operand(1)->opcode() != HloOpcode::kConstant) { return std::nullopt; } int64_t id_value = compare->operand(1)->literal().GetFirstInteger().value_or(-1); return SelectPredInfo{id_value, compare->direction(), compare->operand(0)->opcode(), select->mutable_operand(1), select->mutable_operand(2)}; } bool IsUniqueSource(int64_t device_id, const SourceTargetPairs& pairs) { if (pairs.size() == 1 && pairs[0].first == device_id) return true; return false; } bool IsNotPresentInSource(int64_t device_id, const SourceTargetPairs& pairs) { return absl::c_none_of( pairs, [device_id](const auto& pair) { return pair.first == device_id; }); } inline absl::StatusOr<bool> update(HloInstruction* cp, HloInstruction* data) { TF_RETURN_IF_ERROR(cp->ReplaceOperandWith(0, data)); return true; } bool IsShardingConsistent(HloCollectivePermuteInstruction* cp, HloOpcode device_id_type) { auto id = cp->channel_id(); return (device_id_type == HloOpcode::kPartitionId && id.has_value()) || (device_id_type == HloOpcode::kReplicaId && !id.has_value()); } absl::StatusOr<bool> TryFoldSelect(HloInstruction* in) { if (in->opcode() != HloOpcode::kCollectivePermute) return false; auto select_info_opt = GetPredSelectInfo(in->mutable_operand(0)); if (!select_info_opt.has_value()) return false; auto select_info = select_info_opt.value(); HloCollectivePermuteInstruction* cp = Cast<HloCollectivePermuteInstruction>(in); if (!IsShardingConsistent(cp, select_info.device_id_type)) return false; int64_t device_id = select_info.constant; SourceTargetPairs pairs = cp->source_target_pairs(); if (select_info.direction == Comparison::Direction::kEq) { if (IsUniqueSource(device_id, pairs)) { return update(cp, select_info.true_operand); } else if (IsNotPresentInSource(device_id, pairs)) { return update(cp, select_info.false_operand); } } if (select_info.direction == Comparison::Direction::kNe) { if (IsNotPresentInSource(device_id, pairs)) { return update(cp, select_info.true_operand); } else if 
(IsUniqueSource(device_id, pairs)) { return update(cp, select_info.false_operand); } } return false; } } absl::StatusOr<bool> CollectiveSelectFolder::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { bool changed = false; for (HloComputation* computation : module->computations()) { for (HloInstruction* instruction : computation->instructions()) { TF_ASSIGN_OR_RETURN(bool local_changed, TryFoldSelect(instruction)); changed |= local_changed; } } return changed; } }
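A brief sketch, not part of collective_select_folder.cc: it restates the fold on a concrete pattern and shows one way the pass could be invoked. The pipeline wiring and function name are assumptions; the folded pattern matches the EqualTrueBranchTransform test below.

// Hedged sketch of the fold plus an illustrative driver.
// Before (predicate replica-id() == 3, single source-target pair {3,0}):
//   data_snd = select(is_device_3, true_operand, false_operand)
//   recv     = collective-permute(data_snd), source_target_pairs={{3,0}}
// After: the collective-permute reads true_operand directly, because only
// device 3 ever sends and on that device the select always picks it.
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/hlo/pass/hlo_pass_pipeline.h"
#include "xla/service/gpu/transforms/collective_select_folder.h"

absl::StatusOr<bool> FoldCollectiveSelects(xla::HloModule* module) {
  xla::HloPassPipeline pipeline("collective-select-folder");
  // Note: the pass is declared in namespace xla even though it lives under
  // service/gpu/transforms.
  pipeline.AddPass<xla::CollectiveSelectFolder>();
  return pipeline.Run(module);
}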
#include "xla/service/gpu/transforms/collective_select_folder.h" #include <initializer_list> #include <memory> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/log/log.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/tests/hlo_test_base.h" #include "xla/tsl/lib/core/status_test_util.h" #include "tsl/platform/statusor.h" namespace xla { namespace { using ::testing::HasSubstr; class CollectiveSelectFolderTest : public HloTestBase { public: absl::Status ExpectNoTranform(std::string_view hlo_template) { return RunAndCheckHloRewrite(hlo_template, CollectiveSelectFolder(), false) .status(); } }; void VerifyDirectDataFeedSPMD(HloModule* module, std::string_view expected_fwd_operand, std::string_view expected_bwd_operand) { auto root = module->entry_computation()->root_instruction(); EXPECT_EQ(root->opcode(), HloOpcode::kSelect); EXPECT_EQ(root->operand(1)->opcode(), HloOpcode::kCollectivePermute); EXPECT_EQ(root->operand(2)->opcode(), HloOpcode::kCollectivePermute); EXPECT_THAT(root->operand(1)->operand(0)->name(), HasSubstr(expected_bwd_operand)) << root->operand(1)->name() << " is expected to operate on " << expected_bwd_operand; EXPECT_THAT(root->operand(2)->operand(0)->name(), HasSubstr(expected_fwd_operand)) << root->operand(2)->name() << " is expected to operate on " << expected_fwd_operand; } const char* kSPMD2cp = R"( HloModule test ENTRY circular_exchange { in_tpl = (f32[16], f32[16]) parameter(0) fwd_data = f32[16]{0} get-tuple-element(in_tpl), index=0 bwd_data = f32[16]{0} get-tuple-element(in_tpl), index=1 c_first_id = u32[] constant($first_id_constant) c_last_id = u32[] constant($last_id_constant) repl_id = u32[] partition-id() pred_first_id = pred[] compare(repl_id, c_first_id), direction=EQ is_first = pred[] broadcast(pred_first_id), dimensions={} pred_last_id = pred[] compare(repl_id, c_last_id), direction=EQ is_last = pred[] broadcast(pred_last_id), dimensions={} data_snd = f32[16] select(is_last, bwd_data, fwd_data) bwd_data_rcv = f32[16] collective-permute(data_snd), channel_id=1, source_target_pairs=$backward_pairs fwd_data_rcv = f32[16] collective-permute(data_snd), channel_id=2, source_target_pairs=$forward_pairs ROOT data_rcv = f32[16] select(is_first, bwd_data_rcv, fwd_data_rcv) } )"; TEST_F(CollectiveSelectFolderTest, SimpleForwardCycle) { TF_ASSERT_OK_AND_ASSIGN( auto module, RunAndCheckHloRewrite(kSPMD2cp, CollectiveSelectFolder(), true, {{"$first_id_constant", "0"}, {"$last_id_constant", "3"}, {"$forward_pairs", "{{0,1},{1,2},{2,3}}"}, {"$backward_pairs", "{{3,0}}"}})); VerifyDirectDataFeedSPMD(module.get(), "fwd_data", "bwd_data"); } TEST_F(CollectiveSelectFolderTest, SimpleBackwardCycle) { TF_ASSERT_OK_AND_ASSIGN( auto module, RunAndCheckHloRewrite(kSPMD2cp, CollectiveSelectFolder(), true, {{"$first_id_constant", "3"}, {"$last_id_constant", "0"}, {"$forward_pairs", "{{3,2},{2,1},{1,0}}"}, {"$backward_pairs", "{{0,3}}"}})); VerifyDirectDataFeedSPMD(module.get(), "fwd_data", "bwd_data"); } TEST_F(CollectiveSelectFolderTest, CompareNEForwardCycle) { TF_ASSERT_OK_AND_ASSIGN( auto module, RunAndCheckHloRewrite(kSPMD2cp, CollectiveSelectFolder(), true, {{"$first_id_constant", "0"}, {"$last_id_constant", "3"}, {"$forward_pairs", "{{0,1},{1,2},{2,3}}"}, {"$backward_pairs", "{{3,0}}"}, {"direction=EQ", "direction=NE"}})); VerifyDirectDataFeedSPMD(module.get(), "bwd_data", 
"fwd_data"); } TEST_F(CollectiveSelectFolderTest, LastDeviceIdMismatch) { TF_ASSERT_OK_AND_ASSIGN( auto module, RunAndCheckHloRewrite(kSPMD2cp, CollectiveSelectFolder(), true, {{"$first_id_constant", "0"}, {"$last_id_constant", "2"}, {"$forward_pairs", "{{0,1},{1,2},{2,3}}"}, {"$backward_pairs", "{{3,0}}"}})); VerifyDirectDataFeedSPMD(module.get(), "data_snd", "fwd_data"); } const char* kSelectBasecase = R"( HloModule test ENTRY computation1 { compare_true_data = f32[16] parameter(0) compare_false_data = f32[16] parameter(1) device_id_constant = u32[] constant($device_id_constant) repl_id = u32[] replica-id() prd = pred[] compare(repl_id, device_id_constant), direction=$direction bcast = pred[] broadcast(prd), dimensions={} selected_data = f32[16] select(bcast, compare_true_data, compare_false_data) ROOT data_rcv = f32[16] collective-permute(selected_data), source_target_pairs=$pairs } )"; TEST_F(CollectiveSelectFolderTest, EqualTrueBranchTransform) { TF_ASSERT_OK_AND_ASSIGN( auto module, RunAndCheckHloRewrite(kSelectBasecase, CollectiveSelectFolder(), true, {{"$device_id_constant", "3"}, {"$direction", "EQ"}, {"$pairs", "{{3,0}}"}})); auto root = module->entry_computation()->root_instruction(); EXPECT_EQ(root->operand(0)->name(), "compare_true_data"); } TEST_F(CollectiveSelectFolderTest, EqualFalseBranchTransform) { TF_ASSERT_OK_AND_ASSIGN( auto module, RunAndCheckHloRewrite(kSelectBasecase, CollectiveSelectFolder(), true, {{"$device_id_constant", "3"}, {"$direction", "EQ"}, {"$pairs", "{{0,1},{1,2}}"}})); auto root = module->entry_computation()->root_instruction(); EXPECT_EQ(root->operand(0)->name(), "compare_false_data"); } TEST_F(CollectiveSelectFolderTest, NotEqualFalseBranchTransform) { TF_ASSERT_OK_AND_ASSIGN( auto module, RunAndCheckHloRewrite(kSelectBasecase, CollectiveSelectFolder(), true, {{"$device_id_constant", "3"}, {"$direction", "NE"}, {"$pairs", "{{3,0}}"}})); auto root = module->entry_computation()->root_instruction(); EXPECT_EQ(root->operand(0)->name(), "compare_false_data"); } TEST_F(CollectiveSelectFolderTest, NotEqualTrueTrueTransform) { TF_ASSERT_OK_AND_ASSIGN( auto module, RunAndCheckHloRewrite(kSelectBasecase, CollectiveSelectFolder(), true, {{"$device_id_constant", "3"}, {"$direction", "NE"}, {"$pairs", "{{0,1},{1,2},{4,5},{5,6}}"}})); auto root = module->entry_computation()->root_instruction(); EXPECT_EQ(root->operand(0)->name(), "compare_true_data"); } TEST_F(CollectiveSelectFolderTest, MoreThanOnePair_NotTransformed) { TF_ASSERT_OK(RunAndCheckHloRewrite(kSelectBasecase, CollectiveSelectFolder(), false, {{"$device_id_constant", "1"}, {"$direction", "EQ"}, {"$pairs", "{{0,1},{1,2}}"}})); TF_ASSERT_OK(RunAndCheckHloRewrite(kSelectBasecase, CollectiveSelectFolder(), false, {{"$device_id_constant", "1"}, {"$direction", "NE"}, {"$pairs", "{{0,1},{1,2}}"}})); } const char* kSelectNoBroadcast = R"( HloModule test ENTRY computation1 { compare_true_data = f32[16] parameter(0) compare_false_data = f32[16] parameter(1) device_id_constant = u32[] constant($device_id_constant) repl_id = u32[] replica-id() prd = pred[] compare(repl_id, device_id_constant), direction=$direction selected_data = f32[16] select(prd, compare_true_data, compare_false_data) ROOT data_rcv = f32[16] collective-permute(selected_data), source_target_pairs=$pairs } )"; TEST_F(CollectiveSelectFolderTest, SelectNoBroadcastTransform) { TF_ASSERT_OK_AND_ASSIGN( auto module, RunAndCheckHloRewrite(kSelectNoBroadcast, CollectiveSelectFolder(), true, {{"$device_id_constant", "3"}, {"$direction", "EQ"}, 
{"$pairs", "{{3,0}}"}})); auto root = module->entry_computation()->root_instruction(); EXPECT_EQ(root->operand(0)->name(), "compare_true_data"); } TEST_F(CollectiveSelectFolderTest, ReplicaIdChannelIdMismatch_NotTransformed) { const absl::string_view hlo = R"( HloModule test ENTRY computation1 { compare_true_data = f32[16] parameter(0) compare_false_data = f32[16] parameter(1) device_id_constant = u32[] constant(0) repl_id = u32[] replica-id() prd = pred[] compare(repl_id, device_id_constant), direction=EQ selected_data = f32[16] select(prd, compare_true_data, compare_false_data) ROOT data_rcv = f32[16] collective-permute(selected_data), channel_id=1, source_target_pairs={{0,1}} } )"; TF_ASSERT_OK(ExpectNoTranform(hlo)); } TEST_F(CollectiveSelectFolderTest, PartIdChannelIdMismatch_NotTransformed) { const absl::string_view hlo = R"( HloModule test ENTRY computation1 { compare_true_data = f32[16] parameter(0) compare_false_data = f32[16] parameter(1) device_id_constant = u32[] constant(0) repl_id = u32[] partition-id() prd = pred[] compare(repl_id, device_id_constant), direction=EQ selected_data = f32[16] select(prd, compare_true_data, compare_false_data) ROOT data_rcv = f32[16] collective-permute(selected_data), source_target_pairs={{0,1}} } )"; TF_ASSERT_OK(ExpectNoTranform(hlo)); } TEST_F(CollectiveSelectFolderTest, WrongNesting_NotTransformed) { const absl::string_view hlo = R"( HloModule test ENTRY computation1 { compare_true_data = f32[16] parameter(0) compare_false_data = f32[16] parameter(1) device_id_constant = u32[] constant(0) repl_id = u32[] replica-id() sum = u32[] add(device_id_constant, repl_id) prd = pred[] compare(sum, device_id_constant), direction=EQ selected_data = f32[16] select(prd, compare_true_data, compare_false_data) ROOT data_rcv = f32[16] collective-permute(selected_data), source_target_pairs={{0,1}} } )"; TF_ASSERT_OK(ExpectNoTranform(hlo)); } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/collective_select_folder.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/collective_select_folder_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
4ebd16c3-9ade-4140-a597-d2ef6dfe067e
cpp
tensorflow/tensorflow
gemm_fusion
third_party/xla/xla/service/gpu/transforms/gemm_fusion.cc
third_party/xla/xla/service/gpu/transforms/gemm_fusion_test.cc
#include "xla/service/gpu/transforms/gemm_fusion.h" #include <array> #include <cstddef> #include <cstdint> #include <optional> #include <queue> #include <string> #include <tuple> #include <utility> #include <variant> #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/dfs_hlo_visitor_with_default.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/cublas_padding_requirements.h" #include "xla/service/gpu/fusions/triton/triton_support.h" #include "xla/service/gpu/fusions/triton/triton_support_legacy.h" #include "xla/service/gpu/ir_emission_utils.h" #include "xla/service/gpu/matmul_utils.h" #include "xla/service/gpu/triton_fusion_analysis.h" #include "xla/service/gpu/triton_tiling_propagation.h" #include "xla/service/instruction_fusion.h" #include "xla/shape_util.h" #include "xla/stream_executor/device_description.h" #include "xla/util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { using triton_fusion::CombineDotRequirements; using triton_fusion::DimensionOrder; using triton_fusion::DimOrderMap; using triton_fusion::DimOrdersAndReqs; using triton_fusion::DimOrdersAndReqsOrError; using triton_fusion::DotProperties; using triton_fusion::DotRequirements; using triton_fusion::DotRequirementsOrError; using triton_fusion::FusionContext; using triton_fusion::GetPropagatedDimOrdersAndRequirementsIfProfitablyFusible; using triton_fusion::TransformDirection; class AdjacencyList { public: using NodeId = int64_t; NodeId AddNode() { adj_.emplace_back(); return adj_.size() - 1; } const std::vector<NodeId>& GetOutNeighbors(NodeId node_id) const { return adj_.at(node_id); } void ReserveSpaceForOutNeighbors(NodeId node_id, size_t count) { adj_.at(node_id).reserve(count); } void AddArc(NodeId from, NodeId to) { adj_.at(from).push_back(to); } NodeId GetRoot() const { CHECK(!adj_.empty()); return 0; } private: std::vector<std::vector<NodeId>> adj_; }; struct HloAndDimOrder { const HloInstruction* original_hlo = nullptr; DimensionOrder dim_order; }; struct HloAndIterSpec { const HloInstruction* original_hlo; TensorIterationSpec iter_spec; auto ToTuple() const { return std::make_tuple(original_hlo, iter_spec); } bool operator==(const HloAndIterSpec& other) const { return ToTuple() == other.ToTuple(); } template <typename H> friend H AbslHashValue(H h, const HloAndIterSpec& key) { return H::combine(std::move(h), key.ToTuple()); } }; struct NodeFusionPlan { const HloInstruction* original_hlo = nullptr; bool should_fuse = false; }; struct FusionPlan { AdjacencyList graph; absl::flat_hash_map<AdjacencyList::NodeId, NodeFusionPlan> map; }; struct FusionPlanAndRequirements { FusionPlan fusion_plan; DotRequirements requirements; }; struct HlosAndRequirements { const HloInstruction* original_hlo = nullptr; const HloInstruction* fused_hlo = nullptr; DotRequirements requirements; }; HloInstruction& FuseDot(const HloDotInstruction& dot, const HloInstruction& fused_lhs, const HloInstruction& fused_rhs, 
std::optional<const HloInstruction*> fused_meta, HloComputation::Builder& builder ) { VLOG(3) << "Fusing " << dot.ToString(); std::vector<HloInstruction*> hlo_new_operands = { const_cast<HloInstruction*>(&fused_lhs), const_cast<HloInstruction*>(&fused_rhs)}; if (fused_meta.has_value()) { hlo_new_operands.push_back(const_cast<HloInstruction*>(fused_meta.value())); } return *builder.AddInstruction( dot.CloneWithNewOperands(dot.shape(), hlo_new_operands)); } int64_t NumAddedParameters(const HloInstruction& hlo) { if (hlo.opcode() == HloOpcode::kParameter || (hlo.opcode() == HloOpcode::kConstant && !ShapeUtil::IsScalar(hlo.shape()))) { return 0; } return hlo.operand_count() - 1; } std::optional<DimOrdersAndReqs> GetOperandDimOrdersAndCombinedReqs( const HloInstruction& hlo, const DimensionOrder& dim_order, const DotProperties& properties, const se::GpuComputeCapability& gpu_version, const DotRequirements& requirements) { DimOrdersAndReqsOrError dim_orders_and_new_reqs = GetPropagatedDimOrdersAndRequirements( hlo, dim_order, TransformDirection::kOutputToInput, properties); if (std::holds_alternative<FusionDecision>(dim_orders_and_new_reqs)) { VLOG(5) << "Not fusing " << hlo.ToString() << " to the output due to the decision: " << std::get<FusionDecision>(dim_orders_and_new_reqs).Explain(); return std::nullopt; } DotRequirementsOrError combined_reqs = CombineDotRequirements( requirements, std::get<DimOrdersAndReqs>(dim_orders_and_new_reqs).requirements); if (std::holds_alternative<FusionDecision>(combined_reqs)) { VLOG(5) << "Not fusing " << hlo.ToString() << " to the output due to the decision: " << std::get<FusionDecision>(combined_reqs).Explain(); return std::nullopt; } return DimOrdersAndReqs{ std::get<DimOrdersAndReqs>(dim_orders_and_new_reqs).dim_orders, std::get<DotRequirements>(combined_reqs)}; } std::optional<DimOrdersAndReqs> GetOperandDimOrdersAndCombinedReqsIfProfitable( const HloInstruction& hlo, const DimensionOrder& dim_order, const DotProperties& properties, const se::GpuComputeCapability& gpu_version, const DotRequirements& requirements) { DimOrdersAndReqsOrError dim_orders_and_new_reqs = GetPropagatedDimOrdersAndRequirementsIfProfitablyFusible( hlo, TransformDirection::kOutputToInput, std::nullopt, dim_order, gpu_version, properties); if (std::holds_alternative<FusionDecision>(dim_orders_and_new_reqs)) { VLOG(5) << "Not fusing " << hlo.ToString() << " to the output due to the decision: " << std::get<FusionDecision>(dim_orders_and_new_reqs).Explain(); return std::nullopt; } DotRequirementsOrError combined_reqs = CombineDotRequirements( requirements, std::get<DimOrdersAndReqs>(dim_orders_and_new_reqs).requirements); if (std::holds_alternative<FusionDecision>(combined_reqs)) { VLOG(5) << "Not fusing " << hlo.ToString() << " to the output due to the decision: " << std::get<FusionDecision>(combined_reqs).Explain(); return std::nullopt; } return DimOrdersAndReqs{ std::get<DimOrdersAndReqs>(dim_orders_and_new_reqs).dim_orders, std::get<DotRequirements>(combined_reqs)}; } std::optional<DimOrdersAndReqs> GetUserDimOrdersAndCombinedReqsIfProfitable( const HloInstruction& hlo, const DimensionOrder& hlo_dim_order, const HloInstruction& user, const DotProperties& properties, const se::GpuComputeCapability& gpu_version, const DotRequirements& requirements) { DimOrdersAndReqsOrError dim_orders_and_new_reqs = GetPropagatedDimOrdersAndRequirementsIfProfitablyFusible( user, TransformDirection::kInputToOutput, user.operand_index(&hlo), hlo_dim_order, gpu_version, properties); if 
(std::holds_alternative<FusionDecision>(dim_orders_and_new_reqs)) { VLOG(5) << "Not fusing " << user.ToString() << " to the input due to the decision: " << std::get<FusionDecision>(dim_orders_and_new_reqs).Explain(); return std::nullopt; } DotRequirementsOrError combined_reqs = CombineDotRequirements( requirements, std::get<DimOrdersAndReqs>(dim_orders_and_new_reqs).requirements); if (std::holds_alternative<FusionDecision>(combined_reqs)) { VLOG(5) << "Not fusing " << user.ToString() << " to the input due to the decision: " << std::get<FusionDecision>(combined_reqs).Explain(); return std::nullopt; } return DimOrdersAndReqs{ std::get<DimOrdersAndReqs>(dim_orders_and_new_reqs).dim_orders, std::get<DotRequirements>(combined_reqs)}; } FusionPlanAndRequirements BuildFusionPlanTowardOperands( const HloInstruction& root_hlo, const DimensionOrder& root_dim_order, const std::optional<int>& max_params, const se::GpuComputeCapability& gpu_version, const DotProperties& properties, const DotRequirements& requirements_so_far) { CHECK(!max_params.has_value() || max_params.value() >= 1); AdjacencyList graph; absl::flat_hash_map<AdjacencyList::NodeId, HloAndDimOrder> hlo_and_dim_order_map; absl::flat_hash_map<AdjacencyList::NodeId, NodeFusionPlan> fusion_plan_map; absl::flat_hash_map<HloAndIterSpec, AdjacencyList::NodeId> node_reuse_map; DotRequirements combined_reqs = requirements_so_far; auto get_or_create_fusion_node = [&](const HloInstruction& hlo, const DimensionOrder& dim_order, bool* is_new_node = nullptr) -> AdjacencyList::NodeId { HloAndIterSpec reuse_key = {&hlo, dim_order.ToTensorIterationSpec()}; if (auto it = node_reuse_map.find(reuse_key); it != node_reuse_map.end()) { if (is_new_node != nullptr) { *is_new_node = false; } return it->second; } AdjacencyList::NodeId node_id = graph.AddNode(); CHECK(hlo_and_dim_order_map.insert({node_id, {&hlo, dim_order}}).second); CHECK(node_reuse_map.insert({reuse_key, node_id}).second); if (is_new_node != nullptr) { *is_new_node = true; } return node_id; }; AdjacencyList::NodeId root = get_or_create_fusion_node(root_hlo, root_dim_order); absl::flat_hash_set<AdjacencyList::NodeId> inputs({root}); std::queue<AdjacencyList::NodeId> queue({root}); int64_t num_requeued = 0; while (queue.size() > num_requeued) { AdjacencyList::NodeId node_id = queue.front(); queue.pop(); const HloAndDimOrder& hlo_and_dim_order = hlo_and_dim_order_map.at(node_id); const HloInstruction& original_hlo = *hlo_and_dim_order.original_hlo; const DimensionOrder& dim_order = hlo_and_dim_order.dim_order; if (max_params.has_value() && inputs.size() + NumAddedParameters(original_hlo) > max_params.value()) { queue.push(node_id); ++num_requeued; continue; } num_requeued = 0; if (original_hlo.opcode() == HloOpcode::kParameter) { CHECK(fusion_plan_map .insert({node_id, {&original_hlo, false}}) .second); continue; } auto opt_result = GetOperandDimOrdersAndCombinedReqsIfProfitable( original_hlo, dim_order, properties, gpu_version, combined_reqs); if (!opt_result.has_value()) { CHECK(fusion_plan_map .insert({node_id, {&original_hlo, false}}) .second); continue; } const DimOrderMap operand_dim_orders = std::move(opt_result->dim_orders); combined_reqs = std::move(opt_result->requirements); inputs.erase(node_id); graph.ReserveSpaceForOutNeighbors(node_id, original_hlo.operand_count()); for (int64_t i = 0; i < original_hlo.operand_count(); ++i) { const HloInstruction& operand = *original_hlo.operand(i); const DimensionOrder& operand_dim_order = operand_dim_orders.at(&operand); bool is_new_node = false; 
AdjacencyList::NodeId operand_node_id = get_or_create_fusion_node(operand, operand_dim_order, &is_new_node); graph.AddArc(node_id, operand_node_id); if (is_new_node) { VLOG(6) << "Enqueueing " << operand.ToString() << ":" << operand_dim_order.ToString(); inputs.insert(operand_node_id); queue.push(operand_node_id); } } CHECK( fusion_plan_map.insert({node_id, {&original_hlo, true}}) .second); } while (!queue.empty()) { AdjacencyList::NodeId node_id = queue.front(); queue.pop(); const HloAndDimOrder& hlo_and_dim_order = hlo_and_dim_order_map.at(node_id); CHECK(fusion_plan_map .insert({node_id, {hlo_and_dim_order.original_hlo, false}}) .second); } return {{std::move(graph), std::move(fusion_plan_map)}, std::move(combined_reqs)}; } HloInstruction& BuildFusionTowardOperandsImpl( AdjacencyList::NodeId node_id, const FusionPlan& fusion_plan, absl::flat_hash_map<AdjacencyList::NodeId, HloInstruction*>& fused_hlo_map, HloComputation::Builder& builder, std::vector<HloInstruction*>& fusion_params ) { if (auto it = fused_hlo_map.find(node_id); it != fused_hlo_map.end()) { return *it->second; } const NodeFusionPlan& node_fusion_plan = fusion_plan.map.at(node_id); const bool should_fuse = node_fusion_plan.should_fuse; const HloInstruction& original_hlo = *node_fusion_plan.original_hlo; HloInstruction* fused_hlo = nullptr; if (should_fuse) { HloInstruction::InstructionVector new_operands; for (AdjacencyList::NodeId operand_id : fusion_plan.graph.GetOutNeighbors(node_id)) { new_operands.push_back(&BuildFusionTowardOperandsImpl( operand_id, fusion_plan, fused_hlo_map, builder, fusion_params)); } fused_hlo = builder.AddInstruction( original_hlo.CloneWithNewOperands(original_hlo.shape(), new_operands)); } else { fusion_params.push_back(const_cast<HloInstruction*>(&original_hlo)); fused_hlo = builder.AddInstruction(HloInstruction::CreateParameter( fusion_params.size() - 1, original_hlo.shape(), absl::StrCat("parameter_", fusion_params.size() - 1))); } CHECK(fused_hlo_map.insert({node_id, fused_hlo}).second); return *fused_hlo; } HloInstruction& BuildFusionTowardOperands( const FusionPlan& fusion_plan, HloComputation::Builder& builder, std::vector<HloInstruction*>& fusion_params ) { absl::flat_hash_map<AdjacencyList::NodeId, HloInstruction*> fused_hlo_map; return BuildFusionTowardOperandsImpl(fusion_plan.graph.GetRoot(), fusion_plan, fused_hlo_map, builder, fusion_params); } HlosAndRequirements FuseTowardOperands( const HloInstruction& root_hlo, const DimensionOrder& root_dim_order, const std::optional<int>& max_params, const se::GpuComputeCapability& gpu_version, const DotProperties& properties, const DotRequirements& requirements_so_far, HloComputation::Builder& builder, std::vector<HloInstruction*>& fusion_params ) { FusionPlanAndRequirements fusion_plan_and_reqs = BuildFusionPlanTowardOperands(root_hlo, root_dim_order, max_params, gpu_version, properties, requirements_so_far); HloInstruction& fused_hlo_or_param = BuildFusionTowardOperands( fusion_plan_and_reqs.fusion_plan, builder, fusion_params); return HlosAndRequirements{&root_hlo, &fused_hlo_or_param, fusion_plan_and_reqs.requirements}; } absl::StatusOr<HlosAndRequirements> FuseDotOperand( const HloInstruction& dot, int operand_index, const se::GpuComputeCapability& gpu_version, HloComputation::Builder& builder, std::vector<HloInstruction*>& fusion_params ) { TF_ASSIGN_OR_RETURN(const FusionContext context, FusionContext::FromDotOperand(dot, operand_index)); const HloInstruction& operand = *dot.operand(operand_index); return FuseTowardOperands(operand, 
context.dim_orders().at(&operand), TritonFusionAnalysis::kMaxParameterPerDotOperand, gpu_version, context.dot_properties(), context.requirements(), builder, fusion_params); } HlosAndRequirements FuseTowardUsers( const HloInstruction& hlo, const HloInstruction& fused_hlo, const DimensionOrder& hlo_dim_order, const se::GpuComputeCapability& gpu_version, const DotProperties& properties, const DotRequirements& requirements, HloComputation::Builder& builder, std::vector<HloInstruction*>& fusion_params ) { const HlosAndRequirements existing_hlos_and_requirements = {&hlo, &fused_hlo, requirements}; if (hlo.user_count() != 1) { return existing_hlos_and_requirements; } const HloInstruction& user = *hlo.users()[0]; if (!legacy_triton::IsDistributiveOverAddition(user)) { return existing_hlos_and_requirements; } auto opt_user_result = GetUserDimOrdersAndCombinedReqsIfProfitable( hlo, hlo_dim_order, user, properties, gpu_version, requirements); if (!opt_user_result.has_value()) { return existing_hlos_and_requirements; } DimensionOrder user_dim_order = opt_user_result->dim_orders.at(&user); DotRequirements combined_requirements = opt_user_result->requirements; HloInstruction::InstructionVector new_operands; if (user.operand_count() == 1) { new_operands.push_back(const_cast<HloInstruction*>(&fused_hlo)); } else { auto opt_operand_result = GetOperandDimOrdersAndCombinedReqs( user, user_dim_order, properties, gpu_version, combined_requirements); if (!opt_operand_result.has_value()) { return existing_hlos_and_requirements; } DimOrderMap operand_dim_orders = opt_operand_result->dim_orders; combined_requirements = opt_operand_result->requirements; for (int i = 0; i < user.operand_count(); ++i) { const HloInstruction& operand = *user.operand(i); if (&operand == &hlo) { new_operands.push_back(const_cast<HloInstruction*>(&fused_hlo)); } else { HlosAndRequirements hlos_and_requirements = FuseTowardOperands( operand, operand_dim_orders.at(&operand), std::nullopt, gpu_version, properties, combined_requirements, builder, fusion_params); new_operands.push_back( const_cast<HloInstruction*>(hlos_and_requirements.fused_hlo)); combined_requirements = hlos_and_requirements.requirements; } } } const HloInstruction& fused_user = *builder.AddInstruction( user.CloneWithNewOperands(user.shape(), new_operands)); return FuseTowardUsers(user, fused_user, user_dim_order, gpu_version, properties, combined_requirements, builder, fusion_params); } HlosAndRequirements FuseDotOutput( const HloInstruction& dot, const HloInstruction& fused_dot, const se::GpuComputeCapability& gpu_version, const DotRequirements& requirements, HloComputation::Builder& builder, std::vector<HloInstruction*>& fusion_params ) { const auto context = FusionContext::FromDotOutput(dot, 1, requirements); return FuseTowardUsers(dot, fused_dot, context.dim_orders().at(&dot), gpu_version, context.dot_properties(), context.requirements(), builder, fusion_params); } namespace { class Decision { public: bool CanFuse() const { return fusing_decision_.CanFuse() || able_to_fuse_; } bool WantToFuse() const { return fusing_decision_.CanFuse(); } static Decision Allow() { return {FusionDecision::Allow(), true}; }; static Decision Deny(std::string_view value) { return {FusionDecision::Forbid(value), false}; } static Decision NotProfitable(std::string_view value) { return {FusionDecision::Forbid(value), true}; } private: Decision(FusionDecision decision, bool able_to_fuse) : fusing_decision_(std::move(decision)), able_to_fuse_(able_to_fuse) {} FusionDecision fusing_decision_; 
bool able_to_fuse_; }; } absl::StatusOr<Decision> CreateDotFusion( const HloDotInstruction& dot, const se::GpuComputeCapability gpu_version, HloComputation::Builder& builder, std::vector<HloInstruction*>& fusion_inputs, HloInstruction** fusion_output_ptr) { VLOG(5) << dot.ToString(); if (CodegenDecision is_supported = legacy_triton::IsTritonSupportedInstruction(dot, gpu_version); !is_supported) { VLOG(3) << is_supported.Explain(); return Decision::Deny(is_supported.Explain()); } if (dot.sparse_operands()) { const SparsityDescriptor& descriptor = dot.sparsity().front(); if (dot.sparse_operands() != 1 || descriptor.index() != 0) { return InvalidArgument("Sparsity is only supported on left operand"); } if (descriptor.type() != SparsityType::SPARSITY_STRUCTURED_N_M || descriptor.n() != 2 || descriptor.m() != 4) { return InvalidArgument("Only 2:4 structured sparsity is supported"); } CHECK_EQ(descriptor.dimension(), dot.operand(0)->shape().rank() - 1); } TF_ASSIGN_OR_RETURN(HlosAndRequirements lhs_hlos_and_reqs, FuseDotOperand(dot, 0, gpu_version, builder, fusion_inputs)); TF_ASSIGN_OR_RETURN(HlosAndRequirements rhs_hlos_and_reqs, FuseDotOperand(dot, 1, gpu_version, builder, fusion_inputs)); std::optional<const HloInstruction*> meta_hlo; if (dot.sparse_operands()) { TF_ASSIGN_OR_RETURN(HlosAndRequirements meta_hlos_and_reqs, FuseDotOperand(dot, 2, gpu_version, builder, fusion_inputs)); meta_hlo.emplace(meta_hlos_and_reqs.fused_hlo); } HloInstruction& fused_dot = FuseDot(dot, *lhs_hlos_and_reqs.fused_hlo, *rhs_hlos_and_reqs.fused_hlo, meta_hlo, builder); HlosAndRequirements fused_output_and_reqs = FuseDotOutput(dot, fused_dot, gpu_version, lhs_hlos_and_reqs.requirements, builder, fusion_inputs); if (fusion_output_ptr != nullptr) { *fusion_output_ptr = const_cast<HloInstruction*>(fused_output_and_reqs.original_hlo); } bool has_int4_param = absl::c_any_of(fusion_inputs, [](const HloInstruction* hlo) { return hlo->shape().element_type() == PrimitiveType::S4; }); if (has_int4_param) { auto analysis_or = TritonFusionAnalysis::Execute(dot); if (analysis_or.ok()) { const auto& analysis = analysis_or.value(); if (!analysis.IsBatchDimMinorForInt4Parameter( dot, TritonFusionAnalysis::Scope::LHS) || !analysis.IsBatchDimMinorForInt4Parameter( dot, TritonFusionAnalysis::Scope::RHS)) { return Decision::Deny( "Fusion is not possible because the parameter with the type S4 has " "minor batch dimension."); } } } const PrecisionConfig::Algorithm algorithm = dot.precision_config().algorithm(); if (algorithm == PrecisionConfig::ALG_DOT_BF16_BF16_F32_X6 || algorithm == PrecisionConfig::ALG_DOT_BF16_BF16_F32_X3 || algorithm == PrecisionConfig::ALG_DOT_BF16_BF16_F32 || algorithm == PrecisionConfig::ALG_DOT_TF32_TF32_F32_X3 || dot.GetModule()->config().debug_options().xla_gpu_triton_gemm_any() || dot.sparse_operands()) { return Decision::Allow(); } bool is_pure_matmul = true; (void)builder.ForEachInstruction([&](const HloInstruction* fused_hlo) { static constexpr std::array<HloOpcode, 4> kPureOpcodes = { HloOpcode::kBitcast, HloOpcode::kDot, HloOpcode::kParameter, HloOpcode::kReshape}; if (absl::c_find(kPureOpcodes, fused_hlo->opcode()) == kPureOpcodes.end()) { is_pure_matmul = false; return absl::CancelledError(); } return absl::OkStatus(); }); if (is_pure_matmul) return Decision::NotProfitable("Pure Matmul"); return Decision::Allow(); } class GemmFusionVisitor : public DfsHloRewriteVisitor { public: explicit GemmFusionVisitor(const se::GpuComputeCapability& gpu_version) : gpu_version_(gpu_version) {} absl::Status 
HandleDot(HloInstruction* dot) override { CHECK_EQ(dot->opcode(), HloOpcode::kDot); int64_t gemm_rewrite_size_threshold = dot->GetModule() ->config() .debug_options() .xla_gpu_gemm_rewrite_size_threshold(); TF_ASSIGN_OR_RETURN(bool is_matmul_tiny, IsMatrixMultiplicationTooSmallForRewriting( *dot, gemm_rewrite_size_threshold)); if (is_matmul_tiny && IsDotSupportedByClassicalEmitters(*dot)) { return absl::OkStatus(); } std::string fusion_name = absl::StrCat("gemm_fusion_", dot->name()); HloComputation::Builder builder(absl::StrCat(fusion_name, "_computation")); std::vector<HloInstruction*> fusion_inputs; HloInstruction* fusion_output = nullptr; TF_ASSIGN_OR_RETURN( const Decision decision, CreateDotFusion(*Cast<HloDotInstruction>(dot), gpu_version_, builder, fusion_inputs, &fusion_output)); if (!decision.CanFuse()) { return absl::OkStatus(); } if (std::holds_alternative<se::CudaComputeCapability>(gpu_version_)) { if (!CublasRequiresPadding( *Cast<HloDotInstruction>(dot), std::get<se::CudaComputeCapability>(gpu_version_)) && !decision.WantToFuse()) { return absl::OkStatus(); } } HloComputation* computation = dot->GetModule()->AddComputationAndUnifyNamesAndIds(builder.Build(), false); HloInstruction* dot_fusion = dot->parent()->AddInstruction(HloInstruction::CreateFusion( computation->root_instruction()->shape(), HloInstruction::FusionKind::kCustom, fusion_inputs, computation)); dot_fusion->set_metadata(dot->metadata()); dot_fusion->GetModule()->SetAndUniquifyInstrName(dot_fusion, fusion_name); TF_ASSIGN_OR_RETURN(auto gpu_config, dot_fusion->backend_config<GpuBackendConfig>()); FusionBackendConfig& backend_config = *gpu_config.mutable_fusion_backend_config(); backend_config.set_kind(std::string(kTritonGemmFusionKind)); TF_RETURN_IF_ERROR(dot_fusion->set_backend_config(gpu_config)); if (fusion_output->IsRoot()) { fusion_output->parent()->set_root_instruction(dot_fusion); TF_RETURN_IF_ERROR( fusion_output->parent()->RemoveInstructionAndUnusedOperands( fusion_output)); MarkAsChanged(); } else { TF_RETURN_IF_ERROR(ReplaceInstruction(fusion_output, dot_fusion)); } XLA_VLOG_LINES(5, computation->ToString(HloPrintOptions::ShortParsable())); return absl::OkStatus(); } private: se::GpuComputeCapability gpu_version_; }; absl::StatusOr<bool> RunOnComputation( HloComputation* computation, const se::GpuComputeCapability& gpu_version) { GemmFusionVisitor visitor(gpu_version); TF_RETURN_IF_ERROR(computation->Accept(&visitor)); return visitor.changed(); } } bool ShouldTritonHandleGEMM(HloDotInstruction& dot, const se::GpuComputeCapability& gpu_version) { std::vector<HloInstruction*> fusion_inputs; HloComputation::Builder builder("disposable"); return CreateDotFusion(dot, gpu_version, builder, fusion_inputs, nullptr) ->WantToFuse(); } absl::StatusOr<bool> GemmFusion::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { TF_RETURN_IF_ERROR( EnsureTritonSupportsComputeCapability(compute_capability_)); bool changed = false; for (HloComputation* computation : module->MakeNonfusionComputations(execution_threads)) { TF_ASSIGN_OR_RETURN(bool result, RunOnComputation(computation, compute_capability_)); changed |= result; } return changed; } } }
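A short usage sketch, not taken from gemm_fusion.cc: it runs GemmFusion for an Ampere target the same way the tests below construct the pass; the standalone function name and the namespace alias are assumptions.

// Hedged sketch: apply GemmFusion to a module for compute capability 8.0.
#include "absl/status/statusor.h"
#include "xla/hlo/ir/hlo_module.h"
#include "xla/service/gpu/transforms/gemm_fusion.h"
#include "xla/stream_executor/device_description.h"

namespace se = ::stream_executor;

absl::StatusOr<bool> FuseGemmsForAmpere(xla::HloModule* module) {
  // GemmFusion needs the target compute capability; unsupported targets are
  // rejected up front via EnsureTritonSupportsComputeCapability.
  se::GpuComputeCapability cc{
      se::CudaComputeCapability{se::CudaComputeCapability::AMPERE, 0}};
  return xla::gpu::GemmFusion(cc).Run(module);
}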
#include "xla/service/gpu/transforms/gemm_fusion.h" #include <memory> #include <string> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/status/status.h" #include "absl/strings/string_view.h" #include "xla/autotuning.pb.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/gpu/cublas_padding_requirements.h" #include "xla/service/gpu/triton_fusion_analysis.h" #include "xla/service/pattern_matcher.h" #include "xla/service/pattern_matcher_gmock.h" #include "xla/stream_executor/device_description.h" #include "xla/tests/filecheck.h" #include "xla/tests/hlo_test_base.h" #include "xla/tests/verified_hlo_module.h" #include "xla/xla.pb.h" #include "xla/xla_data.pb.h" #include "tsl/platform/status_matchers.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { using ::testing::ElementsAre; using ::testing::FieldsAre; namespace m = ::xla::match; class GemmFusionTest : public HloTestBase { public: GemmFusionTest() : HloTestBase(true, false) {} DebugOptions GetDebugOptionsForTest() override { DebugOptions debug_options = HloTestBase::GetDebugOptionsForTest(); debug_options.set_xla_gpu_triton_gemm_any(false); debug_options.set_xla_gpu_gemm_rewrite_size_threshold(0); return debug_options; } se::GpuComputeCapability gpu_version_{ se::CudaComputeCapability{se::CudaComputeCapability::AMPERE, 0}}; void MatchHloModule(HloModule& module, absl::string_view pattern) { TF_ASSERT_OK_AND_ASSIGN(bool filecheck_result, RunFileCheck(module.ToString(), pattern)); EXPECT_TRUE(filecheck_result); } }; TEST_F(GemmFusionTest, TransposeSubdimensionGroup) { auto module = ParseAndReturnVerifiedModule(R"( HloModule m ENTRY e { p0 = f32[32,3] parameter(0) t1 = f32[3,32] transpose(p0), dimensions={1,0} r1 = f32[3,8,4] reshape(t1) r0 = f32[3,32] reshape(r1) p1 = f16[32,7] parameter(1) c1 = f32[32,7] convert(p1) ROOT d = f32[3,7] dot(r0, c1), lhs_contracting_dims={1}, rhs_contracting_dims={0} })") .value(); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Fusion(m::Parameter(), m::Parameter()))); } TEST_F(GemmFusionTest, UnsupportedTransposeIsNotFused) { auto module = ParseAndReturnVerifiedModule(R"( ENTRY e { p0 = f16[1,512,8,1024]{3,1,0,2} parameter(0) c = f16[1,512,8,1024]{3,2,1,0} copy(p0) b = f16[4096,1024]{1,0} bitcast(c) p1 = f16[128,1024]{1,0} parameter(1) ROOT d = f16[4096,128]{1,0} dot(b, p1), lhs_contracting_dims={1}, rhs_contracting_dims={1} })") .value(); EXPECT_FALSE(GemmFusion(gpu_version_).Run(module.get()).value()); } TEST_F(GemmFusionTest, BitcastChain) { auto module = ParseAndReturnVerifiedModule(R"( HloModule m ENTRY e { p0 = s8[60,5] parameter(0) r0 = s8[3,20,5] reshape(p0) c0 = f16[3,20,5] convert(r0) p1 = f16[3,200] parameter(1) r12 = f16[600] reshape(p1) r11 = f16[30,20] reshape(r12) r1 = f16[3,10,20] reshape(r11) ROOT d = f16[3,5,10] dot(c0, r1), lhs_contracting_dims={1}, rhs_contracting_dims={2}, lhs_batch_dims={0}, rhs_batch_dims={0} })") .value(); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Fusion(m::Parameter(), m::Parameter()))); } TEST_F(GemmFusionTest, SplitDimensionTwice) { auto module = ParseAndReturnVerifiedModule(R"( ENTRY e { p0 = s8[4,2,32,4,2] parameter(0) r1 = s8[8,32,8] reshape(p0) t1 = s8[32,8,8] transpose(r1), dimensions={1,0,2} r0 = 
s8[32,64] reshape(t1) p1 = s8[32,32] parameter(1) c0 = f16[32,32] convert(p1) ROOT d = f16[64,32] dot(r0, c0), lhs_contracting_dims={0}, rhs_contracting_dims={1} })") .value(); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Fusion(m::Parameter(), m::Parameter()))); } TEST_F(GemmFusionTest, DoNotTriggerOnUnsupportedOutputConversions) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { p0 = f16[128,256] parameter(0) p1 = f16[256,512] parameter(1) r = f16[128,512] dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT c = u8[128,512] convert(r) })")); EXPECT_FALSE(GemmFusion(gpu_version_).Run(module.get()).value()); } TEST_F(GemmFusionTest, FuseDotWithTrivialNoncontractingDim) { auto module = ParseAndReturnVerifiedModule(R"( HloModule m ENTRY e { p0 = s8[60,5] parameter(0) r0 = s8[3,20,5] reshape(p0) c0 = f16[3,20,5] convert(r0) p1 = f16[3,1,20] parameter(1) ROOT d = f16[3,5,1] dot(c0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={2}, lhs_batch_dims={0}, rhs_batch_dims={0} })") .value(); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Fusion(m::Parameter(), m::Parameter()))); } TEST_F(GemmFusionTest, HandleDotIfCublasRequiresPadding) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( HloModule m ENTRY e { p0 = f16[5,3] parameter(0) p1 = f16[5,7] parameter(1) ROOT d = f16[3,7] dot(p0, p1), lhs_contracting_dims={0}, rhs_contracting_dims={0} })")); const se::CudaComputeCapability cc{se::CudaComputeCapability::AMPERE, 0}; EXPECT_TRUE(CublasRequiresPadding( *xla::Cast<HloDotInstruction>( module->entry_computation()->root_instruction()), cc)); EXPECT_TRUE(GemmFusion(cc).Run(module.get()).value()); } TEST_F(GemmFusionTest, FuseSliceOfParameterWithOtherUsers) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { p0 = f32[97,121] parameter(0) s0 = f32[7,101] slice(p0), slice={[3:10], [10:111]} p1 = f32[101,16] parameter(1) d = f32[16,7] dot(p1, s0), lhs_contracting_dims={0}, rhs_contracting_dims={1} s1 = f32[3,33] slice(p0), slice={[10:13], [20:53]} ROOT t = tuple(d, s1) })")); const se::CudaComputeCapability cc{se::CudaComputeCapability::AMPERE, 0}; EXPECT_TRUE(GemmFusion(cc).Run(module.get()).value()); } TEST_F(GemmFusionTest, DoNotFuseSliceOfMixedDimensions) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { p0 = bf16[768,64] parameter(0) s0 = bf16[768,32] slice(p0), slice={[0:768], [0:32]} b0 = bf16[256,3,32] reshape(s0) b1 = bf16[256,96] reshape(b0) p1 = bf16[256,96] parameter(1) ROOT d = bf16[96,96] dot(b1, p1), lhs_contracting_dims={0}, rhs_contracting_dims={0} })")); const se::CudaComputeCapability cc{se::CudaComputeCapability::AMPERE, 0}; EXPECT_FALSE(GemmFusion(cc).Run(module.get()).value()); } TEST_F(GemmFusionTest, DoNotFuseSlicesOfNonMajorFragments) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { p0 = f32[2,2,256,256] parameter(0) s0 = f32[1,1,256,256] slice(p0), slice={[0:1], [0:1], [0:256], [0:256]} r0 = f32[256,256] reshape(s0) p1 = f16[2,2,256,256] parameter(1) s1 = f16[1,1,256,256] slice(p1), slice={[0:1], [0:1], [0:256], [0:256]} r1 = f16[256,256] reshape(s1) ROOT d = f32[256,256] dot(r0, 
r1), lhs_contracting_dims={1}, rhs_contracting_dims={0} })")); const se::CudaComputeCapability cc{se::CudaComputeCapability::AMPERE, 0}; EXPECT_FALSE(GemmFusion(cc).Run(module.get()).value()); } TEST_F(GemmFusionTest, DynamicSliceIsFused) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { dot_lhs = f32[2,18] parameter(0) dynamic_slice_input = f32[2,64,2] parameter(1) start_index0 = s32[] parameter(2) start_index1_2 = s32[] constant(0) dynamic_slice = f32[1,64,2] dynamic-slice(dynamic_slice_input, start_index0, start_index1_2, start_index1_2), dynamic_slice_sizes={1,64,2} reshape = f32[64,2] reshape(dynamic_slice) ROOT dot = f16[18,64] dot(dot_lhs, reshape), lhs_contracting_dims={0}, rhs_contracting_dims={1} })")); EXPECT_TRUE(GemmFusion(se::CudaComputeCapability{ se::CudaComputeCapability::AMPERE, 0}) .Run(module.get()) .value()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch((m::Fusion(m::Parameter(), m::Parameter(), m::Parameter(), m::Constant())))); } TEST_F(GemmFusionTest, DynamicSlicesAreFusedEvenIfTheyShareIndices) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { p0 = f32[2,64,2] parameter(0) p1 = s32[] parameter(1) p2 = s32[] parameter(2) p3 = s32[] parameter(3) ds0 = f32[1,64,2] dynamic-slice(p0, p1, p2, p3), dynamic_slice_sizes={1,64,2} a = f32[64,2] reshape(ds0) ds1 = f32[1,64,2] dynamic-slice(p0, p3, p2, p1), dynamic_slice_sizes={1,64,2} b = f32[64,2] reshape(ds1) ROOT d = f16[64,64] dot(a, b), lhs_contracting_dims={1}, rhs_contracting_dims={1} })")); EXPECT_TRUE(GemmFusion(se::CudaComputeCapability{ se::CudaComputeCapability::AMPERE, 0}) .Run(module.get()) .value()); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch((m::Fusion(m::Parameter(), m::Parameter(), m::Parameter(), m::Parameter(), m::Parameter(), m::Parameter(), m::Parameter(), m::Parameter())))); } TEST_F(GemmFusionTest, DoNotFuseDynamicSliceOfNonMajorFragments) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { dot_lhs = f32[2,4]{1,0} parameter(0) dynamic_slice_input = f32[4,5,2]{2,1,0} parameter(1) c0 = s32[] constant(0) c2 = s32[] constant(2) dynamic_slice = f32[4,1,2]{2,1,0} dynamic-slice(dynamic_slice_input, c0, c2, c0), dynamic_slice_sizes={4,1,2} reshape = f32[4,2]{1,0} reshape(dynamic_slice) ROOT dot = f32[4,4]{1,0} dot(dot_lhs, reshape), lhs_contracting_dims={0}, rhs_contracting_dims={1} })")); const se::CudaComputeCapability cc{se::CudaComputeCapability::AMPERE, 0}; EXPECT_FALSE(GemmFusion(cc).Run(module.get()).value()); } TEST_F(GemmFusionTest, CanFuseDynamicSliceOfContractingDimIfItIsMajor) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { dot_lhs = f32[2,4]{1,0} parameter(0) dynamic_slice_input = f32[5,5]{1,0} parameter(1) start_index0 = s32[] constant(2) start_index1 = s32[] constant(0) dynamic_slice = f32[2,5]{1,0} dynamic-slice(dynamic_slice_input, start_index0, start_index1), dynamic_slice_sizes={2,5} ROOT d = f32[4,5]{1,0} dot(dot_lhs, dynamic_slice), lhs_contracting_dims={0}, rhs_contracting_dims={0} })")); EXPECT_TRUE(GemmFusion(se::CudaComputeCapability{ se::CudaComputeCapability::AMPERE, 0}) .Run(module.get()) .value()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch((m::Fusion(m::Parameter(), m::Parameter(), m::Constant(), m::Constant())))); } TEST_F(GemmFusionTest, 
SliceToDegenerateIsSkipped) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { p = f32[3] parameter(0) s = f32[1] slice(p), slice={[2:3]} r = f32[] reshape(s) b = f32[3,3] broadcast(r), dimensions={} ROOT d = f32[3,3] dot(b, b), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )")); const se::CudaComputeCapability cc{se::CudaComputeCapability::AMPERE, 0}; ASSERT_TRUE(GemmFusion(cc).Run(module.get()).value()); MatchHloModule(*module, R"( ; CHECK-NOT: slice ; CHECK: ENTRY ; CHECK: slice )"); } TEST_F(GemmFusionTest, MultipleUsesAreHandled) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { c = f32[] constant(1) b = f32[6,8] broadcast(c), dimensions={} p0 = f32[6,8] parameter(0) a1 = f32[6,8] add(p0, b) e = f32[6,8] exponential(a1) a2 = f32[6,8] add(e, b) d = f32[6,8] divide(b, a2) p2 = f16[8,6] parameter(1) cv = f32[8,6] convert(p2) ROOT r = f32[6,6] dot(d, cv), lhs_contracting_dims={1}, rhs_contracting_dims={0} })")); const se::CudaComputeCapability cc{se::CudaComputeCapability::AMPERE, 0}; EXPECT_TRUE(GemmFusion(cc).Run(module.get()).value()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Fusion(m::Parameter(), m::Parameter()))); } TEST_F(GemmFusionTest, BinaryElementwiseOfBroadcastIsFused) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { p2 = f32[3072] parameter(2) b = f32[8192,3072] broadcast(p2), dimensions={1} p0 = f16[8192,3072] parameter(0) p0c = f32[8192,3072] convert(p0) a = f32[8192,3072] add(p0c, b) p1 = f32[3072,768] parameter(1) ROOT r = f32[8192,768] dot(a, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} })")); const se::CudaComputeCapability cc{se::CudaComputeCapability::AMPERE, 0}; EXPECT_TRUE(GemmFusion(cc).Run(module.get()).value()); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch(m::Fusion(m::Parameter(), m::Parameter(), m::Parameter()))); } TEST_F(GemmFusionTest, BinaryElementwiseOfUnsupportedBroadcastIsNotFused) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { p2 = f32[768] parameter(2) b = f32[8192,768,4] broadcast(p2), dimensions={1} s = f32[8192,3072] bitcast(b) p0 = f16[8192,3072] parameter(0) p0c = f32[8192,3072] convert(p0) a = f32[8192,3072] add(p0c, s) p1 = f32[3072,768] parameter(1) ROOT r = f32[8192,768] dot(a, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} })")); const se::CudaComputeCapability cc{se::CudaComputeCapability::AMPERE, 0}; EXPECT_FALSE(GemmFusion(cc).Run(module.get()).value()); } class GemmFusionLevel2Test : public GemmFusionTest { public: DebugOptions GetDebugOptionsForTest() override { DebugOptions debug_options = GemmFusionTest::GetDebugOptionsForTest(); debug_options.set_xla_gpu_triton_fusion_level(2); return debug_options; } }; TEST_F(GemmFusionTest, ConcatenationDivisibleBy64IsFused) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { p0 = bf16[8192,1]{1,0} parameter(0) p1 = bf16[2752,8192]{1,0} parameter(1) p2 = bf16[2752,8192]{1,0} parameter(2) concat = bf16[5504,8192]{1,0} concatenate(p1, p2), dimensions={0} bitcast = bf16[8192,5504]{0,1} bitcast(concat) ROOT r = f32[1,5504]{1,0} dot(p0, bitcast), lhs_contracting_dims={0}, rhs_contracting_dims={0} })")); const se::CudaComputeCapability cc{se::CudaComputeCapability::AMPERE, 0}; 
EXPECT_TRUE(GemmFusion(cc).Run(module.get()).value()); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch(m::Fusion(m::Parameter(), m::Parameter(), m::Parameter()))); } TEST_F(GemmFusionLevel2Test, ReshapeToScalarIsHandled) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { p0 = s8[5,3] parameter(0) c = f16[5,3] convert(p0) p1 = f16[1] parameter(1) r = f16[] reshape(p1) b = f16[5,7] broadcast(r) ROOT d = f16[3,7] dot(c, b), lhs_contracting_dims={0}, rhs_contracting_dims={0} })")); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch(m::Fusion(m::Parameter(), m::Parameter()))); } TEST_F(GemmFusionLevel2Test, DoNotFuseIncompatibleDimensionSplits) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { p1 = s8[5,7,2,3]{3,2,1,0} parameter(1) t1 = s8[7,5,2,3]{3,2,1,0} transpose(p1), dimensions={1,0,2,3} r1 = s8[7,30]{1,0} reshape(t1) cvt = f16[7,30]{1,0} convert(r1) p2 = f16[2,7,5,3]{3,2,1,0} parameter(2) t2 = f16[7,2,5,3]{3,2,1,0} transpose(p2), dimensions={1,0,2,3} r2 = f16[7,30]{1,0} reshape(t2) a = f16[7,30]{1,0} add(cvt, r2) p0 = f16[7,79]{1,0} parameter(0) ROOT dot = f16[30,79]{1,0} dot(a, p0), lhs_contracting_dims={0}, rhs_contracting_dims={0} })")); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch(m::Fusion(m::Transpose(), m::Parameter(), m::Parameter()))); } TEST_F(GemmFusionLevel2Test, DoNotFuseTooManyParameters) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { tmp_0 = f32[] constant(1) tmp_1 = f32[3,49]{1,0} broadcast(tmp_0), dimensions={} tmp_2 = f32[3,49]{1,0} parameter(6) tmp_3 = f32[] constant(0) tmp_4 = f32[3,49]{1,0} broadcast(tmp_3), dimensions={} tmp_5 = pred[3,49]{1,0} compare(tmp_2, tmp_4), direction=GT tmp_6 = f32[3,49]{1,0} convert(tmp_5) tmp_7 = f32[3,49]{1,0} subtract(tmp_1, tmp_6) tmp_8 = s32[] parameter(13) tmp_9 = f32[] convert(tmp_8) tmp_10 = f32[] maximum(tmp_9, tmp_0) tmp_11 = f32[] divide(tmp_3, tmp_10) tmp_12 = f32[3,49]{1,0} broadcast(tmp_11), dimensions={} tmp_13 = pred[3,49]{1,0} parameter(7) tmp_14 = pred[3,49]{1,0} parameter(10) tmp_15 = pred[3,49]{1,0} and(tmp_13, tmp_14) tmp_16 = f32[3,49]{1,0} convert(tmp_15) tmp_17 = f32[3,49]{1,0} multiply(tmp_12, tmp_16) tmp_18 = f32[3,49]{1,0} negate(tmp_17) tmp_19 = f32[3,49]{1,0} multiply(tmp_7, tmp_18) tmp_20 = f32[3,49]{1,0} parameter(19) tmp_21 = f32[3,49]{1,0} subtract(tmp_1, tmp_20) tmp_22 = f32[3,49]{1,0} divide(tmp_19, tmp_21) tmp_23 = f32[3,49]{1,0} negate(tmp_22) tmp_24 = f32[3,49]{1,0} negate(tmp_6) tmp_25 = f32[3,49]{1,0} multiply(tmp_24, tmp_17) tmp_26 = f32[3,49]{1,0} divide(tmp_25, tmp_20) tmp_27 = f32[3,49]{1,0} add(tmp_23, tmp_26) tmp_28 = f32[3,49]{1,0} parameter(18) tmp_29 = f32[3,49]{1,0} multiply(tmp_27, tmp_28) tmp_30 = f32[3,49]{1,0} parameter(17) tmp_31 = f32[3,49]{1,0} multiply(tmp_29, tmp_30) tmp_32 = f32[3,49]{1,0} parameter(16) tmp_33 = f32[3,49]{1,0} multiply(tmp_31, tmp_32) tmp_34 = f32[3,49]{1,0} parameter(15) tmp_35 = f32[3,49]{1,0} add(tmp_33, tmp_34) tmp_36 = f32[3,49]{1,0} parameter(14) tmp_37 = f32[3,49]{1,0} add(tmp_35, tmp_36) tmp_38 = f32[1,1]{1,0} constant({ {0} }) tmp_39 = f32[1,1]{1,0} broadcast(tmp_38), dimensions={0,1} tmp_40 = f32[] reshape(tmp_39) tmp_41 = f32[3,32]{1,0} broadcast(tmp_40), dimensions={} 
tmp_42 = u32[48]{0} parameter(11) tmp_43 = u32[48]{0} parameter(5) tmp_44 = u32[96]{0} concatenate(tmp_42, tmp_43), dimensions={0} tmp_45 = u32[3,32]{1,0} reshape(tmp_44) tmp_46 = u32[96]{0} reshape(tmp_45) tmp_47 = u32[] constant(1) tmp_48 = u32[3,32]{1,0} broadcast(tmp_47), dimensions={} tmp_49 = u32[96]{0} reshape(tmp_48) tmp_50 = u32[96]{0} shift-right-logical(tmp_46, tmp_49) tmp_51 = u32[3,32]{1,0} reshape(tmp_50) tmp_52 = u32[3,32]{1,0} or(tmp_51, tmp_48) tmp_53 = f32[3,32]{1,0} bitcast-convert(tmp_52) tmp_54 = f32[3,32]{1,0} broadcast(tmp_0), dimensions={} tmp_55 = f32[3,32]{1,0} subtract(tmp_53, tmp_54) tmp_56 = f32[1,1]{1,0} constant({ {1} }) tmp_57 = f32[1,1]{1,0} broadcast(tmp_56), dimensions={0,1} tmp_58 = f32[] reshape(tmp_57) tmp_59 = f32[3,32]{1,0} broadcast(tmp_58), dimensions={} tmp_60 = f32[3,32]{1,0} multiply(tmp_55, tmp_59) tmp_61 = f32[3,32]{1,0} add(tmp_60, tmp_41) tmp_62 = f32[3,32]{1,0} maximum(tmp_41, tmp_61) tmp_63 = f32[3,32]{1,0} broadcast(tmp_3), dimensions={} tmp_64 = pred[3,32]{1,0} compare(tmp_62, tmp_63), direction=LT tmp_65 = f32[3,32]{1,0} convert(tmp_64) tmp_66 = f32[3,49]{1,0} parameter(9) tmp_67 = f32[49]{0} parameter(4) tmp_68 = f32[3,49]{1,0} broadcast(tmp_67), dimensions={1} tmp_69 = f32[3,49]{1,0} add(tmp_66, tmp_68) tmp_70 = f32[1,49]{1,0} parameter(12) tmp_71 = f32[1,49]{1,0} broadcast(tmp_0), dimensions={} tmp_72 = f32[1,49]{1,0} divide(tmp_70, tmp_71) tmp_73 = f32[1,49]{1,0} broadcast(tmp_72), dimensions={0,1} tmp_74 = f32[49]{0} reshape(tmp_73) tmp_75 = f32[3,49]{1,0} broadcast(tmp_74), dimensions={1} tmp_76 = f32[3,49]{1,0} subtract(tmp_69, tmp_75) tmp_77 = f32[1,49]{1,0} parameter(3) tmp_78 = f32[1,49]{1,0} parameter(8) tmp_79 = f32[1,49]{1,0} divide(tmp_78, tmp_71) tmp_80 = f32[1,49]{1,0} multiply(tmp_72, tmp_72) tmp_81 = f32[1,49]{1,0} subtract(tmp_79, tmp_80) tmp_82 = f32[1,49]{1,0} add(tmp_81, tmp_71) tmp_83 = f32[1,49]{1,0} rsqrt(tmp_82) tmp_84 = f32[1,49]{1,0} multiply(tmp_77, tmp_83) tmp_85 = f32[1,49]{1,0} broadcast(tmp_84), dimensions={0,1} tmp_86 = f32[49]{0} reshape(tmp_85) tmp_87 = f32[3,49]{1,0} broadcast(tmp_86), dimensions={1} tmp_88 = f32[3,49]{1,0} multiply(tmp_76, tmp_87) tmp_89 = f32[1,49]{1,0} parameter(2) tmp_90 = f32[1,49]{1,0} broadcast(tmp_89), dimensions={0,1} tmp_91 = f32[49]{0} reshape(tmp_90) tmp_92 = f32[3,49]{1,0} broadcast(tmp_91), dimensions={1} tmp_93 = f32[3,49]{1,0} add(tmp_88, tmp_92) tmp_94 = f32[49,32]{1,0} parameter(1) tmp_95 = f32[3,32]{1,0} dot(tmp_93, tmp_94), lhs_contracting_dims={1}, rhs_contracting_dims={0} tmp_96 = f32[32]{0} parameter(0) tmp_97 = f32[3,32]{1,0} broadcast(tmp_96), dimensions={1} tmp_98 = f32[3,32]{1,0} add(tmp_95, tmp_97) tmp_99 = f32[3,32]{1,0} multiply(tmp_65, tmp_98) tmp_100 = f32[3,32]{1,0} divide(tmp_99, tmp_63) tmp_101 = f32[3,32]{1,0} maximum(tmp_100, tmp_63) ROOT tmp_102 = f32[49,32]{1,0} dot(tmp_37, tmp_101), lhs_contracting_dims={0}, rhs_contracting_dims={0} })")); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(), HloOpcode::kFusion); EXPECT_EQ(module->entry_computation()->root_instruction()->fusion_kind(), HloInstruction::FusionKind::kCustom); EXPECT_LE(module->entry_computation()->root_instruction()->operand_count(), TritonFusionAnalysis::kMaxParameterPerDotOperand * 2); } TEST_F(GemmFusionLevel2Test, DoNotFuseTooManyParametersWhenAnInstructionWouldAddMultipleParameters) { static_assert(TritonFusionAnalysis::kMaxParameterPerDotOperand == 4, "We have to update this test."); 
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { a = f32[3,49]{1,0} parameter(0) b = f32[3,49]{1,0} parameter(1) c = pred[3,49]{1,0} parameter(2) d = f32[3,49]{1,0} parameter(3) e = f32[3,49]{1,0} parameter(4) add0 = f32[3,49]{1,0} add(a, b) select = f32[3,49]{1,0} select(c, d, e) add1 = f32[3,49]{1,0} add(add0, select) f = f32[3,32]{1,0} parameter(5) ROOT tmp_102 = f32[49,32]{1,0} dot(add1, f), lhs_contracting_dims={0}, rhs_contracting_dims={0} })")); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(), HloOpcode::kFusion); EXPECT_EQ(module->entry_computation()->root_instruction()->fusion_kind(), HloInstruction::FusionKind::kCustom); EXPECT_LE(module->entry_computation()->root_instruction()->operand_count(), TritonFusionAnalysis::kMaxParameterPerDotOperand + 1); } TEST_F(GemmFusionLevel2Test, DoNotFuseTooManyParametersForConcat) { static_assert(TritonFusionAnalysis::kMaxParameterPerDotOperand == 4, "We have to update this test."); TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { a = f32[3,3]{1,0} parameter(0) b = f32[3,3]{1,0} parameter(1) c = f32[3,3]{1,0} parameter(2) d = f32[3,3]{1,0} parameter(3) e = f32[3,3]{1,0} parameter(4) f = f16[3,3]{1,0} parameter(5) concat = f32[15,3]{1,0} concatenate(a, b, c, d, e), dimensions={0} convert = f32[3,3]{1,0} convert(f) ROOT dot = f32[15,3]{1,0} dot(concat, convert), lhs_contracting_dims={1}, rhs_contracting_dims={1} })")); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); EXPECT_EQ(module->entry_computation()->root_instruction()->opcode(), HloOpcode::kFusion); EXPECT_EQ(module->entry_computation()->root_instruction()->fusion_kind(), HloInstruction::FusionKind::kCustom); EXPECT_LE(module->entry_computation()->root_instruction()->operand_count(), TritonFusionAnalysis::kMaxParameterPerDotOperand + 1); } TEST_F(GemmFusionLevel2Test, InstructionsReachableFromMultipleOperandsAreHandledCorrectly) { static_assert(TritonFusionAnalysis::kMaxParameterPerDotOperand == 4, "We have to update this test."); TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { a = f32[2,4]{1,0} parameter(0) b = f32[2,4]{1,0} parameter(1) c = f32[2,4]{1,0} parameter(2) d = f32[2,4]{1,0} parameter(3) e = f32[2,4]{1,0} parameter(4) add0 = f32[2,4]{1,0} add(a, b) add1 = f32[2,4]{1,0} add(add0, c) add2 = f32[2,4]{1,0} add(add1, d) add3 = f32[2,4]{1,0} add(add2, e) ROOT r = f32[2,2]{1,0} dot(add3, add0), lhs_contracting_dims={1}, rhs_contracting_dims={1} })")); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); } TEST_F(GemmFusionLevel2Test, EachScopeIsFusedToASeparateSubgraph) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { a = f32[2,4]{1,0} parameter(0) b = f32[2,4]{1,0} parameter(1) add = f32[2,4]{1,0} add(a, b) ROOT r = f32[2,2]{1,0} dot(add, add), lhs_contracting_dims={1}, rhs_contracting_dims={1} })")); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); MatchHloModule(*module, R"( CHECK-DAG: %[[P0:.*]] = f32[2,4]{1,0} parameter(0) CHECK-DAG: %[[P1:.*]] = f32[2,4]{1,0} parameter(1) CHECK-DAG: %[[ADD0:.*]] = f32[2,4]{1,0} add(f32[2,4]{1,0} %[[P0]], f32[2,4]{1,0} %[[P1]]) CHECK-DAG: %[[P2:.*]] = f32[2,4]{1,0} parameter(2) CHECK-DAG: %[[P3:.*]] = f32[2,4]{1,0} parameter(3) CHECK-DAG: %[[ADD1:.*]] = f32[2,4]{1,0} 
add(f32[2,4]{1,0} %[[P2]], f32[2,4]{1,0} %[[P3]]) CHECK-DAG: ROOT {{.*}} = f32[2,2]{1,0} dot(f32[2,4]{1,0} %[[ADD0]], f32[2,4]{1,0} %[[ADD1]]) CHECK: ENTRY CHECK-DAG: %[[P0:.*]] = f32[2,4]{1,0} parameter(0) CHECK-DAG: %[[P1:.*]] = f32[2,4]{1,0} parameter(1) CHECK-DAG: ROOT {{.*}} = f32[2,2]{1,0} CHECK-SAME: fusion(f32[2,4]{1,0} %[[P0]], f32[2,4]{1,0} %[[P1]], f32[2,4]{1,0} %[[P0]], f32[2,4]{1,0} %[[P1]]), CHECK-SAME: kind=kCustom CHECK-SAME: __triton_gemm })"); } TEST_F(GemmFusionLevel2Test, ParamNodesAreReusedIfTheyHaveTheSameIterSpec) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { a = f32[2,4]{1,0} parameter(0) add = f32[2,4]{1,0} add(a, a) ROOT r = f32[2,2]{1,0} dot(add, add), lhs_contracting_dims={1}, rhs_contracting_dims={1} })")); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); MatchHloModule(*module, R"( CHECK-DAG: %[[P0:.*]] = f32[2,4]{1,0} parameter(0) CHECK-DAG: %[[ADD0:.*]] = f32[2,4]{1,0} add(f32[2,4]{1,0} %[[P0]], f32[2,4]{1,0} %[[P0]]) CHECK-DAG: %[[P1:.*]] = f32[2,4]{1,0} parameter(1) CHECK-DAG: %[[ADD1:.*]] = f32[2,4]{1,0} add(f32[2,4]{1,0} %[[P1]], f32[2,4]{1,0} %[[P1]]) CHECK-DAG: ROOT {{.*}} = f32[2,2]{1,0} dot(f32[2,4]{1,0} %[[ADD0]], f32[2,4]{1,0} %[[ADD1]]) CHECK: ENTRY CHECK-DAG: %[[P0:.*]] = f32[2,4]{1,0} parameter(0) CHECK-DAG: ROOT {{.*}} = f32[2,2]{1,0} CHECK-SAME: fusion(f32[2,4]{1,0} %[[P0]], f32[2,4]{1,0} %[[P0]]) CHECK-SAME: kind=kCustom CHECK-SAME: __triton_gemm })"); } TEST_F(GemmFusionLevel2Test, NonParamNodesAreReusedIfTheyHaveTheSameIterSpec) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { a = f32[4,4]{1,0} parameter(0) b = f32[4,4]{1,0} parameter(1) negate = f32[4,4]{1,0} negate(a) sine = f32[4,4]{1,0} sine(negate) add = f32[4,4]{1,0} add(negate, sine) ROOT r = f32[4,4]{1,0} dot(add, b), lhs_contracting_dims={1}, rhs_contracting_dims={1} })")); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); MatchHloModule(*module, R"( CHECK-DAG: %[[P0:.*]] = f32[4,4]{1,0} parameter(0) CHECK-DAG: %[[P1:.*]] = f32[4,4]{1,0} parameter(1) CHECK-DAG: %[[NEGATE:.*]] = f32[4,4]{1,0} negate(f32[4,4]{1,0} %[[P0]]) CHECK-DAG: %[[SINE:.*]] = f32[4,4]{1,0} sine(f32[4,4]{1,0} %[[NEGATE]]) CHECK-DAG: %[[ADD:.*]] = f32[4,4]{1,0} add(f32[4,4]{1,0} %[[NEGATE]], f32[4,4]{1,0} %[[SINE]]) CHECK-DAG: ROOT {{.*}} = f32[4,4]{1,0} dot(f32[4,4]{1,0} %[[ADD]], f32[4,4]{1,0} %[[P1]]) CHECK: ENTRY CHECK-DAG: %[[P0:.*]] = f32[4,4]{1,0} parameter(0) CHECK-DAG: %[[P1:.*]] = f32[4,4]{1,0} parameter(1) CHECK-DAG: ROOT {{.*}} = f32[4,4]{1,0} CHECK-SAME: fusion(f32[4,4]{1,0} %[[P0]], f32[4,4]{1,0} %[[P1]]) CHECK-SAME: kind=kCustom CHECK-SAME: __triton_gemm })"); } TEST_F(GemmFusionLevel2Test, NodesAreNotReusedIfTheyHaveDifferentIterSpecs) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { a = f32[4,4]{1,0} parameter(0) b = f32[4,4]{1,0} parameter(1) tr_a = f32[4,4]{1,0} transpose(a), dimensions={1,0} add = f32[4,4]{1,0} add(a, tr_a) ROOT r = f32[4,4]{1,0} dot(add, b), lhs_contracting_dims={1}, rhs_contracting_dims={1} })")); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); MatchHloModule(*module, R"( CHECK-DAG: %[[P0:.*]] = f32[4,4]{1,0} parameter(0) CHECK-DAG: %[[P1:.*]] = f32[4,4]{1,0} parameter(1) CHECK-DAG: %[[P2:.*]] = f32[4,4]{1,0} parameter(2) CHECK-DAG: %[[TRANSPOSE:.*]] = f32[4,4]{1,0} transpose(f32[4,4]{1,0} %[[P1]]) CHECK-DAG: %[[ADD:.*]] = f32[4,4]{1,0} 
add(f32[4,4]{1,0} %[[P0]], f32[4,4]{1,0} %[[TRANSPOSE]]) CHECK-DAG: ROOT {{.*}} = f32[4,4]{1,0} dot(f32[4,4]{1,0} %[[ADD]], f32[4,4]{1,0} %[[P2]]) CHECK: ENTRY CHECK-DAG: %[[P0:.*]] = f32[4,4]{1,0} parameter(0) CHECK-DAG: %[[P1:.*]] = f32[4,4]{1,0} parameter(1) CHECK-DAG: ROOT {{.*}} = f32[4,4]{1,0} CHECK-SAME: fusion(f32[4,4]{1,0} %[[P0]], f32[4,4]{1,0} %[[P0]], f32[4,4]{1,0} %[[P1]]) CHECK-SAME: kind=kCustom CHECK-SAME: __triton_gemm })"); } TEST_F(GemmFusionLevel2Test, OperationsAddingMoreParametersGetMultipleTries) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( e { p0 = f32[2,2] parameter(0) c0 = f32[] constant(12345) b0 = f32[2,2] broadcast(c0), dimensions={} m0 = f32[2,2] multiply(p0, b0) c1 = f32[] constant(34567) b1 = f32[2,2] broadcast(c1), dimensions={} a0 = f32[2,2] add(m0, b1) b3 = f32[2,2,2] broadcast(a0), dimensions={0,1} p2 = f32[2,2,2] parameter(2) m2 = f32[2,2,2] multiply(p2, b3) p1 = f32[2]{0} parameter(1) c2 = f32[] constant(5678) b2 = f32[2] broadcast(c2), dimensions={} a1 = f32[2]{0} add(p1, b2) b4 = f32[2,2,2] broadcast(a1), dimensions={2} m1 = f32[2,2,2] multiply(m2, b4) b = f32[4,2] bitcast(m1) p3 = f16[2,2] parameter(3) p3c = f32[2,2] convert(p3) ROOT r = f32[4,2] dot(b, p3c), lhs_contracting_dims={1}, rhs_contracting_dims={0} })")); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch((m::Fusion(m::Parameter(), m::Parameter(), m::Parameter(), m::Parameter())))); } TEST_F(GemmFusionLevel2Test, GemmFusionBailsOutPreAmpere) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { p0 = f32[2,53] parameter(0) p0e = f32[2,53] exponential(p0) p1 = s16[53,2] parameter(1) p1c = f32[53,2] convert(p1) ROOT dot = f32[2,2] dot(p0e, p1c), lhs_contracting_dims={1}, rhs_contracting_dims={0} })")); EXPECT_THAT( GemmFusion(se::CudaComputeCapability{se::CudaComputeCapability::VOLTA, 0}) .Run(module.get()), tsl::testing::StatusIs( absl::StatusCode::kFailedPrecondition, ::testing::HasSubstr("Triton support is only enabled for Ampere GPUs " "(compute capability 8.0) and up, but got"))); } TEST_F(GemmFusionLevel2Test, GemmFusionSucceedsOnNonCudaGpu) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { p0 = f32[2,53] parameter(0) p0e = f32[2,53] exponential(p0) p1 = s16[53,2] parameter(1) p1c = f32[53,2] convert(p1) ROOT dot = f32[2,2] dot(p0e, p1c), lhs_contracting_dims={1}, rhs_contracting_dims={0} })")); EXPECT_TRUE(GemmFusion(se::RocmComputeCapability{}).Run(module.get()).ok()); } TEST_F(GemmFusionLevel2Test, ParameterUsedElementwiseTwiceIsFused) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( HloModule t ENTRY e { p0 = f32[2,35] parameter(0) p0n = f32[2,35] negate(p0) p0e = f32[2,35] exponential(p0) a = f32[2,35] add(p0e, p0n) p1 = f16[35,2] parameter(1) p1c = f32[35,2] convert(p1) ROOT dot = f32[2,2] dot(a, p1c), lhs_contracting_dims={1}, rhs_contracting_dims={0} })")); EXPECT_TRUE(GemmFusion(se::CudaComputeCapability{ se::CudaComputeCapability::AMPERE, 0}) .Run(module.get()) .value()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch((m::Fusion(m::Parameter(), m::Parameter())))); TF_ASSERT_OK_AND_ASSIGN( const auto analysis, TritonFusionAnalysis::Execute(*module->entry_computation() ->root_instruction() ->called_computations()[0])); 
EXPECT_EQ(analysis.ScopeParameters(TritonFusionAnalysis::Scope::LHS).size(), 1); EXPECT_EQ(analysis.ScopeParameters(TritonFusionAnalysis::Scope::RHS).size(), 1); } TEST_F(GemmFusionLevel2Test, ParameterUsedNonElementwiseTwiceIsFusedOnBothPaths) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( HloModule t ENTRY e { p0 = f32[4,4] parameter(0) p0t = f32[4,4] transpose(p0), dimensions={1,0} a = f32[4,4] add(p0, p0t) p1 = f16[4,5] parameter(1) p1c = f32[4,5] convert(p1) ROOT dot = f32[4,5] dot(a, p1c), lhs_contracting_dims={1}, rhs_contracting_dims={0} })")); EXPECT_TRUE(GemmFusion(se::CudaComputeCapability{ se::CudaComputeCapability::AMPERE, 0}) .Run(module.get()) .value()); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch((m::Fusion(m::Parameter(), m::Parameter(), m::Parameter())))); } TEST_F(GemmFusionLevel2Test, ComputationParameterWithMultipleUsersIsNotTrivialToFuse) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { p0 = f32[400,400] parameter(0) c0 = f16[400,400] convert(p0) p1 = f16[400,400] parameter(1) dot0 = f16[400,400] dot(c0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} c1 = f16[400,400] convert(p0) p2 = f16[400,400] parameter(2) dot1 = f16[400,400] dot(c1, p2), lhs_contracting_dims={1}, rhs_contracting_dims={0} ROOT a = f16[400,400] add(dot0, dot1) })")); EXPECT_FALSE(GemmFusion(se::CudaComputeCapability{ se::CudaComputeCapability::AMPERE, 0}) .Run(module.get()) .value()); } TEST_F(GemmFusionLevel2Test, NarrowingConversionIsAlwaysBetterToFuse) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( ENTRY e { p0 = s8[512,512] parameter(0) c0 = f16[512,512] convert(p0) p1 = f16[512,512] parameter(1) dot0 = f16[512,512] dot(c0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} n = f16[512,512] negate(c0) ROOT a = f16[512,512] add(dot0, n) })")); EXPECT_TRUE(GemmFusion(se::CudaComputeCapability{ se::CudaComputeCapability::AMPERE, 0}) .Run(module.get()) .value()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch((m::Add(m::Fusion(m::Parameter(), m::Parameter()), m::Negate())))); } TEST_F(GemmFusionLevel2Test, NestedSlicingIsAnalyzedCorrectly) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( triton_gemm_d_computation { p0 = f32[6,24]{1,0} parameter(0) slice1 = f32[5,20]{1,0} slice(p0), slice={[1:6], [3:23]} n1 = f32[5,20]{1,0} negate(slice1) slice2 = f32[3,7]{1,0} slice(n1), slice={[1:4], [13:20]} p1 = f32[7,37]{1,0} parameter(1) ROOT d = f32[3,37]{1,0} dot(slice2, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} } ENTRY e { p0 = f32[7,37]{1,0} parameter(0) p1 = f32[6,24]{1,0} parameter(1) ROOT triton_gemm_d = f32[3,37]{1,0} fusion(p1, p0), kind=kCustom, calls=triton_gemm_d_computation })")); const HloComputation* computation = module->entry_computation()->root_instruction()->called_computations()[0]; TF_ASSERT_OK_AND_ASSIGN(const auto analysis, TritonFusionAnalysis::Execute(*computation)); EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, computation->parameter_instruction(0), 0), ElementsAre(FieldsAre(24, 6, 2, 3, ElementsAre(3)))); EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::LHS, computation->parameter_instruction(0), 1), ElementsAre(FieldsAre(1, 24, 16, 7, ElementsAre(7)))); } TEST_F(GemmFusionLevel2Test, FusedConcatenationIsAnalyzedCorrectly) { 
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( e { p0 = s8[153,1536] parameter(0) p1 = s8[153,128] parameter(1) p2 = s8[153,256] parameter(2) cat = s8[153,1920] concatenate(p0, p1, p2), dimensions={1} cvt = bf16[153,1920] convert(cat) p3 = bf16[16,153] parameter(3) ROOT d = bf16[16,1920] dot(p3, cvt), lhs_contracting_dims={1}, rhs_contracting_dims={0} })")); EXPECT_TRUE(GemmFusion(se::CudaComputeCapability{ se::CudaComputeCapability::AMPERE, 0}) .Run(module.get()) .value()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch((m::Fusion(m::Parameter(), m::Parameter(), m::Parameter(), m::Parameter())))); const HloComputation* computation = module->entry_computation()->root_instruction()->called_computations()[0]; TF_ASSERT_OK_AND_ASSIGN(const auto analysis, TritonFusionAnalysis::Execute(*computation)); EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, computation->parameter_instruction(1), 0), ElementsAre(FieldsAre(1536, 153, 0, 153, ElementsAre(153)))); EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, computation->parameter_instruction(1), 1), ElementsAre(FieldsAre(1, 1536, 0, 1536, ElementsAre(1536)))); EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, computation->parameter_instruction(2), 0), ElementsAre(FieldsAre(128, 153, 0, 153, ElementsAre(153)))); EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, computation->parameter_instruction(2), 1), ElementsAre(FieldsAre(1, 128, -1536, 128, ElementsAre(128)))); EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, computation->parameter_instruction(3), 0), ElementsAre(FieldsAre(256, 153, 0, 153, ElementsAre(153)))); EXPECT_THAT(*analysis.IterSpec(TritonFusionAnalysis::Scope::RHS, computation->parameter_instruction(3), 1), ElementsAre(FieldsAre(1, 256, -1536 - 128, 256, ElementsAre(256)))); } TEST_F(GemmFusionLevel2Test, IndivisibleConcatenationIsNotFused) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( e { p0 = s8[124,1024] parameter(0) p1 = s8[124,1001] parameter(1) cat = s8[124,2025] concatenate(p0, p1), dimensions={1} cvt = f16[124,2025] convert(cat) p2 = f16[123,124] parameter(2) ROOT d = f16[2025,123] dot(cvt, p2), lhs_contracting_dims={0}, rhs_contracting_dims={1} })")); EXPECT_TRUE(GemmFusion(se::CudaComputeCapability{ se::CudaComputeCapability::AMPERE, 0}) .Run(module.get()) .value()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch((m::Fusion(m::Concatenate(), m::Parameter())))); } TEST_F(GemmFusionLevel2Test, ConcatenationOfContractingIsNotFused) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( e { p0 = s8[124,1024] parameter(0) p1 = s8[124,1024] parameter(1) cat = s8[124,2048] concatenate(p0, p1), dimensions={1} cvt = f16[124,2048] convert(cat) p2 = f16[123,2048] parameter(2) ROOT d = f16[124,123] dot(cvt, p2), lhs_contracting_dims={1}, rhs_contracting_dims={1} })")); EXPECT_TRUE(GemmFusion(se::CudaComputeCapability{ se::CudaComputeCapability::AMPERE, 0}) .Run(module.get()) .value()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch((m::Fusion(m::Concatenate(), m::Parameter())))); } TEST_F(GemmFusionLevel2Test, ConcatenationOfBatchIsNotFused) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( e { p0 = s8[124,1024,50] parameter(0) p1 = s8[124,1024,50] parameter(1) cat = s8[124,2048,50] 
concatenate(p0, p1), dimensions={1} cvt = f16[124,2048,50] convert(cat) p2 = f16[123,2048,50] parameter(2) ROOT d = f16[2048,124,123] dot(cvt, p2), lhs_batch_dims={1}, rhs_batch_dims={1}, lhs_contracting_dims={2}, rhs_contracting_dims={2} })")); EXPECT_TRUE(GemmFusion(se::CudaComputeCapability{ se::CudaComputeCapability::AMPERE, 0}) .Run(module.get()) .value()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch((m::Fusion(m::Concatenate(), m::Parameter())))); } TEST_F(GemmFusionLevel2Test, DifferentConcatenationOfSameParametersIsFusedViaNodeDuplication) { TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(R"( e { p0 = s8[128,2] parameter(0) p1 = s8[128,2] parameter(1) cat0 = s8[256,2] concatenate(p0, p1), dimensions={0} cvt0 = f16[256,2] convert(cat0) cat1 = s8[256,2] concatenate(p1, p0), dimensions={0} n1 = s8[256,2] negate(cat1) cvt1 = f16[256,2] convert(n1) a = f16[256,2] add(cvt1, cvt0) p2 = f16[2,18] parameter(2) ROOT d = f16[18,256] dot(p2, a), lhs_contracting_dims={0}, rhs_contracting_dims={1} })")); EXPECT_TRUE(GemmFusion(se::CudaComputeCapability{ se::CudaComputeCapability::AMPERE, 0}) .Run(module.get()) .value()); EXPECT_THAT( module->entry_computation()->root_instruction(), GmockMatch((m::Fusion(m::Parameter(), m::Parameter(), m::Parameter(), m::Parameter(), m::Parameter())))); } TEST_F(GemmFusionTest, CopiesDotMetadataToFusionOp) { auto module = ParseAndReturnVerifiedModule(R"( HloModule m ENTRY e { p0 = f16[2,18] parameter(0) p1 = f16[256,2] parameter(1) ROOT d = f16[18,256] dot(p0, p1), lhs_contracting_dims={0}, rhs_contracting_dims={1}, metadata={op_name="foo"} })") .value(); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); EXPECT_EQ( module->entry_computation()->root_instruction()->metadata().op_name(), "foo"); } TEST_F(GemmFusionTest, FusesBroadcastOfScalarEpilogues) { auto module = ParseAndReturnVerifiedModule(R"( HloModule m ENTRY e { p0 = f16[2,18] parameter(0) p1 = f16[256,2] parameter(1) d = f16[18,256] dot(p0, p1), lhs_contracting_dims={0}, rhs_contracting_dims={1} p2 = f16[1] parameter(2) p3 = f16[1] parameter(3) m0 = f16[1] multiply(f16[1] p2, f16[1] p3) bc = f16[] bitcast(m0) b = f16[18,256] broadcast(f16[] bc) ROOT m = f16[18,256] multiply(d, b) })") .value(); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); EXPECT_THAT(module->entry_computation()->root_instruction(), GmockMatch((m::Fusion(m::Parameter(), m::Parameter(), m::Parameter(), m::Parameter())))); } class SmallDotGemmFusionTest : public GemmFusionTest { public: DebugOptions GetDebugOptionsForTest() override { DebugOptions debug_options = GemmFusionTest::GetDebugOptionsForTest(); debug_options.set_xla_gpu_gemm_rewrite_size_threshold(100); return debug_options; } }; TEST_F(SmallDotGemmFusionTest, SkipSmallMatrixMultiplicationRewrite) { auto module = ParseAndReturnVerifiedModule(R"( HloModule m ENTRY e { p0 = f16[2,10] parameter(0) p1 = f16[10,2] parameter(1) ROOT d = f16[10,10] dot(p0, p1), lhs_contracting_dims={0}, rhs_contracting_dims={1} })") .value(); EXPECT_FALSE(GemmFusion(gpu_version_).Run(module.get()).value()); MatchHloModule(*module, R"( ; CHECK-LABEL: ENTRY %e ({{.*}}: f16[2,10], {{.*}}: f16[10,2]) -> f16[10,10] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f16[2,10]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = f16[10,2]{1,0} parameter(1) ; CHECK: ROOT {{.*}} = f16[10,10]{1,0} dot(f16[2,10]{1,0} [[P0]], f16[10,2]{1,0} [[P1]]) })"); } TEST_F(SmallDotGemmFusionTest, LargeMatrixMultiplicationIsRewritten) { auto 
module = ParseAndReturnVerifiedModule(R"( HloModule m ENTRY e { p0 = f16[2,18] parameter(0) p1 = f16[50,2] parameter(1) ROOT d = f16[18,50] dot(p0, p1), lhs_contracting_dims={0}, rhs_contracting_dims={1} })") .value(); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); MatchHloModule(*module, R"( ; CHECK-LABEL: ENTRY %e ({{.*}}: f16[2,18], {{.*}}: f16[50,2]) -> f16[18,50] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f16[2,18]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = f16[50,2]{1,0} parameter(1) ; CHECK: ROOT {{.*}} = f16[18,50]{1,0} ; CHECK: fusion(f16[2,18]{1,0} [[P0]], f16[50,2]{1,0} [[P1]]), ; CHECK: kind=kCustom ; CHECK: __triton_gemm })"); } class SparseDotTest : public GemmFusionTest {}; TEST_F(SparseDotTest, DotWithSparseLhsOperandIsRewritten) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test ENTRY main { lhs = f16[2,16] parameter(0) rhs = f16[32,2] parameter(1) meta = u16[2,2] parameter(2) ROOT dot = f32[2,2] dot(lhs, rhs, meta), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sparsity=L.1@2:4 })") .value(); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); MatchHloModule(*module, R"( ; CHECK-LABEL: ENTRY %main ({{.*}}: f16[2,16], {{.*}}: f16[32,2], {{.*}}: u16[2,2]) -> f32[2,2] { ; CHECK-NEXT: [[P0:%[^ ]+]] = f16[2,16]{1,0} parameter(0) ; CHECK-NEXT: [[P1:%[^ ]+]] = f16[32,2]{1,0} parameter(1) ; CHECK-NEXT: [[META:%[^ ]+]] = u16[2,2]{1,0} parameter(2) ; CHECK: ROOT {{.*}} = f32[2,2]{1,0} ; CHECK-SAME: fusion(f16[2,16]{1,0} [[P0]], f16[32,2]{1,0} [[P1]], u16[2,2]{1,0} [[META]]), ; CHECK-SAME: kind=kCustom ; CHECK-SAME: __triton_gemm })"); } TEST_F(SparseDotTest, DotWithSparseRhsOperandIsNotSupported) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test ENTRY main { lhs = f16[2,32] parameter(0) rhs = f16[16,2] parameter(1) meta = u16[2,2] parameter(2) ROOT dot = f32[2,2] dot(lhs, rhs, meta), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sparsity=R.0@2:4 })") .value(); auto result = GemmFusion(gpu_version_).Run(module.get()); EXPECT_FALSE(result.ok()); } TEST_F(SparseDotTest, UnsupportedSparsityType) { auto module = ParseAndReturnVerifiedModule(R"( HloModule test ENTRY main { lhs = f16[2,8] parameter(0) rhs = f16[32,2] parameter(1) meta = u16[2,1] parameter(2) ROOT dot = f32[2,2] dot(lhs, rhs, meta), lhs_contracting_dims={1}, rhs_contracting_dims={0}, sparsity=L.1@1:4 })") .value(); auto result = GemmFusion(gpu_version_).Run(module.get()); EXPECT_FALSE(result.ok()); } TEST_F(SmallDotGemmFusionTest, Int4DotIsRewritten) { constexpr auto kInt4Dot = R"( ENTRY e { p0 = s8[16,16] parameter(0) p1 = s4[16,16] parameter(1) p1c = bf16[16,16] convert(p1) ROOT dot = bf16[16,16] dot(p0, p1c), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(kInt4Dot)); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); } TEST_F(SmallDotGemmFusionTest, Int4ConcatPlusConvertIsRewritten) { const std::string kInt4Dot = R"( ENTRY main { lhs1 = s4[4,1024]{1,0} parameter(0) lhs2 = s4[4,1024]{1,0} parameter(1) rhs = bf16[1024,4]{1,0} parameter(2) lhs_concat = s4[8,1024]{1,0} concatenate(lhs1, lhs2), dimensions={0} lhs_converted = bf16[8,1024]{1,0} convert(lhs_concat) ROOT dot = bf16[8,4]{1,0} dot(lhs_converted, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(kInt4Dot)); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); 
MatchHloModule(*module, R"( CHECK: gemm_fusion_dot_computation CHECK: %parameter_0 = s4[8,1024]{1,0} parameter(0) CHECK: ENTRY CHECK-DAG: ROOT {{.*}} = bf16[8,4]{1,0} fusion(s4[8,1024]{1,0} %lhs_concat, bf16[1024,4]{1,0} %rhs) })"); } TEST_F(SmallDotGemmFusionTest, Int4ConvertPlusNegateIsRewritten) { const std::string kInt4Dot = R"( ENTRY main { lhs = s4[8,1024]{1,0} parameter(0) rhs = f32[1024,4]{1,0} parameter(1) lhs_converted = f32[8,1024]{1,0} convert(lhs) lhs_negated = f32[8,1024]{1,0} negate(lhs_converted) ROOT dot = f32[8,4]{1,0} dot(lhs_negated, rhs), lhs_contracting_dims={1}, rhs_contracting_dims={0} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(kInt4Dot)); EXPECT_TRUE(GemmFusion(gpu_version_).Run(module.get()).value()); MatchHloModule(*module, R"( CHECK: gemm_fusion_dot_computation CHECK: %parameter_0 = s4[8,1024]{1,0} parameter(0) CHECK: ENTRY CHECK-DAG: ROOT {{.*}} = f32[8,4]{1,0} fusion(s4[8,1024]{1,0} %lhs, f32[1024,4]{1,0} %rhs) })"); } TEST_F(SmallDotGemmFusionTest, Int4WithMinorBatchDimIsNotRewritten) { const std::string kInt4Dot = R"( ENTRY main { lhs = s4[8,1024,16]{2,1,0} parameter(0) lhs_converted = bf16[8,1024,16]{2,1,0} convert(lhs) rhs = bf16[16,1024,64]{2,1,0} parameter(1) ROOT dot = bf16[16,8,64]{2,1,0} dot(lhs_converted, rhs), lhs_batch_dims={2}, lhs_contracting_dims={1}, rhs_batch_dims={0}, rhs_contracting_dims={1} } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<VerifiedHloModule> module, ParseAndReturnVerifiedModule(kInt4Dot)); TF_ASSERT_OK_AND_ASSIGN(auto result, GemmFusion(gpu_version_).Run(module.get())); EXPECT_FALSE(result); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/gemm_fusion.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/gemm_fusion_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
3bd1293f-ecb9-4fcb-98fd-11ab2bc9252d
cpp
tensorflow/tensorflow
schedule_postprocessing
third_party/xla/xla/service/gpu/transforms/schedule_postprocessing.cc
third_party/xla/xla/service/gpu/transforms/schedule_postprocessing_test.cc
#include "xla/service/gpu/transforms/schedule_postprocessing.h" #include <vector> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/ir/hlo_schedule.h" #include "xla/hlo/utils/hlo_query.h" #include "xla/service/gpu/backend_configs.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { using CustomCallInComputation = absl::flat_hash_map<const HloComputation*, bool>; bool MayInvokeCustomCall( const HloInstruction* hlo, const CustomCallInComputation& custom_call_in_computation) { if (hlo->opcode() == HloOpcode::kCustomCall) { return true; } return absl::c_any_of( hlo->called_computations(), [&](const HloComputation* callee) { return custom_call_in_computation.find(callee)->second; }); } absl::StatusOr<bool> IsRelevantAsynchronousStart(const HloInstruction* hlo) { if (!hlo_query::IsAsyncCollectiveStartOp(hlo, false)) { return false; } TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config, hlo->backend_config<GpuBackendConfig>()); const CollectiveBackendConfig& collective_backend_config = gpu_config.collective_backend_config(); return !collective_backend_config.is_sync(); } absl::StatusOr<bool> IsRelevantAsynchronousDone(const HloInstruction* hlo) { return hlo_query::IsAsyncCollectiveDoneOp(hlo, false); } absl::StatusOr<bool> ProcessComputation( const HloSchedule& schedule, HloComputation* computation, CustomCallInComputation& custom_call_in_computation) { bool changed = false; bool has_custom_call = false; absl::flat_hash_set<HloInstruction*> async_starts; const HloInstructionSequence& sequence = schedule.sequence(computation); const std::vector<HloInstruction*>& all_instructions = sequence.instructions(); for (HloInstruction* hlo : all_instructions) { if (MayInvokeCustomCall(hlo, custom_call_in_computation)) { async_starts.clear(); has_custom_call = true; continue; } TF_ASSIGN_OR_RETURN(bool is_async_start, IsRelevantAsynchronousStart(hlo)); if (is_async_start) { async_starts.insert(hlo); continue; } TF_ASSIGN_OR_RETURN(bool is_async_done, IsRelevantAsynchronousDone(hlo)); if (is_async_done) { HloInstruction* async_start = hlo->mutable_operand(0); if (async_starts.contains(async_start)) { changed = true; TF_ASSIGN_OR_RETURN(GpuBackendConfig gpu_config, async_start->backend_config<GpuBackendConfig>()); CollectiveBackendConfig& collective_backend_config = *gpu_config.mutable_collective_backend_config(); collective_backend_config.set_no_parallel_custom_call(true); TF_RETURN_IF_ERROR(async_start->set_backend_config(gpu_config)); async_starts.erase(async_start); } } } custom_call_in_computation[computation] = has_custom_call; return changed; } } absl::StatusOr<bool> SchedulePostprocessing::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { if (!module->has_schedule()) return false; HloSchedule& schedule = module->schedule(); bool changed = false; CustomCallInComputation custom_call_in_computation; std::vector<HloComputation*> all_computations = module->MakeComputationPostOrder(execution_threads); for (auto iter = all_computations.begin(); iter != all_computations.end(); ++iter) { HloComputation* computation = *iter; if (computation->IsFusionComputation()) { custom_call_in_computation[computation] = false; continue; } 
TF_ASSIGN_OR_RETURN( bool result, ProcessComputation(schedule, computation, custom_call_in_computation)); changed |= result; } return changed; } } }
#include "xla/service/gpu/transforms/schedule_postprocessing.h" #include <memory> #include <gtest/gtest.h> #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/hlo_parser.h" #include "xla/tests/hlo_test_base.h" #include "xla/util.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { using SchedulePostprocessingTest = HloTestBase; TEST_F(SchedulePostprocessingTest, SynchronousOpsNotChanged) { constexpr absl::string_view kHloString = R"( HloModule module, is_scheduled=true ENTRY entry { pf32 = f32[1] parameter(0) all-gather-start = (f32[1], f32[2]) all-gather-start(pf32), dimensions={0}, backend_config={"collective_backend_config":{"is_sync":true,"no_parallel_custom_call":false}} ROOT all-gather-done = f32[2] all-gather-done(all-gather-start) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnUnverifiedModule((kHloString))); SchedulePostprocessing pass; TF_ASSERT_OK_AND_ASSIGN(bool changed, pass.Run(module.get())); EXPECT_FALSE(changed); } TEST_F(SchedulePostprocessingTest, P2POpsNotChanged) { constexpr absl::string_view kHloString = R"( HloModule module, is_scheduled=true ENTRY main { f0 = f32[] constant(0.0) init = f32[1, 1024, 1024] broadcast(f0), dimensions={} after-all = token[] after-all() recv = (f32[1, 1024, 1024], u32[], token[]) recv(after-all), channel_id=2, frontend_attributes={ _xla_send_recv_source_target_pairs="{{0,1}, {1,2}}" } recv-done = (f32[1, 1024, 1024], token[]) recv-done(recv), channel_id=2 ROOT recv-data = f32[1, 1024, 1024] get-tuple-element(recv-done), index=0 } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnUnverifiedModule((kHloString))); SchedulePostprocessing pass; TF_ASSERT_OK_AND_ASSIGN(bool changed, pass.Run(module.get())); EXPECT_FALSE(changed); } TEST_F(SchedulePostprocessingTest, AsynchronousOpsChanged) { constexpr absl::string_view kHloString = R"( HloModule module, is_scheduled=true ENTRY entry { pf32 = f32[1] parameter(0) pf32.2 = f32[1] custom-call(pf32), custom_call_target="my_custom_call" all-gather-start = (f32[1], f32[2]) all-gather-start(pf32.2), dimensions={0}, backend_config={"collective_backend_config":{"is_sync":false}} ROOT all-gather-done = f32[2] all-gather-done(all-gather-start) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnUnverifiedModule((kHloString))); SchedulePostprocessing pass; TF_ASSERT_OK_AND_ASSIGN(bool changed, pass.Run(module.get())); EXPECT_TRUE(changed); HloInstruction* start = FindInstruction(module.get(), "all-gather-start"); TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig gpu_config, start->backend_config<GpuBackendConfig>()); const CollectiveBackendConfig& collective_backend_config = gpu_config.collective_backend_config(); EXPECT_TRUE(collective_backend_config.no_parallel_custom_call()); } TEST_F(SchedulePostprocessingTest, AsynchronousOpsWithParallelCustomcall) { constexpr absl::string_view kHloString = R"( HloModule module, is_scheduled=true ENTRY entry { pf32 = f32[1] parameter(0) all-gather-start = (f32[1], f32[2]) all-gather-start(pf32), dimensions={0}, backend_config={"collective_backend_config":{"is_sync":false}} pf32.2 = f32[1] custom-call(pf32), custom_call_target="my_custom_call" all-gather-done = f32[2] all-gather-done(all-gather-start) ROOT out = (f32[1], f32[2]) tuple(f32[1] pf32.2, f32[2] all-gather-done) } )"; 
TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnUnverifiedModule((kHloString))); SchedulePostprocessing pass; TF_ASSERT_OK_AND_ASSIGN(bool changed, pass.Run(module.get())); EXPECT_FALSE(changed); HloInstruction* start = FindInstruction(module.get(), "all-gather-start"); TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig gpu_config, start->backend_config<GpuBackendConfig>()); const CollectiveBackendConfig& collective_backend_config = gpu_config.collective_backend_config(); EXPECT_FALSE(collective_backend_config.no_parallel_custom_call()); } TEST_F(SchedulePostprocessingTest, AsynchronousOpsWithParallelNestedCustomcall) { constexpr absl::string_view kHloString = R"( HloModule module, is_scheduled=true foo { v = f32[1] parameter(0) ROOT ret = f32[1] custom-call(v), custom_call_target="my_custom_call" } ENTRY entry { pf32 = f32[1] parameter(0) all-gather-start = (f32[1], f32[2]) all-gather-start(pf32), dimensions={0}, backend_config={"collective_backend_config":{"is_sync":false}} pf32.2 = f32[1] call(f32[1] pf32), to_apply=foo all-gather-done = f32[2] all-gather-done(all-gather-start) ROOT out = (f32[1], f32[2]) tuple(f32[1] pf32.2, f32[2] all-gather-done) } )"; TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnUnverifiedModule((kHloString))); SchedulePostprocessing pass; TF_ASSERT_OK_AND_ASSIGN(bool changed, pass.Run(module.get())); EXPECT_FALSE(changed); HloInstruction* start = FindInstruction(module.get(), "all-gather-start"); TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig gpu_config, start->backend_config<GpuBackendConfig>()); const CollectiveBackendConfig& collective_backend_config = gpu_config.collective_backend_config(); EXPECT_FALSE(collective_backend_config.no_parallel_custom_call()); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/schedule_postprocessing.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/schedule_postprocessing_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
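A minimal illustrative sketch of the pattern the tests above share (not part of the test file itself): run SchedulePostprocessing on an already scheduled module, then read the collective backend config off the async start op to see whether the pass recorded that no custom call runs in parallel with it. It assumes a `module` parsed from the AsynchronousOpsChanged HLO string above, inside the same HloTestBase fixture.

SchedulePostprocessing pass;
TF_ASSERT_OK_AND_ASSIGN(bool changed, pass.Run(module.get()));
HloInstruction* start = FindInstruction(module.get(), "all-gather-start");
TF_ASSERT_OK_AND_ASSIGN(GpuBackendConfig gpu_config,
                        start->backend_config<GpuBackendConfig>());
// The custom call is an operand of the async start, so nothing can execute
// between start and done: the pass flags the collective accordingly.
EXPECT_TRUE(changed);
EXPECT_TRUE(gpu_config.collective_backend_config().no_parallel_custom_call());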
75ea6ca2-c38e-4fd8-ab84-f4778b0ea94e
cpp
tensorflow/tensorflow
all_reduce_splitter
third_party/xla/xla/service/gpu/transforms/all_reduce_splitter.cc
third_party/xla/xla/service/gpu/transforms/all_reduce_splitter_test.cc
#include "xla/service/gpu/transforms/all_reduce_splitter.h" #include <cstdint> #include <optional> #include <string> #include <variant> #include <vector> #include "absl/cleanup/cleanup.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "absl/strings/substitute.h" #include "xla/hlo/ir/collective_device_list.h" #include "xla/hlo/ir/hlo_casting_utils.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/utils/hlo_query.h" #include "xla/service/collective_opt_utils.h" #include "xla/service/hlo_module_config.h" #include "xla/shape.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { namespace { struct ARReplicaGroups { std::vector<ReplicaGroup> first_ar_replica_groups; std::vector<ReplicaGroup> second_ar_replica_groups; }; struct AllReduceRewriteSpec { int split_dim; int group_size; HloAllReduceInstruction* all_reduce; HloDynamicSliceInstruction* dynamic_slice; ARReplicaGroups replica_groups; std::string ToString() { return absl::Substitute( "{\n split_dim=$0\n group_size=$1\n all_reduce=$2\n " "dynamic_slice=$3\n}\n", split_dim, group_size, all_reduce->ToString(), dynamic_slice->ToString()); } }; struct RewriteInfeasibleReason { const HloInstruction* ar; std::string message; }; struct ReplicaGroups { std::vector<ReplicaGroup> replica_groups; template <typename H> friend H AbslHashValue(H h, const ReplicaGroups& rg) { return H::combine(std::move(h), rg.replica_groups.size()); } friend bool operator==(const ReplicaGroups& item, const ReplicaGroups& other) { if (item.replica_groups.size() != other.replica_groups.size()) { return false; } for (int i = 0; i < item.replica_groups.size(); i++) { const ReplicaGroup& item_replica_group = item.replica_groups[i]; const ReplicaGroup& other_replica_group = other.replica_groups[i]; for (int i = 0; i < item_replica_group.replica_ids_size(); i++) { if (item_replica_group.replica_ids(i) != other_replica_group.replica_ids(i)) { return false; } } } return true; } }; using ARReplicaGroupMap = absl::flat_hash_map<ReplicaGroups, std::vector<const HloAllReduceInstruction*>>; using RewriteDecision = std::variant<AllReduceRewriteSpec, RewriteInfeasibleReason>; std::optional<int> GetSplitDim(const HloAllReduceInstruction& ar, const HloDynamicSliceInstruction& ds) { int split_dim = -1; int num_dims = 0; for (int64_t dim = 0; dim < ar.shape().rank(); ++dim) { if (ar.shape().dimensions(dim) != ds.shape().dimensions(dim)) { num_dims++; split_dim = dim; } } if (num_dims != 1) { VLOG(2) << "No support for multiple nor 0 split dims."; return std::nullopt; } return split_dim; } std::optional<int> GetProcessGroupSize(const HloAllReduceInstruction& ar, const HloDynamicSliceInstruction& ds) { CHECK(ds.operand(0) == &ar) << "Irrelevant AR + DS pair."; std::optional<int> split_dim = GetSplitDim(ar, ds); if (!split_dim.has_value()) { return std::nullopt; } return ar.shape().dimensions(*split_dim) / ds.dynamic_slice_sizes()[*split_dim]; } ARReplicaGroupMap GetReplicaGroupsMap(HloComputation& computation) { ARReplicaGroupMap map; hlo_query::ForEachInstructionWithOpcode( computation, HloOpcode::kAllReduce, [&map](const HloInstruction* instruction) { const HloAllReduceInstruction* ar = 
Cast<HloAllReduceInstruction>(instruction); auto rgs = ReplicaGroups{ar->replica_groups()}; map[rgs].push_back(ar); }); return map; } ARReplicaGroups GetNewReplicaGroups(int group_size, int num_partitions) { CHECK_EQ(num_partitions % group_size, 0); std::vector<ReplicaGroup> first_ar_rgs, second_ar_rgs; int num_units = num_partitions / group_size; first_ar_rgs.reserve(num_units); second_ar_rgs.reserve(group_size); for (int u = 0; u < group_size * num_units; u += group_size) { ReplicaGroup& group = first_ar_rgs.emplace_back(); for (int r = u; r < u + group_size; r++) { group.add_replica_ids(r); } } for (int g = 0; g < group_size; g++) { ReplicaGroup& group = second_ar_rgs.emplace_back(); for (int r = g; r < group_size * num_units; r += group_size) { group.add_replica_ids(r); } } return { first_ar_rgs, second_ar_rgs, }; } bool IsLogicalReduceScatter(const HloModule& module, const AllReduceRewriteSpec& spec, HloComputation& computation) { HloAllReduceInstruction& ar = *spec.all_reduce; CHECK_EQ(ar.user_count(), 1); CHECK_EQ(module.config().replica_count(), 1); HloInstruction* first_ar = computation.AddInstruction(HloInstruction::CreateAllReduce( ar.shape(), ar.operands(), ar.to_apply(), CollectiveDeviceList(spec.replica_groups.first_ar_replica_groups), ar.constrain_layout(), hlo_query::NextChannelId(module), ar.use_global_device_ids())); HloInstruction* ds = ar.users()[0]; auto* old_operand = ds->mutable_operand(0); if (!ds->ReplaceOperandWith(0, first_ar).ok()) { return false; } absl::Cleanup _ = [&] { CHECK_OK(ds->ReplaceOperandWith(0, old_operand)); CHECK_OK(computation.RemoveInstruction(first_ar)); }; return MatchReduceScatter(Cast<HloAllReduceInstruction>(first_ar), module.config().num_partitions(), module.config().replica_count(), false, true) .has_value(); } bool IsProfitableToSplit(const ARReplicaGroupMap& replica_map, const AllReduceRewriteSpec& spec) { auto new_rgs = spec.replica_groups; bool first_replica_exists = replica_map.contains(ReplicaGroups{new_rgs.first_ar_replica_groups}); bool second_replica_exists = replica_map.contains(ReplicaGroups{new_rgs.second_ar_replica_groups}); return first_replica_exists || second_replica_exists; } RewriteDecision CanRewrite(const HloModule& module, const ARReplicaGroupMap& replica_map, HloComputation& computation, HloInstruction& instruction) { const HloModuleConfig& config = module.config(); if (config.use_auto_spmd_partitioning() || !config.use_spmd_partitioning() || config.replica_count() != 1) { return RewriteInfeasibleReason{ &instruction, "Supporting only SPMD partitioning scheme.", }; } if (instruction.opcode() != HloOpcode::kAllReduce) { return RewriteInfeasibleReason{ &instruction, "Cannot rewrite an AllReduce, since it's not AllReduce.", }; } auto* ar = Cast<HloAllReduceInstruction>(&instruction); if (!ar->use_global_device_ids()) { return RewriteInfeasibleReason{ &instruction, "Only global ids are supported currently.", }; } if (ar->user_count() != 1 || ar->users().front()->opcode() != HloOpcode::kDynamicSlice) { return RewriteInfeasibleReason{ &instruction, "Cannot rewrite AllReduce if it is not a logical reduce scatter.", }; } auto* ds = Cast<HloDynamicSliceInstruction>(ar->users().front()); if (ds->user_count() > 1) { return RewriteInfeasibleReason{ &instruction, "Exactly one user of dynamic slice is required for a rewrite.", }; } int num_partitions = config.num_partitions(); std::vector<ReplicaGroup> rgs = ar->replica_groups(); if (rgs.size() != 1 || rgs.front().replica_ids_size() != num_partitions) { return 
RewriteInfeasibleReason{ &instruction, absl::StrCat("Cannot determine a valid split with num_partitions: ", num_partitions), }; } std::optional<int> split_dim = GetSplitDim(*ar, *ds); if (!split_dim.has_value()) { return RewriteInfeasibleReason{ &instruction, "Cannot get a split dim.", }; } std::optional<int> group_size = GetProcessGroupSize(*ar, *ds); if (!group_size.has_value()) { return RewriteInfeasibleReason{ &instruction, "Cannot determine a group size.", }; } if (num_partitions == group_size) { return RewriteInfeasibleReason{ &instruction, "Nothing to rewrite", }; } if (num_partitions % *group_size != 0) { return RewriteInfeasibleReason{ &instruction, "Group size does not evenly divide the number of partitions", }; } auto spec = AllReduceRewriteSpec{ *split_dim, *group_size, ar, ds, GetNewReplicaGroups(*group_size, num_partitions), }; if (!IsLogicalReduceScatter(module, spec, computation)) { return RewriteInfeasibleReason{ &instruction, "Not a logical reduce scatter.", }; } if (!IsProfitableToSplit(replica_map, spec)) { return RewriteInfeasibleReason{ &instruction, "Splitting is not profitable.", }; } return spec; } absl::StatusOr<bool> SplitAllReduce(const HloModuleConfig& config, AllReduceRewriteSpec spec, HloComputation& computation) { int64_t next_channel_id = hlo_query::NextChannelId(*spec.all_reduce->GetModule()); VLOG(1) << "AR splitting spec: " << spec.ToString(); int num_partitions = config.num_partitions(); int group_size = spec.group_size; CHECK_EQ(num_partitions % group_size, 0); HloAllReduceInstruction& ar = *spec.all_reduce; HloDynamicSliceInstruction& ds = *spec.dynamic_slice; const auto& [first_ar_replica_groups, second_ar_replica_groups] = spec.replica_groups; int channel_id = next_channel_id++; HloInstruction* first_ar = computation.AddInstruction(HloInstruction::CreateAllReduce( ar.shape(), ar.operands(), ar.to_apply(), CollectiveDeviceList(first_ar_replica_groups), ar.constrain_layout(), channel_id, ar.use_global_device_ids())); channel_id = next_channel_id++; HloInstruction* second_ar = computation.AddInstruction(HloInstruction::CreateAllReduce( ds.shape(), {&ds}, ar.to_apply(), CollectiveDeviceList(second_ar_replica_groups), ar.constrain_layout(), channel_id, ar.use_global_device_ids())); TF_RETURN_IF_ERROR(computation.ReplaceInstruction(&ar, first_ar)); if (ds.IsRoot()) { computation.set_root_instruction(second_ar); } TF_RETURN_IF_ERROR(ds.ReplaceAllUsesWith(second_ar)); return true; } absl::StatusOr<bool> SplitAllReduce(const HloModule& module, const ARReplicaGroupMap& replica_map, HloComputation& computation, HloInstruction& instruction) { RewriteDecision spec = CanRewrite(module, replica_map, computation, instruction); if (std::holds_alternative<RewriteInfeasibleReason>(spec)) { auto reason = std::get<RewriteInfeasibleReason>(spec); VLOG(1) << "Cannot process {" << reason.ar->ToString() << "} due to : " << reason.message; return false; } return SplitAllReduce(module.config(), std::get<AllReduceRewriteSpec>(spec), computation); } } absl::StatusOr<bool> AllReduceSplitter::Run( HloModule* module, const absl::flat_hash_set<absl::string_view>& execution_threads) { bool changed = false; for (auto* computation : module->computations(execution_threads)) { ARReplicaGroupMap replica_map = GetReplicaGroupsMap(*computation); for (HloInstruction* instr : computation->MakeInstructionPostOrder()) { TF_ASSIGN_OR_RETURN(bool rewritten, SplitAllReduce(*module, replica_map, *computation, *instr)); changed |= rewritten; } } return changed; } }
#include "xla/service/gpu/transforms/all_reduce_splitter.h" #include <cstddef> #include <cstdint> #include <memory> #include <string> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/algorithm/container.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/hlo/pass/hlo_pass_pipeline.h" #include "xla/service/gpu/transforms/reduce_scatter_creator.h" #include "xla/service/hlo_module_config.h" #include "xla/tests/filecheck.h" #include "xla/tests/hlo_test_base.h" #include "xla/tsl/lib/core/status_test_util.h" #include "tsl/platform/status_matchers.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { using ::tsl::testing::IsOkAndHolds; class AllReduceSplitterTest : public HloTestBase { public: absl::StatusOr<std::unique_ptr<HloModule>> PrepareModule( absl::string_view hlo_module, int64_t num_replicas, int64_t num_partitions) { HloModuleConfig config = GetModuleConfigForTest( num_replicas, num_partitions); config.set_use_spmd_partitioning(num_partitions > 1); return ParseAndReturnVerifiedModule(hlo_module, config); } size_t AllReduceCount(const HloModule &module) { return CollectiveCount(module, HloOpcode::kAllReduce); } private: size_t CollectiveCount(const HloModule &module, HloOpcode opcode) { return absl::c_count_if( module.entry_computation()->instructions(), [&opcode](HloInstruction *instr) { return instr->opcode() == opcode; }); } }; class AllReduceSplitterFilecheckTest : public AllReduceSplitterTest { public: absl::Status FileCheck(const std::string &hlo_text, absl::string_view pattern) { TF_ASSIGN_OR_RETURN(bool matched, RunFileCheck(hlo_text, pattern)); if (!matched) { return absl::InternalError("Filecheck failed."); } return absl::OkStatus(); } }; TEST_F( AllReduceSplitterFilecheckTest, MatchBasicPatternIfDynamicSliceIsRootAndThereExistsAllReduceWithSameReplicaGroups) { absl::string_view hlo_string = R"( HloModule m sum { a = bf16[] parameter(0) b = bf16[] parameter(1) ROOT _ = bf16[] add(a,b) } ENTRY main { p = bf16[2,4096,4096] parameter(0) first.ar = bf16[2,4096,4096] all-reduce(p), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=1 zero = bf16[] constant(0) reduce = bf16[4096] reduce(first.ar, zero), dimensions={0,1}, to_apply=sum all-reduce = bf16[4096] all-reduce(reduce), replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=2 table = s32[8]{0} constant({0,1,2,3,0,1,2,3}) pid = u32[] partition-id() id = s32[1] dynamic-slice(table, pid), dynamic_slice_sizes={1} reshape = s32[] reshape(id) slice_size = s32[] constant(1024) offset = s32[] multiply(reshape, slice_size) ROOT _ = bf16[1024] dynamic-slice(all-reduce, offset), dynamic_slice_sizes={1024} } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, PrepareModule(hlo_string, 1, 8)); EXPECT_THAT(AllReduceSplitter().Run(module.get()), IsOkAndHolds(true)); TF_EXPECT_OK(FileCheck(module->ToString(), R"( CHECK-DAG: %[[P0:.*]] = bf16[2,4096,4096]{2,1,0} parameter(0) CHECK: %[[AR0:.*]] = bf16[2,4096,4096]{2,1,0} all-reduce(bf16[2,4096,4096]{2,1,0} %[[P0]]) CHECK-SAME: replica_groups={[[DESIRED_RGS:.*]]} CHECK-DAG: %[[ZERO:.*]] = bf16[] constant(0) CHECK-DAG: %[[LOCAL_REDUCE:.*]] = bf16[4096]{0} reduce(bf16[2,4096,4096]{2,1,0} %[[AR0]], bf16[] %[[ZERO]]) CHECK: %[[AR1:.*]] = bf16[4096]{0} all-reduce(bf16[4096]{0} %[[LOCAL_REDUCE]]) 
CHECK-SAME: replica_groups={[[DESIRED_RGS]]} CHECK: %[[DS:.*]] = bf16[1024]{0} dynamic-slice(bf16[4096]{0} %[[AR1]], s32[] %[[_:.*]]) CHECK-SAME: dynamic_slice_sizes={1024} CHECK-NEXT: ROOT %[[AR2:.*]] = bf16[1024]{0} all-reduce(bf16[1024]{0} %[[DS]]) CHECK-SAME: replica_groups={{[{]}}{0,4},{1,5},{2,6},{3,7}{{[}]}} )")); } TEST_F( AllReduceSplitterTest, DoesNotMatchMatchBasicPatternIfDynamicSliceIsRootAndThereIsNoAllReduceWithSameReplicaGroups) { absl::string_view hlo_string = R"( HloModule m sum { a = bf16[] parameter(0) b = bf16[] parameter(1) ROOT _ = bf16[] add(a,b) } ENTRY main { p = bf16[2,4096,4096] parameter(0) zero = bf16[] constant(0) reduce = bf16[4096] reduce(p, zero), dimensions={0,1}, to_apply=sum all-reduce = bf16[4096] all-reduce(reduce), replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=2 table = s32[8]{0} constant({0,1,2,3,0,1,2,3}) pid = u32[] partition-id() id = s32[1] dynamic-slice(table, pid), dynamic_slice_sizes={1} reshape = s32[] reshape(id) slice_size = s32[] constant(1024) offset = s32[] multiply(reshape, slice_size) ROOT _ = bf16[1024] dynamic-slice(all-reduce, offset), dynamic_slice_sizes={1024} } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, PrepareModule(hlo_string, 1, 8)); EXPECT_THAT(AllReduceSplitter().Run(module.get()), IsOkAndHolds(false)); EXPECT_EQ(AllReduceCount(*module), 1); } TEST_F( AllReduceSplitterFilecheckTest, MatchBasicPatternIfDynamicSliceIsNotRootAndThereExistsAllReduceWithSameReplicaGroups) { absl::string_view hlo_string = R"( HloModule m sum { a = bf16[] parameter(0) b = bf16[] parameter(1) ROOT _ = bf16[] add(a,b) } ENTRY main { p = bf16[2,4096,4096] parameter(0) zero = bf16[] constant(0) first.ar = bf16[2,4096,4096] all-reduce(p), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=1 reduce = bf16[4096] reduce(p, zero), dimensions={0,1}, to_apply=sum all-reduce = bf16[4096] all-reduce(reduce), replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=1 table = s32[8]{0} constant({0,1,2,3,0,1,2,3}) pid = u32[] partition-id() id = s32[1] dynamic-slice(table, pid), dynamic_slice_sizes={1} reshape = s32[] reshape(id) slice_size = s32[] constant(1024) offset = s32[] multiply(reshape, slice_size) dynamic_slice = bf16[1024] dynamic-slice(all-reduce, offset), dynamic_slice_sizes={1024} broadcast = bf16[1024,1024] broadcast(dynamic_slice), dimensions={0} ROOT _ = tuple(broadcast, first.ar) } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, PrepareModule(hlo_string, 1, 8)); EXPECT_THAT(AllReduceSplitter().Run(module.get()), IsOkAndHolds(true)); TF_EXPECT_OK(FileCheck(module->ToString(), R"( CHECK-DAG: %[[P0:.*]] = bf16[2,4096,4096]{2,1,0} parameter(0) CHECK-DAG: %[[ZERO:.*]] = bf16[] constant(0) CHECK-DAG: %[[LOCAL_REDUCE:.*]] = bf16[4096]{0} reduce(bf16[2,4096,4096]{2,1,0} %[[P0]], bf16[] %[[ZERO]]) CHECK: %[[AR0:.*]] = bf16[4096]{0} all-reduce(bf16[4096]{0} %[[LOCAL_REDUCE]]) CHECK-SAME: replica_groups={[[DESIRED_RGS:.*]]} CHECK: %[[DS:.*]] = bf16[1024]{0} dynamic-slice(bf16[4096]{0} %[[AR0]], s32[] %[[_:.*]]) CHECK-SAME: dynamic_slice_sizes={1024} CHECK-NEXT: %[[AR1:.*]] = bf16[1024]{0} all-reduce(bf16[1024]{0} %[[DS]]) CHECK-SAME: replica_groups={{[{]}}{0,4},{1,5},{2,6},{3,7}{{[}]}} CHECK: %[[EXISTING_AR:.*]] = bf16[2,4096,4096]{2,1,0} all-reduce(bf16[2,4096,4096]{2,1,0} %[[P0]]) CHECK-SAME: replica_groups={[[DESIRED_RGS]]} CHECK: ROOT CHECK-NOT: %[[AR1]] CHECK-SAME: %[[EXISTING_AR]] )")); } TEST_F( 
AllReduceSplitterTest, DoesNotMatchBasicPatternIfDynamicSliceIsNotRootAndThereIsNoAllReduceWithSameReplicaGroups) { absl::string_view hlo_string = R"( HloModule m sum { a = bf16[] parameter(0) b = bf16[] parameter(1) ROOT _ = bf16[] add(a,b) } ENTRY main { p = bf16[2,4096,4096] parameter(0) p.1 = bf16[2,4096,4096] parameter(1) zero = bf16[] constant(0) reduce = bf16[4096] reduce(p, zero), dimensions={0,1}, to_apply=sum all-reduce = bf16[4096] all-reduce(reduce), replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=1 table = s32[8]{0} constant({0,1,2,3,0,1,2,3}) pid = u32[] partition-id() id = s32[1] dynamic-slice(table, pid), dynamic_slice_sizes={1} reshape = s32[] reshape(id) slice_size = s32[] constant(1024) offset = s32[] multiply(reshape, slice_size) dynamic_slice = bf16[1024] dynamic-slice(all-reduce, offset), dynamic_slice_sizes={1024} broadcast = bf16[1024,1024] broadcast(dynamic_slice), dimensions={0} add = bf16[2,4096,4096] add(p,p.1) ROOT _ = tuple(broadcast, add) } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, PrepareModule(hlo_string, 1, 8)); EXPECT_THAT(AllReduceSplitter().Run(module.get()), IsOkAndHolds(false)); EXPECT_EQ(AllReduceCount(*module), 1); } TEST_F(AllReduceSplitterTest, DoesNotMatchBasicPatternIfDynamicSliceIsFullySharded) { absl::string_view hlo_string = R"( HloModule m sum { a = bf16[] parameter(0) b = bf16[] parameter(1) ROOT _ = bf16[] add(a,b) } ENTRY main { p = bf16[2,4096,4096] parameter(0) first.ar = bf16[2,4096,4096] all-reduce(p), replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=1 zero = bf16[] constant(0) reduce = bf16[4096] reduce(first.ar, zero), dimensions={0,1}, to_apply=sum all-reduce = bf16[4096] all-reduce(reduce), replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=2 table = s32[8]{0} constant({0,1,2,3,0,1,2,3}) pid = u32[] partition-id() id = s32[1] dynamic-slice(table, pid), dynamic_slice_sizes={1} reshape = s32[] reshape(id) slice_size = s32[] constant(512) offset = s32[] multiply(reshape, slice_size) ROOT _ = bf16[512] dynamic-slice(all-reduce, offset), dynamic_slice_sizes={512} } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, PrepareModule(hlo_string, 1, 8)); EXPECT_THAT(AllReduceSplitter().Run(module.get()), IsOkAndHolds(false)); EXPECT_EQ(AllReduceCount(*module), 2); } TEST_F(AllReduceSplitterTest, DoesNotMatchBasicPatternIfItIsNotCompiledWithSPMDPartitioning) { absl::string_view hlo_string = R"( HloModule m sum { a = bf16[] parameter(0) b = bf16[] parameter(1) ROOT _ = bf16[] add(a,b) } ENTRY main { p = bf16[2,4096,4096] parameter(0) first.ar = bf16[2,4096,4096] all-reduce(p), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=1 zero = bf16[] constant(0) reduce = bf16[4096] reduce(first.ar, zero), dimensions={0,1}, to_apply=sum all-reduce = bf16[4096] all-reduce(reduce), replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=2 table = s32[8]{0} constant({0,1,2,3,0,1,2,3}) pid = u32[] partition-id() id = s32[1] dynamic-slice(table, pid), dynamic_slice_sizes={1} reshape = s32[] reshape(id) slice_size = s32[] constant(1024) offset = s32[] multiply(reshape, slice_size) ROOT _ = bf16[1024] dynamic-slice(all-reduce, offset), dynamic_slice_sizes={1024} } )"; HloModuleConfig config = GetModuleConfigForTest(1, 8); config.set_use_spmd_partitioning(false); TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, 
ParseAndReturnVerifiedModule(hlo_string, config)); EXPECT_THAT(AllReduceSplitter().Run(module.get()), IsOkAndHolds(false)); EXPECT_THAT(AllReduceCount(*module), 2); } TEST_F(AllReduceSplitterTest, DoesNotMatchBasicPatternIfUseGlobalDeviceIdsIsFalse) { absl::string_view hlo_string = R"( HloModule m sum { a = bf16[] parameter(0) b = bf16[] parameter(1) ROOT _ = bf16[] add(a,b) } ENTRY main { p = bf16[2,4096,4096] parameter(0) first.ar = bf16[2,4096,4096] all-reduce(p), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=sum, channel_id=1 zero = bf16[] constant(0) reduce = bf16[4096] reduce(first.ar, zero), dimensions={0,1}, to_apply=sum all-reduce = bf16[4096] all-reduce(reduce), replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=sum, channel_id=2 table = s32[8]{0} constant({0,1,2,3,0,1,2,3}) pid = u32[] partition-id() id = s32[1] dynamic-slice(table, pid), dynamic_slice_sizes={1} reshape = s32[] reshape(id) slice_size = s32[] constant(1024) offset = s32[] multiply(reshape, slice_size) ROOT _ = bf16[1024] dynamic-slice(all-reduce, offset), dynamic_slice_sizes={1024} } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, PrepareModule(hlo_string, 1, 8)); EXPECT_THAT(AllReduceSplitter().Run(module.get()), IsOkAndHolds(false)); EXPECT_EQ(AllReduceCount(*module), 2); } TEST_F(AllReduceSplitterTest, DoesNotMatchBasicPatternIfIsNotCrossAllPartitionsAllReduce) { absl::string_view hlo_string = R"( HloModule m sum { a = bf16[] parameter(0) b = bf16[] parameter(1) ROOT _ = bf16[] add(a,b) } ENTRY main { p = bf16[2,4096,4096] parameter(0) first.ar = bf16[2,4096,4096] all-reduce(p), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=1 zero = bf16[] constant(0) reduce = bf16[4096] reduce(first.ar, zero), dimensions={0,1}, to_apply=sum all-reduce = bf16[4096] all-reduce(reduce), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=2 table = s32[8]{0} constant({0,1,2,3,0,1,2,3}) pid = u32[] partition-id() id = s32[1] dynamic-slice(table, pid), dynamic_slice_sizes={1} reshape = s32[] reshape(id) slice_size = s32[] constant(1024) offset = s32[] multiply(reshape, slice_size) ROOT _ = bf16[1024] dynamic-slice(all-reduce, offset), dynamic_slice_sizes={1024} } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, PrepareModule(hlo_string, 1, 8)); EXPECT_THAT(AllReduceSplitter().Run(module.get()), IsOkAndHolds(false)); EXPECT_EQ(AllReduceCount(*module), 2); } TEST_F( AllReduceSplitterFilecheckTest, PipelineMatchesBasicPatternWithDynamicSliceAsRootAndRewritesToReduceScatter) { absl::string_view hlo_string = R"( HloModule m sum { a = bf16[] parameter(0) b = bf16[] parameter(1) ROOT _ = bf16[] add(a,b) } ENTRY main { p = bf16[2,4096,4096] parameter(0) first.ar = bf16[2,4096,4096] all-reduce(p), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=1 zero = bf16[] constant(0) reduce = bf16[4096] reduce(first.ar, zero), dimensions={0,1}, to_apply=sum all-reduce = bf16[4096] all-reduce(reduce), replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=2 table = s32[8]{0} constant({0,1,2,3,0,1,2,3}) pid = u32[] partition-id() id = s32[1] dynamic-slice(table, pid), dynamic_slice_sizes={1} reshape = s32[] reshape(id) slice_size = s32[] constant(1024) offset = s32[] multiply(reshape, slice_size) ROOT _ = bf16[1024] dynamic-slice(all-reduce, offset), dynamic_slice_sizes={1024} } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, PrepareModule(hlo_string, 1, 
8)); HloPassPipeline pipeline("all-reduce-splitter-rewrite"); pipeline.AddPass<AllReduceSplitter>(); pipeline.AddPass<ReduceScatterCreator>(); EXPECT_THAT(pipeline.Run(module.get()), IsOkAndHolds(true)); TF_EXPECT_OK(FileCheck(module->ToString(), R"( CHECK-DAG: %[[P0:.*]] = bf16[2,4096,4096]{2,1,0} parameter(0) CHECK: %[[AR0:.*]] = bf16[2,4096,4096]{2,1,0} all-reduce(bf16[2,4096,4096]{2,1,0} %[[P0]]) CHECK-SAME: replica_groups={[[DESIRED_RGS:.*]]} CHECK-DAG: %[[ZERO:.*]] = bf16[] constant(0) CHECK-DAG: %[[LOCAL_REDUCE:.*]] = bf16[4096]{0} reduce(bf16[2,4096,4096]{2,1,0} %[[AR0]], bf16[] %[[ZERO]]) CHECK: %[[REDUCE_SCATTER:.*]] = bf16[1024]{0} reduce-scatter(bf16[4096]{0} %[[LOCAL_REDUCE]]) CHECK-SAME: replica_groups={[[DESIRED_RGS]]} CHECK-NEXT: ROOT %[[AR2:.*]] = bf16[1024]{0} all-reduce(bf16[1024]{0} %[[REDUCE_SCATTER]]) CHECK-SAME: replica_groups={{[{]}}{0,4},{1,5},{2,6},{3,7}{{[}]}} )")); } TEST_F( AllReduceSplitterFilecheckTest, PipelineMatchesBasicPatternWithDynamicSliceNotAsRootAndRewritesToReduceScatter) { absl::string_view hlo_string = R"( HloModule m sum { a = bf16[] parameter(0) b = bf16[] parameter(1) ROOT _ = bf16[] add(a,b) } ENTRY main { p = bf16[2,4096,4096] parameter(0) zero = bf16[] constant(0) first.ar = bf16[2,4096,4096] all-reduce(p), replica_groups={{0,1,2,3},{4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=1 reduce = bf16[4096] reduce(p, zero), dimensions={0,1}, to_apply=sum all-reduce = bf16[4096] all-reduce(reduce), replica_groups={{0,1,2,3,4,5,6,7}}, to_apply=sum, use_global_device_ids=true, channel_id=1 table = s32[8]{0} constant({0,1,2,3,0,1,2,3}) pid = u32[] partition-id() id = s32[1] dynamic-slice(table, pid), dynamic_slice_sizes={1} reshape = s32[] reshape(id) slice_size = s32[] constant(1024) offset = s32[] multiply(reshape, slice_size) dynamic_slice = bf16[1024] dynamic-slice(all-reduce, offset), dynamic_slice_sizes={1024} broadcast = bf16[1024,1024] broadcast(dynamic_slice), dimensions={0} ROOT _ = tuple(broadcast, first.ar) } )"; TF_ASSERT_OK_AND_ASSIGN( std::unique_ptr<HloModule> module, PrepareModule(hlo_string, 1, 8)); HloPassPipeline pipeline("all-reduce-splitter-rewrite"); pipeline.AddPass<AllReduceSplitter>(); pipeline.AddPass<ReduceScatterCreator>(); EXPECT_THAT(pipeline.Run(module.get()), IsOkAndHolds(true)); TF_EXPECT_OK(FileCheck(module->ToString(), R"( CHECK-DAG: %[[P0:.*]] = bf16[2,4096,4096]{2,1,0} parameter(0) CHECK-DAG: %[[ZERO:.*]] = bf16[] constant(0) CHECK-DAG: %[[LOCAL_REDUCE:.*]] = bf16[4096]{0} reduce(bf16[2,4096,4096]{2,1,0} %[[P0]], bf16[] %[[ZERO]]) CHECK: %[[REDUCE_SCATTER:.*]] = bf16[1024]{0} reduce-scatter(bf16[4096]{0} %[[LOCAL_REDUCE]]) CHECK-NEXT: %[[AR1:.*]] = bf16[1024]{0} all-reduce(bf16[1024]{0} %[[REDUCE_SCATTER]]) CHECK-SAME: replica_groups={{[{]}}{0,4},{1,5},{2,6},{3,7}{{[}]}} CHECK: %[[EXISTING_AR:.*]] = bf16[2,4096,4096]{2,1,0} all-reduce(bf16[2,4096,4096]{2,1,0} %[[P0]]) CHECK: ROOT CHECK-NOT: %[[AR1]] CHECK-SAME: %[[EXISTING_AR]] )")); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/all_reduce_splitter.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/transforms/all_reduce_splitter_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
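A short illustrative sketch (not part of the test file above) of how the two passes compose, as exercised by the pipeline tests: AllReduceSplitter only rewrites the cross-partition all-reduce into two smaller all-reduces, and ReduceScatterCreator then folds the first all-reduce plus the dynamic-slice into a reduce-scatter. It assumes a `module` prepared via PrepareModule(hlo_string, /*num_replicas=*/1, /*num_partitions=*/8) with one of the pipeline HLO strings above.

HloPassPipeline pipeline("all-reduce-splitter-rewrite");
// all-reduce + dynamic-slice  ->  all-reduce(intra-group) + dynamic-slice
//                                 + all-reduce(cross-group)
pipeline.AddPass<AllReduceSplitter>();
// all-reduce(intra-group) + dynamic-slice  ->  reduce-scatter
pipeline.AddPass<ReduceScatterCreator>();
TF_ASSERT_OK_AND_ASSIGN(bool changed, pipeline.Run(module.get()));
EXPECT_TRUE(changed);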
faca4e48-e6e7-4077-b1d1-06e856a81276
cpp
tensorflow/tensorflow
tiled_hlo_computation
third_party/xla/xla/service/gpu/model/tiled_hlo_computation.cc
third_party/xla/xla/service/gpu/model/tiled_hlo_computation_test.cc
#include "xla/service/gpu/model/tiled_hlo_computation.h" #include <sstream> #include <string> #include "absl/container/flat_hash_map.h" #include "absl/container/inlined_vector.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_join.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/gpu/model/tiled_hlo_instruction.h" #include "xla/service/name_uniquer.h" #include "xla/util.h" namespace xla { namespace gpu { std::string TiledHloComputation::ToString() const { std::stringstream ss; NameUniquer name_uniquer("_"); absl::flat_hash_map<const TiledHloInstruction*, std::string> tile_names; for (const auto* tiled_hlo : instructions()) { std::string tile_name = name_uniquer.GetUniqueName( absl::StrCat(tiled_hlo->hlo()->name(), ".tile_0")); tile_names[tiled_hlo] = tile_name; absl::InlinedVector<std::string, 4> operand_names; for (const auto& operand : tiled_hlo->operands()) { operand_names.push_back(tile_names.at(operand)); } ss << tile_name << " = " << HloOpcodeString(tiled_hlo->hlo()->opcode()) << "(" << absl::StrJoin(operand_names, ", ") << ")\n"; ss << tiled_hlo->ToString() << "\n"; } return ss.str(); } } }
#include "xla/service/gpu/model/tiled_hlo_computation.h" #include <gmock/gmock.h> #include <gtest/gtest.h> #include "xla/service/gpu/backend_configs.pb.h" namespace xla { namespace gpu { namespace { using ::testing::ElementsAre; TEST(BlockLevelParametersTest, BlockLevelParametersCanBeParsedFromBlockLevelFusionConfig) { BlockLevelFusionConfig block_level_fusion_config; block_level_fusion_config.mutable_output_tile_sizes()->Add(18); block_level_fusion_config.mutable_output_tile_sizes()->Add(19); block_level_fusion_config.set_num_warps(12); BlockLevelParameters block_level_parameters = BlockLevelParameters::FromBlockLevelFusionConfig( block_level_fusion_config); EXPECT_THAT(block_level_parameters.output_tile_sizes, ElementsAre(18, 19)); EXPECT_THAT(block_level_parameters.num_warps, 12); } TEST(BlockLevelParametersTest, BlockLevelParametersCanBeConvertedToBlockLevelFusionConfig) { BlockLevelParameters block_level_parameters; block_level_parameters.output_tile_sizes = {18, 19}; block_level_parameters.num_warps = 12; BlockLevelFusionConfig block_level_fusion_config = block_level_parameters.ToBlockLevelFusionConfig(); EXPECT_THAT(block_level_fusion_config.output_tile_sizes(), ElementsAre(18, 19)); EXPECT_THAT(block_level_fusion_config.num_warps(), 12); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/tiled_hlo_computation.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/tiled_hlo_computation_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
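The unit test above checks each conversion direction separately; below is a small illustrative addition (not part of the test file) that round-trips BlockLevelParameters through the BlockLevelFusionConfig proto, which should preserve both fields given the two conversions shown.

TEST(BlockLevelParametersTest, RoundTripPreservesTileSizesAndNumWarps) {
  BlockLevelParameters params;
  params.output_tile_sizes = {18, 19};
  params.num_warps = 12;
  // ToBlockLevelFusionConfig writes both fields to the proto and
  // FromBlockLevelFusionConfig reads them back.
  BlockLevelParameters round_tripped =
      BlockLevelParameters::FromBlockLevelFusionConfig(
          params.ToBlockLevelFusionConfig());
  EXPECT_EQ(round_tripped.output_tile_sizes, params.output_tile_sizes);
  EXPECT_EQ(round_tripped.num_warps, params.num_warps);
}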
153b7328-bff2-427a-ba76-3f633b9357c5
cpp
tensorflow/tensorflow
tiled_hlo_instruction
third_party/xla/xla/service/gpu/model/tiled_hlo_instruction.cc
third_party/xla/xla/service/gpu/model/tiled_hlo_instruction_test.cc
#include "xla/service/gpu/model/tiled_hlo_instruction.h" #include <cstdint> #include <memory> #include <optional> #include <sstream> #include <string> #include <utility> #include "absl/memory/memory.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" #include "absl/strings/str_join.h" #include "llvm/ADT/SmallVector.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/service/gpu/model/indexing_map.h" #include "xla/service/gpu/model/indexing_map_serialization.h" #include "xla/service/gpu/model/tiled_hlo_computation.h" #include "xla/util.h" #include "tsl/platform/errors.h" namespace xla { namespace gpu { namespace { absl::Status VerifyTiledHloInstructionConstructorPreconditions( const HloInstruction* hlo, llvm::SmallVector<int64_t> tile_sizes, llvm::SmallVector<int64_t> tile_strides, std::optional<IndexingMap> tile_offsets_indexing) { int rank = hlo->shape().rank(); if (tile_sizes.size() != rank) { return absl::InvalidArgumentError( absl::StrCat("Number of tile sizes must be equal to the rank of the " "hlo shape. tile_sizes = ", tile_sizes.size(), ", hlo = ", hlo->ToString())); } if (tile_strides.size() != rank) { return absl::InvalidArgumentError( absl::StrCat("Number of tile strides must be equal to the rank of the " "hlo shape. tile_sizes = ", tile_strides.size(), ", hlo = ", hlo->ToString())); } if (tile_offsets_indexing.has_value() && tile_offsets_indexing->GetAffineMap().getNumResults() != rank) { return absl::InvalidArgumentError(absl::StrFormat( "tile_offsets_indexing must have the same number of results as the " "rank of the hlo shape. tile_offsets_indexing = %s, hlo = %s", ToString(*tile_offsets_indexing), hlo->ToString())); } return absl::OkStatus(); } } absl::StatusOr<std::unique_ptr<TiledHloInstruction>> TiledHloInstruction::Create( const HloInstruction* hlo, llvm::SmallVector<const TiledHloInstruction*> operands, llvm::SmallVector<int64_t> tile_sizes, llvm::SmallVector<int64_t> tile_strides, std::optional<IndexingMap> tile_offsets_indexing) { TF_RETURN_IF_ERROR(VerifyTiledHloInstructionConstructorPreconditions( hlo, tile_sizes, tile_strides, tile_offsets_indexing)); return absl::WrapUnique(new TiledHloInstruction( hlo, std::move(operands), std::move(tile_sizes), std::move(tile_strides), std::move(tile_offsets_indexing))); } std::string TiledHloInstruction::ToString() const { std::stringstream ss; ss << "\thlo: " << hlo_->ToString() << "\n"; ss << "\ttile_sizes: (" << absl::StrJoin(tile_sizes_, ", ") << ")\n"; ss << "\ttile_strides: (" << absl::StrJoin(tile_strides_, ", ") << ")\n"; ss << "\ttile_offsets_indexing: " << (tile_offsets_indexing_.has_value() ? 
gpu::ToString(*tile_offsets_indexing_) : "nullopt"); return ss.str(); } absl::StatusOr<std::unique_ptr<TiledHloFusionInstruction>> TiledHloFusionInstruction::Create( const HloInstruction* hlo, llvm::SmallVector<const TiledHloInstruction*> operands, std::unique_ptr<TiledHloComputation> called_computation, llvm::SmallVector<int64_t> tile_sizes, llvm::SmallVector<int64_t> tile_strides, std::optional<IndexingMap> tile_offsets_indexing) { TF_RETURN_IF_ERROR(VerifyTiledHloInstructionConstructorPreconditions( hlo, tile_sizes, tile_strides, tile_offsets_indexing)); return absl::WrapUnique(new TiledHloFusionInstruction( hlo, std::move(operands), std::move(called_computation), std::move(tile_sizes), std::move(tile_strides), std::move(tile_offsets_indexing))); } TiledHloFusionInstruction::TiledHloFusionInstruction( const HloInstruction* hlo, llvm::SmallVector<const TiledHloInstruction*> operands, std::unique_ptr<TiledHloComputation> called_computation, llvm::SmallVector<int64_t> tile_sizes, llvm::SmallVector<int64_t> tile_strides, std::optional<IndexingMap> tile_offsets_indexing) : TiledHloInstruction(hlo, std::move(operands), std::move(tile_sizes), std::move(tile_strides), std::move(tile_offsets_indexing)), called_computation_(std::move(called_computation)) {} } }
#include "xla/service/gpu/model/tiled_hlo_instruction.h" #include <memory> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "mlir/IR/MLIRContext.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/service/gpu/model/indexing_map.h" #include "xla/service/gpu/model/indexing_test_utils.h" #include "xla/service/gpu/model/tiled_hlo_computation.h" #include "xla/shape_util.h" #include "xla/tests/hlo_test_base.h" #include "xla/xla_data.pb.h" namespace xla { namespace gpu { namespace { using ::testing::HasSubstr; class TiledHloInstructionTest : public HloTestBase { public: mlir::MLIRContext mlir_context_; }; TEST_F(TiledHloInstructionTest, TileSizesAndStridesShouldMatchHloShapeRank) { std::unique_ptr<HloInstruction> hlo = HloInstruction::CreateParameter( 0, ShapeUtil::MakeShape(PrimitiveType::F32, {32, 64}), "p0"); IndexingMap tile_offsets_indexing = IndexingMap::FromTensorSizes( ParseAffineMap("(d0) -> (d0 floordiv 16, (d0 mod 16) * 16)", &mlir_context_), {8}, {}); EXPECT_THAT(TiledHloInstruction::Create( hlo.get(), {}, {16}, {1, 1}, tile_offsets_indexing) .status() .message(), HasSubstr("Number of tile sizes must be equal to the rank")); EXPECT_THAT(TiledHloInstruction::Create( hlo.get(), {}, {16, 16}, {1, 1, 1}, tile_offsets_indexing) .status() .message(), HasSubstr("Number of tile strides must be equal to the rank")); } TEST_F(TiledHloInstructionTest, ShouldReturnErrorIfBlockIdToTileOffsetsIndexingIsInvalid) { std::unique_ptr<HloInstruction> hlo = HloInstruction::CreateParameter( 0, ShapeUtil::MakeShape(PrimitiveType::F32, {32, 64}), "p0"); IndexingMap tile_offsets_indexing = IndexingMap::FromTensorSizes( ParseAffineMap("(d0, d1) -> (2 * d0)", &mlir_context_), {2, 4}, {}); EXPECT_THAT( TiledHloInstruction::Create( hlo.get(), {}, {16, 16}, {1, 1}, tile_offsets_indexing) .status() .message(), HasSubstr( "must have the same number of results as the rank of the hlo shape")); } using TiledHloFusionInstructionTest = TiledHloInstructionTest; TEST_F(TiledHloFusionInstructionTest, TileSizesAndStridesShouldMatchHloShapeRank) { std::unique_ptr<HloInstruction> hlo = HloInstruction::CreateParameter( 0, ShapeUtil::MakeShape(PrimitiveType::F32, {32, 64}), "p0"); IndexingMap tile_offsets_indexing = IndexingMap::FromTensorSizes( ParseAffineMap("(d0) -> (d0 floordiv 16, (d0 mod 16) * 16)", &mlir_context_), {8}, {}); EXPECT_THAT(TiledHloFusionInstruction::Create( hlo.get(), {}, nullptr, {16}, {1, 1}, tile_offsets_indexing) .status() .message(), HasSubstr("Number of tile sizes must be equal to the rank")); EXPECT_THAT(TiledHloFusionInstruction::Create( hlo.get(), {}, nullptr, {16, 16}, {1, 1, 1}, tile_offsets_indexing) .status() .message(), HasSubstr("Number of tile strides must be equal to the rank")); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/tiled_hlo_instruction.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/tiled_hlo_instruction_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
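The tests above only exercise the failing preconditions of TiledHloInstruction::Create; here is a brief illustrative success case (not part of the test file), assuming the same fixture (mlir_context_) and the indexing test utilities already included there. Create succeeds once tile_sizes, tile_strides and the number of results of tile_offsets_indexing all match the rank of the HLO shape, here 2.

TEST_F(TiledHloInstructionTest, CreateSucceedsWhenRanksMatch) {
  std::unique_ptr<HloInstruction> hlo = HloInstruction::CreateParameter(
      0, ShapeUtil::MakeShape(PrimitiveType::F32, {32, 64}), "p0");
  // Two results in the affine map match the rank-2 shape.
  IndexingMap tile_offsets_indexing = IndexingMap::FromTensorSizes(
      ParseAffineMap("(d0) -> (d0 floordiv 16, (d0 mod 16) * 16)",
                     &mlir_context_),
      {8}, {});
  auto tiled_hlo = TiledHloInstruction::Create(hlo.get(), {}, {16, 16}, {1, 1},
                                               tile_offsets_indexing);
  ASSERT_TRUE(tiled_hlo.ok());
  EXPECT_EQ(tiled_hlo.value()->hlo(), hlo.get());
}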
5bd8cb22-40d3-4485-8d6d-5d6717fce3d5
cpp
tensorflow/tensorflow
gpu_collective_performance_model
third_party/xla/xla/service/gpu/model/gpu_collective_performance_model.cc
third_party/xla/xla/service/gpu/model/gpu_collective_performance_model_test.cc
#include "xla/service/gpu/model/gpu_collective_performance_model.h" #include <algorithm> #include <cstdint> #include <cstdlib> #include <vector> #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/strings/numbers.h" #include "absl/time/time.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/model/gpu_hlo_cost_analysis.h" #include "xla/service/hlo_dataflow_analysis.h" #include "xla/stream_executor/device_description.h" #include "xla/util.h" #if GOOGLE_CUDA #include "third_party/gpus/cuda/nvml/include/nvml.h" #endif namespace xla { namespace gpu { namespace { int64_t GetNcclMaxNumChannels( GpuPerformanceWithCollectiveModel::CollectiveAlgo algorithm) { int64_t max_nchannels = 0; switch (algorithm) { case GpuPerformanceWithCollectiveModel::RING: case GpuPerformanceWithCollectiveModel::TREE: max_nchannels = GpuPerformanceWithCollectiveModel::kMaxNumChannelsRing; break; } const char* env = std::getenv("NCCL_MAX_NCHANNELS"); if (env != nullptr) { int64_t max_nchannels_from_env; if (absl::SimpleAtoi(env, &max_nchannels_from_env)) { max_nchannels = std::min(max_nchannels_from_env, max_nchannels); } } return max_nchannels; } int64_t GetMinNumberOfChannels( GpuPerformanceWithCollectiveModel::CollectiveAlgo algorithm) { int64_t min_nchannels = 0; switch (algorithm) { case GpuPerformanceWithCollectiveModel::RING: case GpuPerformanceWithCollectiveModel::TREE: min_nchannels = 1; break; } const char* env = std::getenv("NCCL_MIN_NCHANNELS"); if (env != nullptr) { int64_t min_nchannels_from_env; if (absl::SimpleAtoi(env, &min_nchannels_from_env)) { min_nchannels = std::min(min_nchannels_from_env, min_nchannels); } } return min_nchannels; } int GetNumThreads(int warp_size, int min_num_threads, int max_num_threads, int default_num_threads) { int threads_from_env = default_num_threads; const char* env = std::getenv("NCCL_NTHREADS"); if (env != nullptr) { CHECK(absl::SimpleAtoi(env, &threads_from_env)); } int num_threads = threads_from_env; if (num_threads > 0) { if (num_threads % warp_size != 0) { num_threads = max_num_threads; } else if (num_threads > max_num_threads) { num_threads = max_num_threads; } else if (num_threads < min_num_threads) { num_threads = min_num_threads; } } else { num_threads = default_num_threads; } return num_threads; } float GetMaxSysBwFromGpu(const se::CudaComputeCapability cc, const double* bandwidths_table) { switch (cc.major) { case se::CudaComputeCapability::VOLTA: return bandwidths_table[0]; case se::CudaComputeCapability::AMPERE: return bandwidths_table[1]; case se::CudaComputeCapability::HOPPER: return bandwidths_table[2]; case se::CudaComputeCapability::BLACKWELL: return bandwidths_table[3]; default: return bandwidths_table[4]; } } } float GpuPerformanceWithCollectiveModel::GetNvlinkBw( se::CudaComputeCapability compute_capability) { return compute_capability.IsAtLeast(se::CudaComputeCapability::HOPPER) ? kSm90NvlinkBandwidth : compute_capability.IsAtLeast(se::CudaComputeCapability::AMPERE) ? kSm80NvlinkBandwidth : compute_capability.IsAtLeast(se::CudaComputeCapability::VOLTA) ? kSm70NvlinkBandwidth : compute_capability.IsAtLeast(se::CudaComputeCapability::PASCAL_) ? 
kSm60NvlinkBandwidth : kSm80NvlinkBandwidth; } bool GpuPerformanceWithCollectiveModel::InitNvml() { #if GOOGLE_CUDA && (defined(PLATFORM_POSIX) || defined(PLATFORM_GOOGLE)) void* libhandle = dlopen("libnvidia-ml.so.1", RTLD_NOW); CHECK(libhandle != nullptr) << "Failed to open libnvidia-ml.so.1"; struct SymbolEntry { void** functor; char const* name; }; std::vector<SymbolEntry> symbols = { {(void**)&xla_nvmlInit, "nvmlInit_v2"}, {(void**)&xla_nvmlShutdown, "nvmlShutdown"}, {(void**)&xla_nvmlDeviceGetHandleByIndex, "nvmlDeviceGetHandleByIndex"}, {(void**)&xla_nvmlDeviceGetNvLinkCapability, "nvmlDeviceGetNvLinkCapability"}, }; for (SymbolEntry se : symbols) { *se.functor = dlsym(libhandle, se.name); } nvmlReturn_t init_result = xla_nvmlInit(); return init_result == NVML_SUCCESS; #else return false; #endif } bool GpuPerformanceWithCollectiveModel::ShutdownNvml() { #if GOOGLE_CUDA nvmlReturn_t shutdown_result = xla_nvmlShutdown(); return shutdown_result == NVML_SUCCESS; #else return false; #endif } uint32_t GpuPerformanceWithCollectiveModel::CheckIfNvlinkSupportsP2P() { #if GOOGLE_CUDA CHECK(InitNvml()) << "NVML init failed."; nvmlDevice_t nvml_device; nvmlReturn_t get_device_result = xla_nvmlDeviceGetHandleByIndex(0, &nvml_device); CHECK(get_device_result == NVML_SUCCESS); uint32_t supported_p2p = 0; nvmlReturn_t nvlink_cap_result = xla_nvmlDeviceGetNvLinkCapability( nvml_device, 0, NVML_NVLINK_CAP_P2P_SUPPORTED, &supported_p2p); CHECK(nvlink_cap_result == NVML_SUCCESS || nvlink_cap_result == NVML_ERROR_NOT_SUPPORTED); CHECK(ShutdownNvml()) << "NVML shutdown failed."; return supported_p2p; #else return 0; #endif } absl::Duration GpuPerformanceWithCollectiveModel::ComputeAllreduceTime( const HloInstruction& instr, const GpuHloCostAnalysis* cost_analysis, const se::DeviceDescription& gpu_device_info) { absl::Duration total_time = kNcclKernelLaunchOverhead; stream_executor::CudaComputeCapability compute_cap = gpu_device_info.cuda_compute_capability(); int64_t size_of_speed_array = kIntraNodeSpeeds.size(); int64_t size_of_sm90_speed_array = kIntraNodeSpeedsSm90.size(); int num_speeds = compute_cap.major >= se::CudaComputeCapability::HOPPER ? size_of_sm90_speed_array : size_of_speed_array; const double* speeds = compute_cap.major >= se::CudaComputeCapability::HOPPER ? kIntraNodeSpeedsSm90.data() : kIntraNodeSpeeds.data(); int speed_index = 0; float max_sys_bw = GetMaxSysBwFromGpu(compute_cap, kLowLatencyMaxBandwidths.data()); CHECK_GT(max_sys_bw, 0); while ((speed_index < num_speeds - 1) && speeds[speed_index] > max_sys_bw) { speed_index++; } float bw_intra_node = speeds[speed_index]; int64_t num_devices = cost_analysis->NumOfDevices(instr); int64_t min_nchannels = std::max(num_devices, GetMinNumberOfChannels(CollectiveAlgo::RING)); int64_t num_channels = std::max(min_nchannels, GetNcclMaxNumChannels(CollectiveAlgo::RING)); int default_threads = (bw_intra_node * num_channels <= kPciBandwidth) ? 256 : kLL128NumThreads; int warp_size = gpu_device_info.threads_per_warp(); int num_threads = GetNumThreads(warp_size, kLL128NumThreads / 4, kLL128NumThreads, default_threads); absl::Duration compute_time_per_channel = ComputeTime( gpu_device_info, cost_analysis->flop_count(instr) / num_channels, num_channels, num_threads); total_time += compute_time_per_channel; uint32_t supported_p2p = CheckIfNvlinkSupportsP2P(); if (supported_p2p == 0) { VLOG(8) << "Nvlink doesn't support p2p communication. 
Model will " "continue using default system bandwidth."; } else { VLOG(8) << "Nvlink supports p2p communication, setting intra node " "bandwidth to nvlink bw."; bw_intra_node = GetNvlinkBw(compute_cap); } double bus_bandwidth = bw_intra_node * num_channels; double per_channel_ring_ll128_Bw = GetMaxSysBwFromGpu(compute_cap, kPerChannelMaxRingLL128Bandwidths.data()); bus_bandwidth = std::min(bus_bandwidth * kRingAlgorithmDiscountFactor, num_channels * per_channel_ring_ll128_Bw); double actual_bandwidth = bus_bandwidth * cost_analysis->ScalingRatio(instr); absl::Duration communication_time = absl::Milliseconds( cost_analysis->bytes_accessed(instr) / (1e6 * actual_bandwidth)); total_time += communication_time; return total_time; } absl::Duration GpuPerformanceWithCollectiveModel::ComputeCollectiveTime( const HloInstruction& instr, const GpuHloCostAnalysis* cost_analysis, const se::DeviceDescription& gpu_device_info) { if (cost_analysis->NumOfDevices(instr) == 1) { VLOG(8) << "Returning only kernel launch overhead for a single partition."; return kNcclKernelLaunchOverhead; } if (HloDataflowAnalysis::IsAsynchronousOperationDone(instr.opcode())) { VLOG(8) << "Returning 0 cost for async done op " << instr.name(); return absl::ZeroDuration(); } switch (instr.opcode()) { case HloOpcode::kAllReduce: case HloOpcode::kAllReduceStart: return ComputeAllreduceTime(instr, cost_analysis, gpu_device_info); default: { LOG(WARNING) << "Runtime estimate for " << instr.name() << " not implemented. Returning only the kernel launch time."; return kNcclKernelLaunchOverhead; } } } } }
#include <gtest/gtest.h> #include "xla/service/gpu/backend_configs.pb.h" #include "xla/tests/hlo_test_base.h" namespace xla { namespace gpu { namespace { using GpuPerformanceWithCollectiveModelTest = HloTestBase; TEST_F(GpuPerformanceWithCollectiveModelTest, TestNvmlLibraryLoading) { #if GOOGLE_CUDA EXPECT_TRUE(GpuPerformanceWithCollectiveModel::InitNvml()); nvmlDevice_t nvml_device; nvmlReturn_t get_device_result = xla_nvmlDeviceGetHandleByIndex(0, &nvml_device); EXPECT_TRUE(get_device_result == NVML_SUCCESS); EXPECT_TRUE(GpuPerformanceWithCollectiveModel::InitNvml()); #endif } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/gpu_collective_performance_model.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/gpu_collective_performance_model_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
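An illustrative fragment (not part of the test file above) showing the NVLink bandwidth lookup that ComputeAllreduceTime uses when peer-to-peer is supported; it assumes GpuPerformanceWithCollectiveModel::GetNvlinkBw is callable as a static helper, like InitNvml/ShutdownNvml in the test above, and that the collective performance model header is included.

// Per GetNvlinkBw above: >= Hopper -> kSm90NvlinkBandwidth,
// >= Ampere -> kSm80NvlinkBandwidth, >= Volta -> kSm70NvlinkBandwidth,
// >= Pascal -> kSm60NvlinkBandwidth, otherwise kSm80NvlinkBandwidth.
float hopper_bw = GpuPerformanceWithCollectiveModel::GetNvlinkBw(
    stream_executor::CudaComputeCapability(9, 0));
float volta_bw = GpuPerformanceWithCollectiveModel::GetNvlinkBw(
    stream_executor::CudaComputeCapability(7, 0));
EXPECT_GT(hopper_bw, 0.0f);
EXPECT_GT(volta_bw, 0.0f);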
be319639-b305-4ef3-ad85-bd35f5ecac40
cpp
tensorflow/tensorflow
hlo_op_profiles
third_party/xla/xla/service/gpu/model/hlo_op_profiles.cc
third_party/xla/xla/service/gpu/model/hlo_op_profiles_test.cc
#include "xla/service/gpu/model/hlo_op_profiles.h" #include <memory> #include <string> #include <string_view> #include <utility> #include <variant> #include "absl/memory/memory.h" #include "absl/strings/str_cat.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/gpu/model/hlo_op_profile.pb.h" #include "xla/service/gpu/model/hlo_op_profiles_data.h" #include "xla/stream_executor/device_description.h" #include "tsl/platform/logging.h" #include "tsl/platform/protobuf.h" namespace xla { namespace gpu { const HloOpProfiles& HloOpProfiles::Singleton() { static const auto* hlo_op_profiles = HloOpProfiles::Load(kDeviceHloOpProfiles, "sm_86") .release(); return *hlo_op_profiles; } std::string HloOpProfiles::GetProfileName( const se::DeviceDescription& device_info) { if (auto* ptr = std::get_if<stream_executor::CudaComputeCapability>( &device_info.gpu_compute_capability())) { return absl::StrCat("sm_", ptr->major, ptr->minor); } return "<unknown>"; } std::unique_ptr<HloOpProfiles> HloOpProfiles::Load( std::string_view profiles_text_proto, std::string_view default_profile_name) { ProfilesNestedMap profiles_map; DeviceHloInstructionProfiles all_device_profiles; CHECK(tsl::protobuf::TextFormat::ParseFromString( std::string(profiles_text_proto), &all_device_profiles)); for (const auto& device_profile : all_device_profiles.entries()) { for (const auto& entry : device_profile.second.entries()) { auto op_code = StringToHloOpcode(entry.instruction().opcode()).value(); auto element_type = entry.instruction().shape().element_type(); profiles_map[device_profile.first][std::make_pair( op_code, element_type)] = entry.clock_cycles(); } } return absl::WrapUnique( new HloOpProfiles(std::move(profiles_map), default_profile_name)); } const HloOpProfiles::HloOpProfile& HloOpProfiles::GetProfile( const se::DeviceDescription& device_info) const { auto it = profiles_.find(GetProfileName(device_info)); if (it != profiles_.end()) return it->second; return default_profile_; } } }
#include "xla/service/gpu/model/hlo_op_profiles.h" #include <gtest/gtest.h> #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/gpu/gpu_device_info_for_tests.h" #include "xla/stream_executor/device_description.h" #include "xla/xla_data.pb.h" namespace xla { namespace gpu { namespace { constexpr char kDeviceHloOpProfiles[] = R"pb( entries { key: "sm_90" value { entries { instruction { opcode: "divide" shape { element_type: F32 } } clock_cycles: 32 } } } entries { key: "sm_80" value { entries { instruction { opcode: "multiply" shape { element_type: F32 } } clock_cycles: 64 } } } )pb"; using HloOpProfilesTest = ::testing::Test; TEST_F(HloOpProfilesTest, GetProfile) { auto hlo_op_profiles = HloOpProfiles::Load(kDeviceHloOpProfiles, "sm_80"); auto device_info_sm_90 = TestGpuDeviceInfo::RTXA6000DeviceInfo( stream_executor::CudaComputeCapability(9, 0)); const auto& op_profile = hlo_op_profiles->GetProfile(device_info_sm_90); ASSERT_TRUE(op_profile.contains( std::make_pair(HloOpcode::kDivide, PrimitiveType::F32))); EXPECT_EQ( op_profile.at(std::make_pair(HloOpcode::kDivide, PrimitiveType::F32)), 32); } TEST_F(HloOpProfilesTest, GetProfileDefault) { auto hlo_op_profiles = HloOpProfiles::Load(kDeviceHloOpProfiles, "sm_80"); auto device_info_sm_85 = TestGpuDeviceInfo::RTXA6000DeviceInfo( stream_executor::CudaComputeCapability(8, 5)); const auto& op_profile = hlo_op_profiles->GetProfile(device_info_sm_85); ASSERT_TRUE(op_profile.contains( std::make_pair(HloOpcode::kMultiply, PrimitiveType::F32))); EXPECT_EQ( op_profile.at(std::make_pair(HloOpcode::kMultiply, PrimitiveType::F32)), 64); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/hlo_op_profiles.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/hlo_op_profiles_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
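A compact illustrative fragment (not part of the test file above) tying the two lookups together: the profile key is derived from the device's compute capability as "sm_<major><minor>" (see GetProfileName in the implementation), and devices without their own entry fall back to the default profile name passed to Load(). It assumes the kDeviceHloOpProfiles textproto and the includes from the test above.

auto hlo_op_profiles = HloOpProfiles::Load(kDeviceHloOpProfiles, "sm_80");
// (9, 0) -> "sm_90", which has its own entry in the textproto.
const auto& sm90_profile = hlo_op_profiles->GetProfile(
    TestGpuDeviceInfo::RTXA6000DeviceInfo(
        stream_executor::CudaComputeCapability(9, 0)));
EXPECT_EQ(
    sm90_profile.at(std::make_pair(HloOpcode::kDivide, PrimitiveType::F32)),
    32);
// (8, 5) -> "sm_85", which is absent, so the "sm_80" default profile is used.
const auto& fallback_profile = hlo_op_profiles->GetProfile(
    TestGpuDeviceInfo::RTXA6000DeviceInfo(
        stream_executor::CudaComputeCapability(8, 5)));
EXPECT_EQ(fallback_profile.at(
              std::make_pair(HloOpcode::kMultiply, PrimitiveType::F32)),
          64);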
b915529b-6b60-4540-9f51-ef7f6ea5b06a
cpp
tensorflow/tensorflow
gpu_indexing_performance_model
third_party/xla/xla/service/gpu/model/gpu_indexing_performance_model.cc
third_party/xla/xla/service/gpu/model/gpu_indexing_performance_model_test.cc
#include "xla/service/gpu/model/gpu_indexing_performance_model.h" #include <algorithm> #include <cstdint> #include <optional> #include <utility> #include <variant> #include <vector> #include "absl/container/flat_hash_map.h" #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "absl/time/time.h" #include "absl/types/span.h" #include "llvm/Support/MathExtras.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/fusions/triton.h" #include "xla/service/gpu/hlo_fusion_analysis.h" #include "xla/service/gpu/hlo_traversal.h" #include "xla/service/gpu/ir_emission_utils.h" #include "xla/service/gpu/launch_dimensions.h" #include "xla/service/gpu/model/coalescing_analysis.h" #include "xla/service/gpu/model/gpu_hlo_cost_analysis.h" #include "xla/service/gpu/model/gpu_performance_model_base.h" #include "xla/service/gpu/model/indexing_analysis.h" #include "xla/service/gpu/model/indexing_map.h" #include "xla/service/gpu/model/symbolic_tile_analysis.h" #include "xla/service/gpu/model/tiled_hlo_computation.h" #include "xla/service/gpu/model/triton_emitter_constraints.h" #include "xla/service/instruction_fusion.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/stream_executor/device_description.h" #include "xla/util.h" #include "tsl/platform/status.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { struct OperandReadInfo { int64_t total_bytes_read = 0; int64_t is_coalesced = true; }; int64_t GetPaddedTileSize(absl::Span<int64_t const> tile_sizes) { int64_t result = 1; for (int64_t tile_size : tile_sizes) { result *= llvm::PowerOf2Ceil(tile_size); } return result; } bool DoesTileFitsInRegisters(int64_t tile_size, const se::DeviceDescription& device_info) { constexpr double kFractionOfRegistersAvailableToStoreTile = 0.4; return tile_size <= kFractionOfRegistersAvailableToStoreTile * device_info.registers_per_block_limit(); } int64_t GetNumWarps(int64_t tile_size) { if (tile_size <= 512) return 1; if (tile_size <= 1024) return 2; if (tile_size <= 16384) return 4; if (tile_size <= 32768) return 8; if (tile_size <= 65536) return 16; return 32; } } int64_t GpuPerformanceModelWithIndexingAnalysis::FlopsPerElement( const HloInstruction* instr) { switch (instr->opcode()) { case HloOpcode::kBitcast: case HloOpcode::kBroadcast: case HloOpcode::kConstant: case HloOpcode::kDynamicSlice: case HloOpcode::kDynamicUpdateSlice: case HloOpcode::kGather: case HloOpcode::kIota: case HloOpcode::kPad: case HloOpcode::kParameter: case HloOpcode::kSlice: case HloOpcode::kTranspose: case HloOpcode::kTuple: return 0; default: break; }; if (instr->IsElementwise()) { return cost_analysis_.GetFlopsPerElementwiseOpElement( instr->shape().element_type(), instr->opcode()); } if (instr->opcode() == HloOpcode::kReduce) { int64_t flops_per_reduce_computation = 0; for (const HloInstruction* reducer_instr : instr->called_computations()[0]->instructions()) { flops_per_reduce_computation += FlopsPerElement(reducer_instr); } auto operand_shape = instr->operand(0)->shape(); auto output_shape = instr->shape().IsArray() ? 
instr->shape() : instr->shape().tuple_shapes(0); int64_t reduction_factor = ShapeUtil::ElementsIn(operand_shape) / ShapeUtil::ElementsIn(output_shape); return (reduction_factor - 1) * flops_per_reduce_computation; } TF_CHECK_OK( cost_analysis_.RevisitInstruction(const_cast<HloInstruction*>(instr))); return cost_analysis_.flop_count(*instr) / ShapeUtil::ElementsInRecursive(instr->shape()); } int64_t GpuPerformanceModelWithIndexingAnalysis::GetShapeSizeRecursive( const Shape& shape) const { CHECK(shape.IsArray() || shape.IsTuple()); if (shape.IsArray()) { return shape_size_(shape); } int64_t total_size = 0; for (const auto& element_shape : shape.tuple_shapes()) { total_size += GetShapeSizeRecursive(element_shape); } return total_size; } int64_t GetIterationSpaceSize(const IndexingMap& indexing_map, const HloInstruction* instr) { if (indexing_map.IsUndefined()) { return ShapeUtil::ElementsInRecursive(instr->shape()); } if (indexing_map.IsKnownEmpty()) { return 0; } auto get_ranges_iteration_space_size = [](const std::vector<Interval>& ranges) { int64_t num_iters = 1; for (const Interval& range : ranges) { num_iters *= range.upper - range.lower + 1; } return num_iters; }; return get_ranges_iteration_space_size(indexing_map.GetSymbolBounds()) * get_ranges_iteration_space_size(indexing_map.GetDimensionBounds()); } EstimateRunTimeData GpuPerformanceModelWithIndexingAnalysis::EstimateRunTimeForFusion( const HloFusionAnalysis& fusion_analysis, bool is_coalesced) { auto& fusion_adaptor = fusion_analysis.fusion(); VLOG(5) << "EstimateRunTimeForFusion: " << fusion_adaptor.ToString(); auto roots = fusion_adaptor.GetRoots(); CHECK_EQ(roots.size(), 1) << "Indexing cost model doesn't support multi-output fusions."; auto root_shape = roots.front().shape(); LaunchDimensions launch_dimensions = EstimateFusionLaunchDimensions(fusion_analysis); int64_t num_blocks = launch_dimensions.num_blocks(); auto grouped_fusion_indexing = ComputeGroupedOutputToInputIndexing( fusion_adaptor, roots[0], mlir_context_); int64_t flops = 0; int64_t bytes_read = 0; absl::Duration read_time = absl::ZeroDuration(); for (const auto& [instr, indexing_maps] : grouped_fusion_indexing) { VLOG(10) << "instr: " << instr->name(); bool is_operand = !fusion_adaptor.ContainsInstruction(instr); auto element_type = instr->shape().element_type(); int64_t n_bytes_total = 0; for (const auto& indexing_map : indexing_maps) { VLOG(10) << indexing_map; int64_t num_iters = GetIterationSpaceSize(indexing_map, instr); if (is_operand) { int64_t type_size = ShapeUtil::ByteSizeOfPrimitiveType(element_type); n_bytes_total += type_size * num_iters; } else { int64_t flops_per_element = FlopsPerElement(instr); flops += flops_per_element * num_iters; } } if (is_operand) { int64_t operand_size = shape_size_(instr->shape()); int64_t n_bytes_net = std::min(operand_size, n_bytes_total); bytes_read += n_bytes_total; VLogOperandRead(instr, n_bytes_total, n_bytes_net, is_coalesced); read_time += ReadTimeWithDRAMHeuristic(*device_info_, num_blocks, n_bytes_net, n_bytes_total, element_type, is_coalesced); } } int64_t bytes_written = GetShapeSizeRecursive(root_shape); absl::Duration compute_time = ComputeTime(*device_info_, flops, num_blocks, launch_dimensions.num_threads_per_block()); absl::Duration write_time = WriteTime(*device_info_, bytes_written); absl::Duration memory_access_time = read_time + write_time; absl::Duration exec_time = CombineComputeAndMemoryAccessTime( compute_time, memory_access_time, GpuPerformanceModelOptions::PriorityFusion()); 
EstimateRunTimeData runtime_data = {flops, bytes_read, bytes_written, read_time, write_time, compute_time, exec_time}; VLOG(3) << "Runtime data for HLO fusion: " << fusion_adaptor.ToString() << "\n" << launch_dimensions.ToString() << "\n" << runtime_data.ToString(); return runtime_data; } EstimateRunTimeData GpuPerformanceModelWithIndexingAnalysis::EstimateRunTimeForInstruction( const HloInstruction* producer) { if (producer->opcode() == HloOpcode::kBitcast) { return EstimateRunTimeData::Zero(); } auto fusion_analysis = HloFusionAnalysis::Create(*producer, *device_info_); bool is_coalesced = IsReadCoalescedHeuristic( fusion_analysis.GetEmitterFusionKind(), producer); return EstimateRunTimeForFusion(fusion_analysis, is_coalesced); } EstimateRunTimeData GpuPerformanceModelWithIndexingAnalysis::EstimateRunTimeForProducerConsumer( const HloInstruction* producer, const HloInstruction* consumer) { auto fusion_analysis = HloFusionAnalysis::Create(*producer, *consumer, *device_info_); bool is_coalesced = IsReadCoalescedHeuristic( fusion_analysis.GetEmitterFusionKind(), producer, consumer); return EstimateRunTimeForFusion(fusion_analysis, is_coalesced); } GpuPerformanceModelWithIndexingAnalysis::RunTimes GpuPerformanceModelWithIndexingAnalysis::EstimateRunTimes( const HloInstruction* producer, absl::Span<const HloInstruction* const> fused_consumers) { auto producer_runtime = EstimateRunTimeForInstruction(producer); absl::Duration time_unfused = kKernelLaunchOverhead * (fused_consumers.size() + 1) + producer_runtime.exec_time; absl::Duration time_fused = kKernelLaunchOverhead * fused_consumers.size(); for (const auto& consumer : fused_consumers) { time_unfused += EstimateRunTimeForInstruction(consumer).exec_time; time_fused += EstimateRunTimeForProducerConsumer(producer, consumer).exec_time; } return {time_unfused, time_fused}; } absl::StatusOr<EstimateRunTimeData> GpuPerformanceModelWithIndexingAnalysis::EstimateRunTimeForTiledHloComputation( const HloFusionAdaptor& fusion_adaptor, const TiledHloComputation& tiled_hlo_computation, const LaunchDimensions& launch_dimensions) { absl::flat_hash_map<const HloInstruction*, OperandReadInfo> n_bytes_total_map; int64_t flops = 0; int64_t bytes_read = 0; int64_t num_blocks = launch_dimensions.num_blocks(); for (const auto& tiled_hlo : tiled_hlo_computation.instructions()) { int64_t padded_tile_size = GetPaddedTileSize(tiled_hlo->tile_sizes()); if (!DoesTileFitsInRegisters(padded_tile_size, *device_info_)) { return EstimateRunTimeData::Infinite(); } const HloInstruction* hlo = tiled_hlo->hlo(); if (fusion_adaptor.ContainsInstruction(hlo)) { if (hlo->opcode() == HloOpcode::kConcatenate) { return absl::FailedPreconditionError( "Concatenate is not supported by the indexing cost model."); } int64_t num_elements = num_blocks * padded_tile_size; flops += FlopsPerElement(hlo) * num_elements; } else { int64_t tile_size = Product(tiled_hlo->tile_sizes()); int64_t num_elements = num_blocks * tile_size; int64_t element_type_size = ShapeUtil::ByteSizeOfPrimitiveType(hlo->shape().element_type()); int64_t tile_bytes_read = element_type_size * num_elements; bytes_read += tile_bytes_read; bool is_coalesced = IsTiledReadCoalescedHeuristic(*tiled_hlo, *device_info_); OperandReadInfo& operand_read_info = n_bytes_total_map[hlo]; operand_read_info.total_bytes_read += tile_bytes_read; operand_read_info.is_coalesced &= is_coalesced; } } absl::Duration read_time = absl::ZeroDuration(); for (const auto& [hlo, operand_read_info] : n_bytes_total_map) { int64_t operand_size = 
shape_size_(hlo->shape()); int64_t n_bytes_net = std::min(operand_size, operand_read_info.total_bytes_read); read_time += ReadTimeWithDRAMHeuristic(*device_info_, num_blocks, n_bytes_net, operand_read_info.total_bytes_read, hlo->shape().element_type(), operand_read_info.is_coalesced); } int64_t bytes_written = GetShapeSizeRecursive(tiled_hlo_computation.GetRoot()->hlo()->shape()); absl::Duration compute_time = ComputeTime(*device_info_, flops, launch_dimensions.num_blocks(), launch_dimensions.num_threads_per_block()); absl::Duration write_time = WriteTime(*device_info_, bytes_written); absl::Duration memory_access_time = read_time + write_time; absl::Duration exec_time = CombineComputeAndMemoryAccessTime( compute_time, memory_access_time, GpuPerformanceModelOptions::PriorityFusion()); return EstimateRunTimeData{flops, bytes_read, bytes_written, read_time, write_time, compute_time, exec_time}; } absl::StatusOr<EstimateRunTimeData> GpuPerformanceModelWithIndexingAnalysis::EstimateRunTimeForTiledFusion( const HloFusionAdaptor& fusion_adaptor, const LaunchDimensions& launch_dimensions, absl::Span<const int64_t> tile_sizes) { SymbolicTileAnalysisOrError analysis_or_error = SymbolicTileAnalysis::AnalyzeFusion( fusion_adaptor, mlir_context_, nullptr); if (const auto* fusion_decision = std::get_if<FusionDecision>(&analysis_or_error)) { return absl::FailedPreconditionError(absl::StrCat( "SymbolicTileAnalysis failed. ", fusion_decision->Explain())); } SymbolicTileAnalysis analysis = std::get<SymbolicTileAnalysis>(std::move(analysis_or_error)); TF_ASSIGN_OR_RETURN(TiledHloComputation tiled_hlo_computation, analysis.ComputeTiledHloInstructions(tile_sizes)); return EstimateRunTimeForTiledHloComputation( fusion_adaptor, tiled_hlo_computation, launch_dimensions); } absl::StatusOr<EstimateRunTimeData> GpuPerformanceModelWithIndexingAnalysis::EstimateRunTimeForTriton( const HloInstruction* producer, const HloInstruction* consumer) { const auto& fusion_analysis = (consumer == nullptr) ? 
fusion_analysis_cache_->Get(*producer) : fusion_analysis_cache_->Get(*producer, *consumer); auto launch_config = TritonFusion(fusion_analysis).launch_config(); if (!launch_config.has_value()) { return absl::InvalidArgumentError( "Could not get launch config for Triton fusion."); } return EstimateRunTimeForTiledFusion( fusion_analysis.fusion(), launch_config->launch_dimensions, launch_config->block_level_parameters.output_tile_sizes); } LaunchDimensions GpuPerformanceModelWithIndexingAnalysis::GetLaunchDimensionsForTiledFusion( const TiledHloComputation& tiled_hlo_computation) { int64_t num_blocks = tiled_hlo_computation.num_output_tiles(); int64_t largest_live_tile_size = 1; for (const auto& tiled_hlo : tiled_hlo_computation.instructions()) { largest_live_tile_size = std::max( largest_live_tile_size, GetPaddedTileSize(tiled_hlo->tile_sizes())); } int64_t num_warps = GetNumWarps(largest_live_tile_size); return {static_cast<uint64_t>(num_blocks), static_cast<uint64_t>(num_warps * WarpSize())}; } absl::StatusOr<TiledRunTimeDataOrError> GpuPerformanceModelWithIndexingAnalysis::TryFindBestTilingForFusion( const HloFusionAdaptor& fusion_adaptor) { SymbolicTileAnalysisOrError analysis_or_error = SymbolicTileAnalysis::AnalyzeFusion( fusion_adaptor, mlir_context_, TritonEmitterConstraints::GetBuilder(*device_info_)); if (const auto* fusion_decision = std::get_if<FusionDecision>(&analysis_or_error)) { return *fusion_decision; } SymbolicTileAnalysis analysis = std::get<SymbolicTileAnalysis>(std::move(analysis_or_error)); TF_ASSIGN_OR_RETURN(auto tilings, analysis.GetGoodTilings()); std::optional<TiledRunTimeData> best_tiled_run_time_data; for (const auto& tiling : tilings) { TF_ASSIGN_OR_RETURN(TiledHloComputation tiled_hlo_computation, analysis.ComputeTiledHloInstructions(tiling)); LaunchDimensions launch_dimensions = GetLaunchDimensionsForTiledFusion(tiled_hlo_computation); TF_ASSIGN_OR_RETURN( EstimateRunTimeData estimate_run_time_data, EstimateRunTimeForTiledHloComputation( fusion_adaptor, tiled_hlo_computation, launch_dimensions)); if (!best_tiled_run_time_data.has_value() || estimate_run_time_data.exec_time < best_tiled_run_time_data->runtime_data.exec_time) { BlockLevelParameters block_level_parameters; block_level_parameters.output_tile_sizes = std::vector<int64_t>(tiling.begin(), tiling.end()); block_level_parameters.num_warps = launch_dimensions.num_threads_per_block() / WarpSize(); best_tiled_run_time_data = TiledRunTimeData{estimate_run_time_data, block_level_parameters}; } } if (!best_tiled_run_time_data.has_value()) { return FusionDecision::Forbid("No valid tilings found."); } return *best_tiled_run_time_data; } } }
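The helpers GetPaddedTileSize, DoesTileFitsInRegisters, and GetNumWarps in the model source above drive the register-spill check and the warp count chosen for tiled fusions. The following standalone C++ sketch restates that arithmetic for illustration only; PowerOf2Ceil here is a plain stand-in for llvm::PowerOf2Ceil, and the register limit is passed as a parameter instead of being read from se::DeviceDescription.

// Standalone sketch of the tile-size heuristics used by the indexing cost
// model above. The register limit is a parameter here; the real code reads
// device_info.registers_per_block_limit().
#include <cstdint>
#include <vector>

// Plain stand-in for llvm::PowerOf2Ceil (valid for x >= 1).
inline uint64_t PowerOf2Ceil(uint64_t x) {
  uint64_t result = 1;
  while (result < x) result <<= 1;
  return result;
}

// Product of the per-dimension tile sizes, each padded up to a power of two.
inline int64_t PaddedTileSize(const std::vector<int64_t>& tile_sizes) {
  int64_t result = 1;
  for (int64_t size : tile_sizes) result *= PowerOf2Ceil(size);
  return result;
}

// A tile "fits" if it needs at most 40% of the registers available to a
// block; otherwise the model reports an infinite run time (register spill).
inline bool TileFitsInRegisters(int64_t padded_tile_size,
                                int64_t registers_per_block_limit) {
  constexpr double kFractionOfRegistersAvailableToStoreTile = 0.4;
  return padded_tile_size <=
         kFractionOfRegistersAvailableToStoreTile * registers_per_block_limit;
}

// Warp count grows with the padded size of the largest live tile.
inline int64_t NumWarps(int64_t padded_tile_size) {
  if (padded_tile_size <= 512) return 1;
  if (padded_tile_size <= 1024) return 2;
  if (padded_tile_size <= 16384) return 4;
  if (padded_tile_size <= 32768) return 8;
  if (padded_tile_size <= 65536) return 16;
  return 32;
}

For example, a 65x65 output tile pads to 128x128 = 16384 elements, which is the padded size checked by the UsesPaddedTileSizeForMemoryAccessTime test in the unit-test file that follows, and 16384 elements map to 4 warps.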
#include "xla/service/gpu/model/gpu_indexing_performance_model.h" #include <cstdint> #include <memory> #include <variant> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/status/status.h" #include "absl/strings/string_view.h" #include "absl/time/time.h" #include "mlir/IR/MLIRContext.h" #include "xla/hlo/ir/hlo_computation.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/service/gpu/backend_configs.pb.h" #include "xla/service/gpu/gpu_device_info_for_tests.h" #include "xla/service/gpu/hlo_traversal.h" #include "xla/service/gpu/ir_emission_utils.h" #include "xla/service/gpu/launch_dimensions.h" #include "xla/service/gpu/model/fusion_analysis_cache.h" #include "xla/service/gpu/model/gpu_hlo_cost_analysis.h" #include "xla/service/gpu/model/gpu_performance_model_base.h" #include "xla/service/gpu/model/symbolic_tile_analysis.h" #include "xla/service/gpu/model/tiled_hlo_computation.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/stream_executor/device_description.h" #include "xla/test_helpers.h" #include "xla/tests/hlo_test_base.h" #include "xla/tsl/lib/core/status_test_util.h" #include "tsl/platform/status_matchers.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { namespace { using ::testing::ElementsAre; using ::testing::HasSubstr; using ::tsl::testing::StatusIs; class GpuIndexingPerformanceModelTest : public HloTestBase { public: GpuHloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() const { return [&](const Shape& shape) { constexpr int64_t kPointerSize = 8; return ShapeUtil::ByteSizeOf(shape, kPointerSize); }; } mlir::MLIRContext mlir_context_; se::DeviceDescription device_info_{TestGpuDeviceInfo::RTXA6000DeviceInfo()}; HloFusionAnalysisCache fusion_analysis_cache_{device_info_}; GpuPerformanceModelWithIndexingAnalysis indexing_cost_model_{ &device_info_, &fusion_analysis_cache_, ShapeSizeBytesFunction(), &mlir_context_}; GpuIndexingPerformanceModelTest() : HloTestBase() {} }; TEST_F(GpuIndexingPerformanceModelTest, BroadcastElementwise) { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule( R"( HloModule extracted ENTRY entry_computation { param_0 = f32[32]{0} parameter(0) broadcast = f32[32,1,768]{2,1,0} broadcast(param_0), dimensions={0} param_1 = f32[32,1,768]{2,1,0} parameter(1) ROOT multiply = f32[32,1,768]{2,1,0} multiply(broadcast, param_1) } )")); auto producer = module->entry_computation()->GetInstructionWithName("broadcast"); auto consumer = module->entry_computation()->GetInstructionWithName("multiply"); auto runtime_data = indexing_cost_model_.EstimateRunTimeForProducerConsumer( producer, consumer); EXPECT_EQ(runtime_data.flops, 73728); EXPECT_EQ(runtime_data.bytes_written, 98304); EXPECT_NEAR(absl::ToInt64Nanoseconds(runtime_data.write_time), 128, 2); EXPECT_NEAR(absl::ToInt64Nanoseconds(runtime_data.exec_time), 267, 2); } TEST_F(GpuIndexingPerformanceModelTest, Bitcast) { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule( R"( HloModule m ENTRY entry_computation { param_0 = bf16[4,8,65,128]{3,2,1,0} parameter(0) ROOT bitcast = bf16[8,4,65,128]{3,2,0,1} bitcast(param_0) } )")); auto instruction = module->entry_computation()->GetInstructionWithName("bitcast"); auto runtime_data = indexing_cost_model_.EstimateRunTimeForInstruction(instruction); EXPECT_EQ(runtime_data.flops, 0); EXPECT_EQ(runtime_data.bytes_written, 0); EXPECT_EQ(runtime_data.write_time, absl::ZeroDuration()); EXPECT_EQ(runtime_data.exec_time, absl::ZeroDuration()); } TEST_F(GpuIndexingPerformanceModelTest, Reduce) 
{ TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule( R"( HloModule m add { param_0 = f32[] parameter(0) param_1 = f32[] parameter(1) ROOT add.0 = f32[] add(param_0, param_1) } ENTRY entry_computation { param_0.3 = f32[32,40]{1,0} parameter(0) constant = f32[] constant(0) ROOT reduce = f32[32]{0} reduce(param_0.3, constant), dimensions={1}, to_apply=add } )")); auto instruction = module->entry_computation()->root_instruction(); auto runtime_data = indexing_cost_model_.EstimateRunTimeForInstruction(instruction); EXPECT_EQ(runtime_data.flops, 3744); EXPECT_EQ(runtime_data.bytes_written, 128); EXPECT_NEAR(absl::ToDoubleNanoseconds(runtime_data.write_time), 0, 1); EXPECT_NEAR(absl::ToDoubleNanoseconds(runtime_data.exec_time), 29, 1); } TEST_F(GpuIndexingPerformanceModelTest, VariadicReduce) { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule( R"( HloModule m add { param_0 = f32[] parameter(0) param_1 = f32[] parameter(1) param_2 = f32[] parameter(2) param_3 = f32[] parameter(3) add.0 = f32[] add(param_0, param_2) add.1 = f32[] add(param_1, param_3) ROOT t = (f32[], f32[]) tuple(add.0, add.1) } ENTRY entry_computation { param_0.3 = f32[32,40]{1,0} parameter(0) param_1.3 = f32[32,40]{1,0} parameter(1) param_2.2 = f32[] parameter(2) constant = f32[] constant(0) ROOT reduce = (f32[32]{0}, f32[32]{0}) reduce(param_0.3, param_1.3, param_2.2, constant), dimensions={1}, to_apply=add } )")); auto instruction = module->entry_computation()->root_instruction(); auto runtime_data = indexing_cost_model_.EstimateRunTimeForInstruction(instruction); EXPECT_EQ(runtime_data.flops, 7488); EXPECT_EQ(runtime_data.bytes_written, 256); EXPECT_NEAR(absl::ToDoubleNanoseconds(runtime_data.write_time), 0, 1); EXPECT_NEAR(absl::ToDoubleNanoseconds(runtime_data.exec_time), 58, 1); } TEST_F(GpuIndexingPerformanceModelTest, TritonSoftmaxFusionInstructionIsSupported) { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"( HloModule m add { Arg_0 = f32[] parameter(0) Arg_1 = f32[] parameter(1) ROOT add = f32[] add(Arg_0, Arg_1) } triton_softmax_computation { param_0 = f32[512,911]{1,0} parameter(0) param_1 = f32[911]{0} parameter(1) broadcast_0 = f32[512,911]{1,0} broadcast(param_1), dimensions={1} multiply_0 = f32[512,911]{1,0} multiply(param_0, broadcast_0) constant_0 = f32[] constant(0) reduce_0 = f32[512]{0} reduce(multiply_0, constant_0), dimensions={1}, to_apply=add broadcast_4 = f32[512,911]{1,0} broadcast(reduce_0), dimensions={0} ROOT multiply = f32[512,911]{1,0} multiply(multiply_0, broadcast_4) } ENTRY main { param_0 = f32[512,911]{1,0} parameter(0) param_1 = f32[911]{0} parameter(1) ROOT triton_softmax = f32[512,911]{1,0} fusion(param_0, param_1), kind=kCustom, calls=triton_softmax_computation, backend_config={"fusion_backend_config": {"kind":"__triton","block_level_fusion_config":{"output_tile_sizes":["1","911"],"num_warps":"2"}}} } )")); TF_ASSERT_OK_AND_ASSIGN(auto runtime_data, indexing_cost_model_.EstimateRunTimeForTriton( module->entry_computation()->root_instruction())); constexpr int64_t kParam0SizeBytes = 512 * 911 * 4; constexpr int64_t kParam1SizeBytes = 911 * 4; constexpr int64_t kOutputSizeBytes = 512 * 911 * 4; constexpr int64_t kExpectedBytesRead = kParam0SizeBytes + 512 * kParam1SizeBytes; EXPECT_EQ(runtime_data.bytes_read, kExpectedBytesRead); EXPECT_EQ(runtime_data.bytes_written, kOutputSizeBytes); EXPECT_NEAR(absl::ToDoubleMicroseconds(runtime_data.exec_time), 5, 1); } TEST_F(GpuIndexingPerformanceModelTest, 
TritonSoftmaxProducerConsumerFusionIsSupported) { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"( HloModule m add { Arg_0 = f32[] parameter(0) Arg_1 = f32[] parameter(1) ROOT add = f32[] add(Arg_0, Arg_1) } fusion { param_0 = f32[512,911] parameter(0) param_1 = f32[911] parameter(1) broadcast = f32[512,911] broadcast(param_1), dimensions={1} ROOT multiply = f32[512,911] multiply(param_0, broadcast) } triton_softmax_computation { param_0 = f32[512,911] parameter(0) constant_0 = f32[] constant(0) reduce_0 = f32[512] reduce(param_0, constant_0), dimensions={1}, to_apply=add broadcast_4 = f32[512,911] broadcast(reduce_0), dimensions={0} ROOT multiply = f32[512,911] multiply(param_0, broadcast_4) } ENTRY main { param_0 = f32[512,911] parameter(0) param_1 = f32[911] parameter(1) fusion.1 = f32[512,911] fusion(param_0, param_1), kind=kLoop, calls=fusion ROOT triton_softmax = f32[512,911] fusion(fusion.1), kind=kCustom, calls=triton_softmax_computation, backend_config={"fusion_backend_config": {"kind":"__triton","block_level_fusion_config":{"output_tile_sizes":["1","911"],"num_warps":"2"}}} } )")); auto consumer = module->entry_computation()->root_instruction(); auto producer = consumer->operand(0); TF_ASSERT_OK_AND_ASSIGN( auto runtime_data, indexing_cost_model_.EstimateRunTimeForTriton(producer, consumer)); constexpr int64_t kParam0SizeBytes = 512 * 911 * 4; constexpr int64_t kParam1SizeBytes = 911 * 4; constexpr int64_t kOutputSizeBytes = 512 * 911 * 4; constexpr int64_t kExpectedBytesRead = kParam0SizeBytes + 512 * kParam1SizeBytes; EXPECT_EQ(runtime_data.bytes_read, kExpectedBytesRead); EXPECT_EQ(runtime_data.bytes_written, kOutputSizeBytes); EXPECT_NEAR(absl::ToDoubleMicroseconds(runtime_data.exec_time), 5, 1); } TEST_F(GpuIndexingPerformanceModelTest, EstimateBestTiling_TritonSoftmax_IsSupported) { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"( HloModule m add { Arg_0 = f32[] parameter(0) Arg_1 = f32[] parameter(1) ROOT add = f32[] add(Arg_0, Arg_1) } triton_softmax_computation { param_0 = f32[512,911]{1,0} parameter(0) param_1 = f32[911]{0} parameter(1) broadcast_0 = f32[512,911]{1,0} broadcast(param_1), dimensions={1} multiply_0 = f32[512,911]{1,0} multiply(param_0, broadcast_0) constant_0 = f32[] constant(0) reduce_0 = f32[512]{0} reduce(multiply_0, constant_0), dimensions={1}, to_apply=add broadcast_4 = f32[512,911]{1,0} broadcast(reduce_0), dimensions={0} ROOT multiply = f32[512,911]{1,0} multiply(multiply_0, broadcast_4) } ENTRY main { param_0 = f32[512,911]{1,0} parameter(0) param_1 = f32[911]{0} parameter(1) ROOT triton_softmax = f32[512,911]{1,0} fusion(param_0, param_1), kind=kCustom, calls=triton_softmax_computation, backend_config={"fusion_backend_config": {"kind":"__triton"}} } )")); auto fusion_adaptor = HloFusionAdaptor::ForInstruction( module->entry_computation()->root_instruction()); TF_ASSERT_OK_AND_ASSIGN( auto tiling_result, indexing_cost_model_.TryFindBestTilingForFusion(*fusion_adaptor)); ASSERT_TRUE(std::holds_alternative<TiledRunTimeData>(tiling_result)); auto tiled_runtime_data = std::get<TiledRunTimeData>(tiling_result); constexpr int64_t kParam0SizeBytes = 512 * 911 * 4; constexpr int64_t kParam1SizeBytes = 911 * 4; constexpr int64_t kOutputSizeBytes = 512 * 911 * 4; constexpr int64_t kExpectedBytesRead = kParam0SizeBytes + 128 * kParam1SizeBytes; EXPECT_THAT(tiled_runtime_data.block_level_parameters.output_tile_sizes, ElementsAre(4, 911)); EXPECT_EQ(tiled_runtime_data.block_level_parameters.num_warps, 4); 
EXPECT_EQ(tiled_runtime_data.runtime_data.bytes_read, kExpectedBytesRead); EXPECT_EQ(tiled_runtime_data.runtime_data.bytes_written, kOutputSizeBytes); EXPECT_NEAR( absl::ToDoubleMicroseconds(tiled_runtime_data.runtime_data.exec_time), 5, 1); } TEST_F( GpuIndexingPerformanceModelTest, EstimateRunTimeForTiledFusion_NumberOfTilesLargerThanInt32Max_IsSupported) { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"( HloModule softmax max_computation { arg_0 = f16[] parameter(0) arg_1 = f16[] parameter(1) ROOT maximum = f16[] maximum(arg_0, arg_1) } softmax { param_0 = f16[131076,16384]{1,0} parameter(0) constant_neg_inf = f16[] constant(-inf) reduce = f16[131076]{0} reduce(param_0, constant_neg_inf), dimensions={1}, to_apply=max_computation broadcast = f16[131076,16384]{1,0} broadcast(reduce), dimensions={0} ROOT subtract = f16[131076,16384]{1,0} subtract(param_0, broadcast) } ENTRY main { param_0 = f16[131076,16384]{1,0} parameter(0) ROOT fusion = f16[131076,16384]{1,0} fusion(param_0), kind=kCustom, calls=softmax })")); auto fusion_adaptor = HloFusionAdaptor::ForInstruction( module->entry_computation()->root_instruction()); LaunchDimensions launch_dimensions{131076LL * 16384LL, 32}; TF_ASSERT_OK_AND_ASSIGN( auto runtime_data, indexing_cost_model_.EstimateRunTimeForTiledFusion( *fusion_adaptor, launch_dimensions, {1, 1})); EXPECT_NEAR(absl::ToDoubleSeconds(runtime_data.read_time), 2931, 1); EXPECT_NEAR(absl::ToDoubleSeconds(runtime_data.compute_time), 19, 1); EXPECT_NEAR(absl::ToDoubleSeconds(runtime_data.exec_time), 2932, 1); } TEST_F(GpuIndexingPerformanceModelTest, EstimateRunTimeForTiledFusion_ConcatenateOperandIsSupported) { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"( HloModule m fusion { param_0 = f32[32,64] parameter(0) param_1 = f32[32,64] parameter(1) ROOT subtract = f32[32,64] subtract(param_0, param_1) } ENTRY main { param_0 = f32[32,16] parameter(0) param_1 = f32[32,48] parameter(1) param_2 = f32[32,64] parameter(2) concatenate = f32[32,64] concatenate(param_0, param_1), dimensions={1} ROOT fusion = f32[32,64] fusion(concatenate, param_2), kind=kCustom, calls=fusion })")); auto fusion_adaptor = HloFusionAdaptor::ForInstruction( module->entry_computation()->root_instruction()); LaunchDimensions launch_dimensions{8, WarpSize()}; auto result = indexing_cost_model_.EstimateRunTimeForTiledFusion( *fusion_adaptor, launch_dimensions, {16, 16}); TF_EXPECT_OK(result.status()); } TEST_F(GpuIndexingPerformanceModelTest, EstimateRunTimeForTiledFusion_ConcatenateIsNotSupported) { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"( HloModule m concatenate_fusion { param_0 = f32[32, 128] parameter(0) param_1 = f32[64, 128] parameter(1) ROOT concatenate = f32[96, 128] concatenate(param_0, param_1), dimensions={0} } ENTRY main { param_0 = f32[32, 128] parameter(0) param_1 = f32[64, 128] parameter(1) ROOT fusion = f32[96, 128] fusion(param_0, param_1), kind=kCustom, calls=concatenate_fusion })")); auto fusion_adaptor = HloFusionAdaptor::ForInstruction( module->entry_computation()->root_instruction()); LaunchDimensions launch_dimensions{96, 128}; auto result = indexing_cost_model_.EstimateRunTimeForTiledFusion( *fusion_adaptor, launch_dimensions, {1, 128}); EXPECT_THAT(result, StatusIs(absl::StatusCode::kFailedPrecondition, HasSubstr("SymbolicTileAnalysis failed"))); } TEST_F(GpuIndexingPerformanceModelTest, EstimateRunTimeForTiledFusion_RegisterSpill_ReturnsInfinite) { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"( 
HloModule m add { Arg_0 = f32[] parameter(0) Arg_1 = f32[] parameter(1) ROOT add = f32[] add(Arg_0, Arg_1) } triton_softmax_computation { param_0 = f32[16,16000] parameter(0) constant_0 = f32[] constant(0) reduce_0 = f32[16] reduce(param_0, constant_0), dimensions={1}, to_apply=add broadcast = f32[16,16000] broadcast(reduce_0), dimensions={0} ROOT multiply = f32[16,16000] multiply(param_0, broadcast) } ENTRY main { param_0 = f32[16,16000] parameter(0) ROOT triton_softmax = f32[16,16000] fusion(param_0), kind=kCustom, calls=triton_softmax_computation, backend_config={"fusion_backend_config": {"kind":"__triton"}} } )")); auto fusion_adaptor = HloFusionAdaptor::ForInstruction( module->entry_computation()->root_instruction()); TF_ASSERT_OK_AND_ASSIGN( auto tiling_result, indexing_cost_model_.TryFindBestTilingForFusion(*fusion_adaptor)); TF_ASSERT_OK_AND_ASSIGN(auto res1, indexing_cost_model_.EstimateRunTimeForTiledFusion( *fusion_adaptor, {16, 32}, {1, 16000})); EXPECT_NEAR(absl::ToDoubleMicroseconds(res1.exec_time), 3, 1); TF_ASSERT_OK_AND_ASSIGN(auto res2, indexing_cost_model_.EstimateRunTimeForTiledFusion( *fusion_adaptor, {8, 32}, {2, 16000})); EXPECT_TRUE(res2.IsInfinite()); } TEST_F(GpuIndexingPerformanceModelTest, EstimateRunTimeForTiledFusion_UsesPaddedTileSizeForMemoryAccessTime) { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"( HloModule m triton_softmax_computation { param_0 = f32[65,65] parameter(0) param_1 = f32[65,65] parameter(1) ROOT add = f32[65,65] add(param_0, param_1) } ENTRY main { param_0 = f32[65,65] parameter(0) param_1 = f32[65,65] parameter(1) ROOT triton_softmax = f32[65,65] fusion(param_0, param_1), kind=kCustom, calls=triton_softmax_computation, backend_config={"fusion_backend_config": {"kind":"__triton"}} } )")); auto fusion_adaptor = HloFusionAdaptor::ForInstruction( module->entry_computation()->root_instruction()); TF_ASSERT_OK_AND_ASSIGN( auto tiling_result, indexing_cost_model_.TryFindBestTilingForFusion(*fusion_adaptor)); TF_ASSERT_OK_AND_ASSIGN( auto res, indexing_cost_model_.EstimateRunTimeForTiledFusion( *fusion_adaptor, {1, 2 * WarpSize()}, {65, 65})); constexpr int64_t kParamSizeBytes = 65 * 65 * 4; constexpr int64_t kPaddedOutputTileSize = 128 * 128; constexpr int64_t kAddFlops = 3; EXPECT_EQ(res.bytes_read, 2 * kParamSizeBytes); EXPECT_EQ(res.flops, kPaddedOutputTileSize * kAddFlops); } TEST_F(GpuIndexingPerformanceModelTest, EstimateRunTimeForTiledFusion_UncoalescedReadsTakeMoreTime) { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"( HloModule m triton_softmax_computation { param_0 = f32[2048,512] parameter(0) param_1 = f32[2048,512] parameter(1) ROOT add = f32[2048,512] add(param_0, param_1) } ENTRY main { param_0 = f32[2048,512] parameter(0) param_1 = f32[2048,512] parameter(1) ROOT triton_softmax = f32[2048,512] fusion(param_0, param_1), kind=kCustom, calls=triton_softmax_computation, backend_config={"fusion_backend_config": {"kind":"__triton"}} } )")); auto fusion_adaptor = HloFusionAdaptor::ForInstruction( module->entry_computation()->root_instruction()); TF_ASSERT_OK_AND_ASSIGN( auto tiling_result, indexing_cost_model_.TryFindBestTilingForFusion(*fusion_adaptor)); TF_ASSERT_OK_AND_ASSIGN( auto res_coalesced, indexing_cost_model_.EstimateRunTimeForTiledFusion( *fusion_adaptor, {8192, 2 * WarpSize()}, {1, 128})); TF_ASSERT_OK_AND_ASSIGN( auto res_uncoalesced, indexing_cost_model_.EstimateRunTimeForTiledFusion( *fusion_adaptor, {8192, 2 * WarpSize()}, {128, 1})); constexpr int64_t kParamSizeBytes = 2048 * 
512 * 4; EXPECT_EQ(res_coalesced.bytes_read, 2 * kParamSizeBytes); EXPECT_EQ(res_uncoalesced.bytes_read, 2 * kParamSizeBytes); EXPECT_NEAR(absl::ToDoubleMicroseconds(res_coalesced.read_time), 11, 1); EXPECT_NEAR(absl::ToDoubleMicroseconds(res_uncoalesced.read_time), 175, 1); } TEST_F(GpuIndexingPerformanceModelTest, GetLaunchDimensionsForTiledFusion_IsSupported) { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"( HloModule m triton_softmax_computation { param_0 = f32[9,9,9] parameter(0) param_1 = f32[9,9,9] parameter(1) ROOT multiply = f32[9,9,9] multiply(param_0, param_1) } ENTRY main { param_0 = f32[9,9,9] parameter(0) param_1 = f32[9,9,9] parameter(1) ROOT fusion = f32[9,9,9] fusion(param_0, param_1), kind=kCustom, calls=triton_softmax_computation, backend_config={"fusion_backend_config": {"kind":"__triton"}} } )")); auto fusion_adaptor = HloFusionAdaptor::ForInstruction( module->entry_computation()->root_instruction()); SymbolicTileAnalysisOrError analysis_or_error = SymbolicTileAnalysis::AnalyzeFusion( *fusion_adaptor, &mlir_context_, nullptr); ASSERT_TRUE(std::holds_alternative<SymbolicTileAnalysis>(analysis_or_error)); TF_ASSERT_OK_AND_ASSIGN( TiledHloComputation tiled_hlo_computation, std::get<SymbolicTileAnalysis>(analysis_or_error) .ComputeTiledHloInstructions({9, 9, 9})); LaunchDimensions launch_dimensions = GpuPerformanceModelWithIndexingAnalysis:: GetLaunchDimensionsForTiledFusion(tiled_hlo_computation); EXPECT_EQ(launch_dimensions.num_blocks(), 1); EXPECT_EQ(launch_dimensions.num_threads_per_block(), 4 * WarpSize()); } TEST_F(GpuIndexingPerformanceModelTest, NumberOfWarpsDependsOnLargestLiveTileSize) { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(R"( HloModule m add { param_0 = f32[] parameter(0) param_1 = f32[] parameter(1) ROOT add = f32[] add(param_0, param_1) } fusion_computation { param_0 = f32[1,4096] parameter(0) c0 = f32[] constant(0) ROOT reduce = f32[1] reduce(param_0, c0), dimensions={1}, to_apply=add } ENTRY main { param_0 = f32[1,4096] parameter(0) ROOT fusion = f32[1] fusion(param_0), kind=kCustom, calls=fusion_computation, backend_config={"fusion_backend_config": {"kind":"__triton"}} } )")); auto fusion_adaptor = HloFusionAdaptor::ForInstruction( module->entry_computation()->root_instruction()); SymbolicTileAnalysisOrError analysis_or_error = SymbolicTileAnalysis::AnalyzeFusion( *fusion_adaptor, &mlir_context_, nullptr); ASSERT_TRUE(std::holds_alternative<SymbolicTileAnalysis>(analysis_or_error)); TF_ASSERT_OK_AND_ASSIGN( TiledHloComputation tiled_hlo_computation, std::get<SymbolicTileAnalysis>(analysis_or_error) .ComputeTiledHloInstructions({1})); LaunchDimensions launch_dimensions = GpuPerformanceModelWithIndexingAnalysis:: GetLaunchDimensionsForTiledFusion(tiled_hlo_computation); EXPECT_EQ(launch_dimensions.num_blocks(), 1); EXPECT_EQ(launch_dimensions.num_threads_per_block(), 4 * WarpSize()); } class FlopsPerElementTest : public GpuIndexingPerformanceModelTest { public: void CompareFlopsModels(absl::string_view hlo_module_string) { TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnVerifiedModule(hlo_module_string)); GpuHloCostAnalysis cost_analysis( GpuHloCostAnalysis::Options{ShapeSizeBytesFunction(), {}, {}, true}, device_info_); ASSERT_IS_OK(module->entry_computation()->Accept(&cost_analysis)); auto instr = module->entry_computation()->root_instruction(); int64_t flops_per_element = indexing_cost_model_.FlopsPerElement(instr); const Shape& output_shape = instr->shape().IsArray() ? 
instr->shape() : instr->shape().tuple_shapes(0); int64_t total_flops = ShapeUtil::ElementsIn(output_shape) * flops_per_element; EXPECT_EQ(total_flops, cost_analysis.flop_count(*instr)); } }; TEST_F(FlopsPerElementTest, MatchesGpuHloCostAnalysis_Reduce) { CompareFlopsModels(R"( HloModule m add { param_0 = f32[] parameter(0) param_1 = f32[] parameter(1) ROOT add.0 = f32[] add(param_0, param_1) } ENTRY entry_computation { param_0.3 = f32[32,40] parameter(0) constant = f32[] constant(0) ROOT reduce = f32[32] reduce(param_0.3, constant), dimensions={1}, to_apply=add } )"); } TEST_F(FlopsPerElementTest, MatchesGpuHloCostAnalysis_VariadicReduce) { CompareFlopsModels(R"( HloModule m add_multiply { param_0 = f32[] parameter(0) param_1 = f32[] parameter(1) param_2 = f32[] parameter(2) param_3 = f32[] parameter(3) add = f32[] add(param_0, param_2) multiply = f32[] multiply(param_1, param_3) ROOT t = (f32[], f32[]) tuple(add, multiply) } ENTRY entry_computation { param_0 = f32[32,40] parameter(0) c0 = f32[] constant(0) ROOT reduce = (f32[32], f32[32]) reduce(param_0, param_0, c0, c0), dimensions={1}, to_apply=add_multiply } )"); } TEST_F(FlopsPerElementTest, MatchesGpuHloCostAnalysis_Elementwise_Cosine) { CompareFlopsModels(R"( HloModule m ENTRY entry_computation { param_0 = f32[32] parameter(0) ROOT cosine = f32[32] cosine(param_0) } )"); } TEST_F(FlopsPerElementTest, MatchesGpuHloCostAnalysis_Elementwise_Clamp) { CompareFlopsModels(R"( HloModule m ENTRY entry_computation { param_0 = f32[32] parameter(0) param_1 = f32[32] parameter(1) param_2 = f32[32] parameter(2) ROOT clamp = clamp(param_0, param_1, param_2) } )"); } TEST_F(FlopsPerElementTest, MatchesGpuHloCostAnalysis_Gather) { CompareFlopsModels(R"( HloModule module entry { operand = f32[33, 76, 70] parameter(0) indices = s32[1806, 2] parameter(1) ROOT gather = f32[1806, 7, 8, 4] gather(operand, indices), offset_dims={1,2,3}, collapsed_slice_dims={}, start_index_map={0,1}, index_vector_dim=1, slice_sizes={7,8,4} })"); } TEST_F(FlopsPerElementTest, MatchesGpuHloCostAnalysis_ReduceWindow) { CompareFlopsModels(R"( add { lhs = f32[] parameter(0) rhs = f32[] parameter(1) ROOT add = f32[] add(lhs, rhs) } ENTRY entry { param_0 = f32[13,12,8,15] parameter(0) c0 = f32[] constant(0) ROOT reduce-window = f32[13,3,8,15] reduce-window(param_0, c0), window={size=1x1x7x1 stride=1x4x1x1 pad=0_0x0_0x3_3x0_0}, to_apply=add })"); } } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/gpu_indexing_performance_model.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/gpu_indexing_performance_model_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
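In EstimateRunTimeForTiledHloComputation above, reads of operands outside the fusion are aggregated per operand: bytes accumulate across tiles, the coalescing verdict is AND-ed so a single uncoalesced tile marks the whole operand as uncoalesced, and the net traffic fed to the DRAM heuristic is capped at the operand's size. The sketch below shows only that bookkeeping with hypothetical stand-in types (TileRead, integer operand ids); the real code keys the map by HloInstruction pointer and then calls ReadTimeWithDRAMHeuristic per operand.

// Minimal sketch of the per-operand read aggregation in the tiled cost model.
// TileRead and the integer operand ids are hypothetical stand-ins.
#include <algorithm>
#include <cstdint>
#include <map>
#include <vector>

struct OperandReadInfo {
  int64_t total_bytes_read = 0;  // Summed over every tile reading the operand.
  bool is_coalesced = true;      // Cleared if any tile reads it uncoalesced.
};

struct TileRead {
  int operand_id;  // Which operand this tile reads.
  int64_t bytes;   // Bytes read by this tile (tile elements * element size).
  bool coalesced;  // Per-tile coalescing heuristic verdict.
};

std::map<int, OperandReadInfo> AggregateReads(
    const std::vector<TileRead>& reads) {
  std::map<int, OperandReadInfo> per_operand;
  for (const TileRead& read : reads) {
    OperandReadInfo& info = per_operand[read.operand_id];
    info.total_bytes_read += read.bytes;  // Accumulate across tiles.
    info.is_coalesced &= read.coalesced;  // One bad tile poisons the operand.
  }
  return per_operand;
}

int64_t NetBytes(int64_t operand_size_bytes, const OperandReadInfo& info) {
  // Re-reads are not charged more DRAM traffic than the operand occupies.
  return std::min(operand_size_bytes, info.total_bytes_read);
}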
ed59ab97-24d9-4a90-96df-bc43c5670b95
cpp
tensorflow/tensorflow
fusion_analysis_cache
third_party/xla/xla/service/gpu/model/fusion_analysis_cache.cc
third_party/xla/xla/service/gpu/model/fusion_analysis_cache_test.cc
#include "xla/service/gpu/model/fusion_analysis_cache.h" #include <utility> #include "absl/synchronization/mutex.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/service/gpu/hlo_fusion_analysis.h" namespace xla::gpu { const HloFusionAnalysis& HloFusionAnalysisCache::Get( const HloInstruction& instruction) { { absl::MutexLock lock(&mutex_); auto it = analyses_.find(instruction.unique_id()); if (it != analyses_.end()) { return it->second; } } HloFusionAnalysis analysis = HloFusionAnalysis::Create(instruction, device_info_); absl::MutexLock lock(&mutex_); auto it = analyses_.find(instruction.unique_id()); if (it != analyses_.end()) { return it->second; } return analyses_.emplace(instruction.unique_id(), std::move(analysis)) .first->second; } const HloFusionAnalysis& HloFusionAnalysisCache::Get( const HloInstruction& producer, const HloInstruction& consumer) { std::pair<int, int> key{producer.unique_id(), consumer.unique_id()}; { absl::MutexLock lock(&mutex_); auto it = producer_consumer_analyses_.find(key); if (it != producer_consumer_analyses_.end()) { return it->second; } } HloFusionAnalysis analysis = HloFusionAnalysis::Create(producer, consumer, device_info_); absl::MutexLock lock(&mutex_); auto it = producer_consumer_analyses_.find(key); if (it != producer_consumer_analyses_.end()) { return it->second; } producers_for_consumers_[consumer.unique_id()].push_back( producer.unique_id()); consumers_for_producers_[producer.unique_id()].push_back( consumer.unique_id()); return producer_consumer_analyses_.emplace(key, std::move(analysis)) .first->second; } void HloFusionAnalysisCache::Invalidate(const HloInstruction& instruction) { analyses_.erase(instruction.unique_id()); if (auto consumers = consumers_for_producers_.extract(instruction.unique_id())) { for (const auto consumer : consumers.mapped()) { producer_consumer_analyses_.erase({instruction.unique_id(), consumer}); } } if (auto producers = producers_for_consumers_.extract(instruction.unique_id())) { for (const auto producer : producers.mapped()) { producer_consumer_analyses_.erase({producer, instruction.unique_id()}); } } } void HloFusionAnalysisCache::Clear() { analyses_.clear(); producer_consumer_analyses_.clear(); consumers_for_producers_.clear(); producers_for_consumers_.clear(); } }
#include "xla/service/gpu/model/fusion_analysis_cache.h" #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/strings/string_view.h" #include "xla/service/gpu/gpu_device_info_for_tests.h" #include "xla/service/gpu/hlo_fusion_analysis.h" #include "xla/service/hlo_parser.h" #include "xla/stream_executor/device_description.h" #include "xla/tests/hlo_test_base.h" #include "tsl/platform/statusor.h" namespace xla::gpu { namespace { class FusionAnalysisCacheTest : public HloTestBase { public: stream_executor::DeviceDescription device_{ TestGpuDeviceInfo::RTXA6000DeviceInfo()}; HloFusionAnalysisCache cache_{device_}; }; TEST_F(FusionAnalysisCacheTest, CachesAndInvalidates) { absl::string_view hlo_string = R"( HloModule m f { c0 = f32[] constant(0) b0 = f32[1000] broadcast(c0) ROOT n0 = f32[1000] negate(b0) } ENTRY e { ROOT r.1 = f32[1000] fusion(), kind=kLoop, calls=f })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo_string)); auto* computation = module->GetComputationWithName("f"); auto* broadcast = computation->GetInstructionWithName("b0"); auto* negate = computation->GetInstructionWithName("n0"); auto* fusion = module->entry_computation()->root_instruction(); EXPECT_EQ(&cache_.Get(*fusion).fusion_root(0).instruction(), negate); computation->set_root_instruction(broadcast); EXPECT_EQ(&cache_.Get(*fusion).fusion_root(0).instruction(), negate) << "Analysis should be cached."; cache_.Invalidate(*fusion); EXPECT_EQ(&cache_.Get(*fusion).fusion_root(0).instruction(), broadcast) << "Analysis should have been recomputed"; } TEST_F(FusionAnalysisCacheTest, CachesAndInvalidatesProducerConsumerFusions) { absl::string_view hlo_string = R"( HloModule m add { p0 = f32[] parameter(0) p1 = f32[] parameter(1) ROOT add = f32[] add(p0, p1) } f { c0 = f32[] constant(0) b0 = f32[1000] broadcast(c0) ROOT r0 = f32[] reduce(b0, c0), dimensions={0}, to_apply=add } ENTRY e { f0 = f32[] fusion(), kind=kInput, calls=f ROOT n0 = f32[] negate(f0) })"; TF_ASSERT_OK_AND_ASSIGN(auto module, ParseAndReturnUnverifiedModule(hlo_string)); auto* fusion = module->entry_computation()->GetInstructionWithName("f0"); auto* neg = module->entry_computation()->GetInstructionWithName("n0"); auto* computation = module->GetComputationWithName("f"); auto* constant = computation->GetInstructionWithName("c0"); EXPECT_EQ(cache_.Get(*fusion, *neg).GetEmitterFusionKind(), HloFusionAnalysis::EmitterFusionKind::kReduction); computation->set_root_instruction(constant); EXPECT_EQ(cache_.Get(*fusion, *neg).GetEmitterFusionKind(), HloFusionAnalysis::EmitterFusionKind::kReduction) << "Analysis should be cached."; cache_.Invalidate(*fusion); EXPECT_EQ(cache_.Get(*fusion, *neg).GetEmitterFusionKind(), HloFusionAnalysis::EmitterFusionKind::kLoop) << "Analysis should have been recomputed"; } } }
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/fusion_analysis_cache.cc
https://github.com/tensorflow/tensorflow/blob/4a29233a7b7c1a3a4294e4ccdd1772f9083944ea/third_party/xla/xla/service/gpu/model/fusion_analysis_cache_test.cc
4a29233a7b7c1a3a4294e4ccdd1772f9083944ea
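Both HloFusionAnalysisCache::Get overloads above use the same concurrency pattern: probe the map under the mutex, release the mutex while the expensive HloFusionAnalysis::Create runs, then re-check and emplace under the mutex so that a result computed by a racing thread wins and the duplicate is discarded. The reverse maps producers_for_consumers_ and consumers_for_producers_ exist so Invalidate can erase every producer-consumer entry touching an instruction without scanning the whole cache. Below is a generic sketch of that pattern with a hypothetical Analysis type and ComputeAnalysis function standing in for HloFusionAnalysis::Create; it is an illustration, not the XLA class.

// Generic sketch of the compute-outside-the-lock caching pattern used by
// HloFusionAnalysisCache. Analysis and ComputeAnalysis are hypothetical.
#include <string>
#include <utility>

#include "absl/container/node_hash_map.h"
#include "absl/synchronization/mutex.h"

struct Analysis {
  std::string summary;
};

// Expensive to compute; must not run while the cache mutex is held.
Analysis ComputeAnalysis(int key) { return Analysis{std::to_string(key)}; }

class AnalysisCache {
 public:
  const Analysis& Get(int key) {
    {
      absl::MutexLock lock(&mutex_);
      auto it = cache_.find(key);
      if (it != cache_.end()) return it->second;  // Fast path: cached.
    }
    // Compute without the lock so concurrent lookups are not serialized.
    Analysis analysis = ComputeAnalysis(key);
    absl::MutexLock lock(&mutex_);
    auto it = cache_.find(key);
    if (it != cache_.end()) return it->second;  // A racing thread won; reuse.
    return cache_.emplace(key, std::move(analysis)).first->second;
  }

 private:
  absl::Mutex mutex_;
  // node_hash_map keeps references stable across later insertions, so the
  // returned const Analysis& stays valid while the cache grows.
  absl::node_hash_map<int, Analysis> cache_;
};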