| Column | Type | Range / values |
| --- | --- | --- |
| ID | int64 | 0 to 2.65k |
| Language | stringclasses | 1 value |
| Repository Name | stringclasses | 21 values |
| File Name | stringlengths | 2 to 48 |
| File Path in Repository | stringlengths | 10 to 111 |
| File Path for Unit Test | stringlengths | 16 to 116 |
| Code | stringlengths | 66 to 1.91M |
| Unit Test - (Ground Truth) | stringlengths | 40 to 32.1k |
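For orientation, a minimal sketch of reading one record with this schema via the Hugging Face `datasets` library; it assumes the table above describes a dataset hosted on the Hub, and the repository id below is a placeholder, not the dataset's actual location.

```python
# Minimal sketch (assumption: the schema above belongs to a Hub-hosted dataset;
# "<org>/<dataset-name>" is a placeholder repository id).
from datasets import load_dataset

ds = load_dataset("<org>/<dataset-name>", split="train")

row = ds[0]
print(row["ID"], row["Language"], row["Repository Name"], row["File Name"])
print(row["File Path in Repository"])
print(row["File Path for Unit Test"])
# "Code" holds the source file; "Unit Test - (Ground Truth)" holds its matching test.
print(row["Code"][:200])
print(row["Unit Test - (Ground Truth)"][:200])
```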
ID: 1,000
Language: cpp
Repository Name: tensorflow/tensorflow
File Name: memory_management
File Path in Repository: tensorflow/lite/delegates/gpu/common/memory_management.cc
File Path for Unit Test: tensorflow/lite/delegates/gpu/common/memory_management_test.cc
Code, then Unit Test - (Ground Truth):
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_MEMORY_MANAGEMENT_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_MEMORY_MANAGEMENT_H_ #include <stddef.h> #include <vector> #include "absl/memory/memory.h" #include "tensorflow/lite/delegates/gpu/common/memory_management/equality_assignment.h" #include "tensorflow/lite/delegates/gpu/common/memory_management/naive_assignment.h" #include "tensorflow/lite/delegates/gpu/common/memory_management/types.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/types.h" namespace tflite { namespace gpu { using TaskId = size_t; OffsetsAssignment ObjectsToOffsets( const ObjectsAssignment<size_t>& obj_assignment); enum class MemoryStrategy { NAIVE, EQUALITY, GREEDY_IN_ORDER, GREEDY_BY_BREADTH, GREEDY_BY_SIZE, GREEDY_BEST, MINCOSTFLOW, }; absl::Status BestGreedy( const std::vector<TensorUsageRecord<size_t>>& usage_records, ObjectsAssignment<size_t>* assignment); template <typename TensorSizeT> absl::Status AssignObjectsToTensors( const std::vector<TensorUsageRecord<TensorSizeT>>& usage_records, MemoryStrategy strategy, ObjectsAssignment<TensorSizeT>* assignment, const UsageGraph* reallocation_graph = nullptr) { switch (strategy) { case MemoryStrategy::NAIVE: return NaiveAssignment(usage_records, assignment); case MemoryStrategy::EQUALITY: return EqualityAssignment(usage_records, assignment); default: return absl::InternalError( "MemoryStrategy is not supported with current tensor size type."); } return absl::OkStatus(); } template <> absl::Status AssignObjectsToTensors( const std::vector<TensorUsageRecord<size_t>>& usage_records, MemoryStrategy strategy, ObjectsAssignment<size_t>* assignment, const UsageGraph* reallocation_graph); template <> absl::Status AssignObjectsToTensors( const std::vector<TensorUsageRecord<BHWC>>& usage_records, MemoryStrategy strategy, ObjectsAssignment<BHWC>* assignment, const UsageGraph* reallocation_graph); template <> absl::Status AssignObjectsToTensors( const std::vector<TensorUsageRecord<uint2>>& usage_records, MemoryStrategy strategy, ObjectsAssignment<uint2>* assignment, const UsageGraph* reallocation_graph); template <> absl::Status AssignObjectsToTensors( const std::vector<TensorUsageRecord<uint3>>& usage_records, MemoryStrategy strategy, ObjectsAssignment<uint3>* assignment, const UsageGraph* reallocation_graph); absl::Status AssignOffsetsToTensors( const std::vector<TensorUsageRecord<size_t>>& usage_records, const MemoryStrategy& strategy, OffsetsAssignment* assignment, size_t base_addr_align_bytes = 1, const UsageGraph* reallocation_graph = nullptr); } } #endif #include "tensorflow/lite/delegates/gpu/common/memory_management.h" #include <cstddef> #include <numeric> #include <utility> #include <vector> #include "tensorflow/lite/delegates/gpu/common/memory_management/equality_assignment.h" #include "tensorflow/lite/delegates/gpu/common/memory_management/greedy_by_breadth_assignment.h" #include "tensorflow/lite/delegates/gpu/common/memory_management/greedy_by_size_assignment.h" #include "tensorflow/lite/delegates/gpu/common/memory_management/greedy_in_order_assignment.h" #include "tensorflow/lite/delegates/gpu/common/memory_management/min_cost_flow_assignment.h" #include "tensorflow/lite/delegates/gpu/common/memory_management/naive_assignment.h" #include "tensorflow/lite/delegates/gpu/common/memory_management/types.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" #include 
"tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/types.h" namespace tflite { namespace gpu { namespace { size_t TotalSize(const ObjectsAssignment<size_t>& assignment) { return std::accumulate(assignment.object_sizes.begin(), assignment.object_sizes.end(), static_cast<size_t>(0)); } } OffsetsAssignment ObjectsToOffsets( const ObjectsAssignment<size_t>& obj_assignment) { size_t num_tensors = obj_assignment.object_ids.size(); size_t num_objects = obj_assignment.object_sizes.size(); OffsetsAssignment result = {std::vector<size_t>(num_tensors), 0}; std::vector<size_t> ids_to_offset(num_objects); for (size_t i = 0; i < num_objects; ++i) { ids_to_offset[i] = result.total_size; result.total_size += obj_assignment.object_sizes[i]; } for (size_t i = 0; i < num_tensors; ++i) { result.offsets[i] = ids_to_offset[obj_assignment.object_ids[i]]; } return result; } absl::Status BestGreedy( const std::vector<TensorUsageRecord<size_t>>& usage_records, ObjectsAssignment<size_t>* assignment) { RETURN_IF_ERROR( GreedyBySizeDistPriorityAssignment(usage_records, assignment)); ObjectsAssignment<size_t> assignment_by_breadth; if (GreedyByBreadthAssignment(usage_records, &assignment_by_breadth).ok() && TotalSize(assignment_by_breadth) < TotalSize(*assignment)) { std::swap(*assignment, assignment_by_breadth); } return absl::OkStatus(); } template <> absl::Status AssignObjectsToTensors( const std::vector<TensorUsageRecord<size_t>>& usage_records, MemoryStrategy strategy, ObjectsAssignment<size_t>* assignment, const UsageGraph* reallocation_graph) { switch (strategy) { case MemoryStrategy::NAIVE: return NaiveAssignment(usage_records, assignment); case MemoryStrategy::EQUALITY: return EqualityAssignmentWithHash(usage_records, assignment); case MemoryStrategy::GREEDY_IN_ORDER: return GreedyInOrderAssignment(usage_records, assignment, reallocation_graph); case MemoryStrategy::GREEDY_BY_BREADTH: return GreedyByBreadthAssignment(usage_records, assignment); case MemoryStrategy::GREEDY_BY_SIZE: return GreedyBySizeDistPriorityAssignment(usage_records, assignment); case MemoryStrategy::GREEDY_BEST: return BestGreedy(usage_records, assignment); case MemoryStrategy::MINCOSTFLOW: return MinCostFlowAssignment(usage_records, assignment); default: return absl::InternalError( "MemoryStrategy is not supported with current tensor size type."); } return absl::OkStatus(); } template <> absl::Status AssignObjectsToTensors( const std::vector<TensorUsageRecord<BHWC>>& usage_records, MemoryStrategy strategy, ObjectsAssignment<BHWC>* assignment, const UsageGraph* reallocation_graph) { switch (strategy) { case MemoryStrategy::NAIVE: return NaiveAssignment(usage_records, assignment); case MemoryStrategy::EQUALITY: return EqualityAssignmentWithHash(usage_records, assignment); default: return absl::InternalError( "MemoryStrategy is not supported with current tensor size type."); } return absl::OkStatus(); } template <> absl::Status AssignObjectsToTensors( const std::vector<TensorUsageRecord<uint2>>& usage_records, MemoryStrategy strategy, ObjectsAssignment<uint2>* assignment, const UsageGraph* reallocation_graph) { switch (strategy) { case MemoryStrategy::NAIVE: return NaiveAssignment(usage_records, assignment); case MemoryStrategy::EQUALITY: return EqualityAssignment(usage_records, assignment); case MemoryStrategy::GREEDY_IN_ORDER: return GreedyInOrderAssignmentMultidimensional(usage_records, assignment); default: return absl::InternalError( "MemoryStrategy is not supported with current tensor size 
type."); } return absl::OkStatus(); } template <> absl::Status AssignObjectsToTensors( const std::vector<TensorUsageRecord<uint3>>& usage_records, MemoryStrategy strategy, ObjectsAssignment<uint3>* assignment, const UsageGraph* reallocation_graph) { switch (strategy) { case MemoryStrategy::NAIVE: return NaiveAssignment(usage_records, assignment); case MemoryStrategy::EQUALITY: return EqualityAssignment(usage_records, assignment); case MemoryStrategy::GREEDY_IN_ORDER: return GreedyInOrderAssignmentMultidimensional(usage_records, assignment); default: return absl::InternalError( "MemoryStrategy is not supported with current tensor size type."); } return absl::OkStatus(); } absl::Status AssignOffsetsToTensors( const std::vector<TensorUsageRecord<size_t>>& usage_records, const MemoryStrategy& strategy, OffsetsAssignment* assignment, size_t base_addr_align_bytes, const UsageGraph* reallocation_graph) { if (strategy == MemoryStrategy::GREEDY_BY_SIZE) { return GreedyBySizeAssignment(usage_records, base_addr_align_bytes, assignment); } ObjectsAssignment<size_t> objects_assignment; RETURN_IF_ERROR(AssignObjectsToTensors( usage_records, strategy, &objects_assignment, reallocation_graph)); *assignment = ObjectsToOffsets(objects_assignment); return absl::OkStatus(); } } }
#include "tensorflow/lite/delegates/gpu/common/memory_management.h" #include <cstddef> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/status/status.h" #include "tensorflow/lite/delegates/gpu/common/memory_management/types.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/common/types.h" namespace tflite { namespace gpu { namespace { using ::testing::ElementsAre; TEST(Model, EmptyAssignment) { ObjectsAssignment<size_t> objects_assignment; OffsetsAssignment result = ObjectsToOffsets(objects_assignment); EXPECT_TRUE(result.offsets.empty()); EXPECT_EQ(result.total_size, 0); } TEST(Model, OneObjectAssignment) { ObjectsAssignment<size_t> objects_assignment; objects_assignment.object_sizes = {16}; objects_assignment.object_ids = {0}; OffsetsAssignment result = ObjectsToOffsets(objects_assignment); EXPECT_EQ(result.total_size, 16); EXPECT_THAT(result.offsets, ElementsAre(0)); objects_assignment.object_ids = {0, 0, 0}; result = ObjectsToOffsets(objects_assignment); EXPECT_EQ(result.total_size, 16); EXPECT_THAT(result.offsets, ElementsAre(0, 0, 0)); } TEST(Model, ManyObjectsAssignment) { ObjectsAssignment<size_t> objects_assignment; objects_assignment.object_sizes = {16, 8, 32, 32, 4, 16}; objects_assignment.object_ids = {2, 0, 2, 1, 3, 3, 1, 5}; OffsetsAssignment result = ObjectsToOffsets(objects_assignment); EXPECT_THAT(result.offsets, ElementsAre(24, 0, 24, 16, 56, 56, 16, 92)); } TEST(Model, EmptyRecords) { ObjectsAssignment<size_t> assignment; ASSERT_TRUE( AssignObjectsToTensors({}, MemoryStrategy::NAIVE, &assignment).ok()); EXPECT_TRUE(assignment.object_ids.empty()); EXPECT_TRUE(assignment.object_sizes.empty()); ASSERT_TRUE( AssignObjectsToTensors({}, MemoryStrategy::EQUALITY, &assignment).ok()); EXPECT_TRUE(assignment.object_ids.empty()); EXPECT_TRUE(assignment.object_sizes.empty()); ASSERT_TRUE( AssignObjectsToTensors({}, MemoryStrategy::GREEDY_IN_ORDER, &assignment) .ok()); EXPECT_TRUE(assignment.object_ids.empty()); EXPECT_TRUE(assignment.object_sizes.empty()); ASSERT_TRUE( AssignObjectsToTensors({}, MemoryStrategy::MINCOSTFLOW, &assignment) .ok()); EXPECT_TRUE(assignment.object_ids.empty()); EXPECT_TRUE(assignment.object_sizes.empty()); ASSERT_TRUE( AssignObjectsToTensors({}, MemoryStrategy::GREEDY_BY_BREADTH, &assignment) .ok()); EXPECT_TRUE(assignment.object_ids.empty()); EXPECT_TRUE(assignment.object_sizes.empty()); ASSERT_TRUE( AssignObjectsToTensors({}, MemoryStrategy::GREEDY_BY_SIZE, &assignment) .ok()); EXPECT_TRUE(assignment.object_ids.empty()); EXPECT_TRUE(assignment.object_sizes.empty()); OffsetsAssignment offsets_assignment; ASSERT_TRUE(AssignOffsetsToTensors({}, MemoryStrategy::GREEDY_BY_SIZE, &offsets_assignment) .ok()); EXPECT_TRUE(offsets_assignment.offsets.empty()); EXPECT_EQ(offsets_assignment.total_size, 0); } TEST(Model, OneRecord) { std::vector<TensorUsageRecord<size_t>> usage_records{ {16, 0, 1}}; ObjectsAssignment<size_t> assignment; ASSERT_TRUE( AssignObjectsToTensors(usage_records, MemoryStrategy::NAIVE, &assignment) .ok()); EXPECT_THAT(assignment.object_ids, ElementsAre(0)); EXPECT_THAT(assignment.object_sizes, ElementsAre(16)); ASSERT_TRUE(AssignObjectsToTensors(usage_records, MemoryStrategy::EQUALITY, &assignment) .ok()); EXPECT_THAT(assignment.object_ids, ElementsAre(0)); EXPECT_THAT(assignment.object_sizes, ElementsAre(16)); ASSERT_TRUE(AssignObjectsToTensors( usage_records, MemoryStrategy::GREEDY_IN_ORDER, &assignment) .ok()); EXPECT_THAT(assignment.object_ids, 
ElementsAre(0)); EXPECT_THAT(assignment.object_sizes, ElementsAre(16)); ASSERT_TRUE(AssignObjectsToTensors(usage_records, MemoryStrategy::MINCOSTFLOW, &assignment) .ok()); EXPECT_THAT(assignment.object_ids, ElementsAre(0)); EXPECT_THAT(assignment.object_sizes, ElementsAre(16)); ASSERT_TRUE(AssignObjectsToTensors( usage_records, MemoryStrategy::GREEDY_BY_BREADTH, &assignment) .ok()); EXPECT_THAT(assignment.object_ids, ElementsAre(0)); EXPECT_THAT(assignment.object_sizes, ElementsAre(16)); ASSERT_TRUE(AssignObjectsToTensors( usage_records, MemoryStrategy::GREEDY_BY_SIZE, &assignment) .ok()); EXPECT_THAT(assignment.object_ids, ElementsAre(0)); EXPECT_THAT(assignment.object_sizes, ElementsAre(16)); OffsetsAssignment offsets_assignment; ASSERT_TRUE(AssignOffsetsToTensors(usage_records, MemoryStrategy::GREEDY_BY_SIZE, &offsets_assignment) .ok()); EXPECT_THAT(offsets_assignment.offsets, ElementsAre(0)); EXPECT_EQ(offsets_assignment.total_size, 16); } TEST(Model, ChainRecords) { std::vector<TensorUsageRecord<size_t>> usage_records{ {16, 0, 1}, {8, 1, 2}, {64, 2, 3}, {32, 3, 4}, {8, 4, 5}, }; ObjectsAssignment<size_t> assignment; ASSERT_TRUE( AssignObjectsToTensors(usage_records, MemoryStrategy::NAIVE, &assignment) .ok()); EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 3, 4)); EXPECT_THAT(assignment.object_sizes, ElementsAre(16, 8, 64, 32, 8)); ASSERT_TRUE(AssignObjectsToTensors(usage_records, MemoryStrategy::EQUALITY, &assignment) .ok()); EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 3, 1)); EXPECT_THAT(assignment.object_sizes, ElementsAre(16, 8, 64, 32)); ASSERT_TRUE(AssignObjectsToTensors(usage_records, MemoryStrategy::MINCOSTFLOW, &assignment) .ok()); EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 0, 1, 0)); EXPECT_THAT(assignment.object_sizes, ElementsAre(64, 32)); ASSERT_TRUE(AssignObjectsToTensors( usage_records, MemoryStrategy::GREEDY_IN_ORDER, &assignment) .ok()); EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 0, 1, 0)); EXPECT_THAT(assignment.object_sizes, ElementsAre(64, 32)); ASSERT_TRUE(AssignObjectsToTensors( usage_records, MemoryStrategy::GREEDY_BY_BREADTH, &assignment) .ok()); EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 0, 1, 0)); EXPECT_THAT(assignment.object_sizes, ElementsAre(64, 32)); ASSERT_TRUE(AssignObjectsToTensors( usage_records, MemoryStrategy::GREEDY_BY_SIZE, &assignment) .ok()); EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 0, 1, 0)); EXPECT_THAT(assignment.object_sizes, ElementsAre(64, 32)); OffsetsAssignment offsets_assignment; ASSERT_TRUE(AssignOffsetsToTensors(usage_records, MemoryStrategy::GREEDY_BY_SIZE, &offsets_assignment) .ok()); EXPECT_THAT(offsets_assignment.offsets, ElementsAre(0, 64, 0, 64, 0)); EXPECT_EQ(offsets_assignment.total_size, 96); } TEST(Model, ComplexRecords) { std::vector<TensorUsageRecord<size_t>> usage_records{ {32, 0, 1}, {32, 1, 4}, {8, 2, 5}, {16, 3, 5}, {8, 4, 5}, {64, 5, 7}, {8, 6, 8}, {8, 7, 8}, {16, 8, 9}}; ObjectsAssignment<size_t> assignment; ASSERT_TRUE( AssignObjectsToTensors(usage_records, MemoryStrategy::NAIVE, &assignment) .ok()); EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8)); EXPECT_THAT(assignment.object_sizes, ElementsAre(32, 32, 8, 16, 8, 64, 8, 8, 16)); ASSERT_TRUE(AssignObjectsToTensors(usage_records, MemoryStrategy::EQUALITY, &assignment) .ok()); EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 3, 4, 5, 4, 2, 3)); EXPECT_THAT(assignment.object_sizes, ElementsAre(32, 32, 8, 16, 8, 64)); ASSERT_TRUE(AssignObjectsToTensors(usage_records, 
MemoryStrategy::MINCOSTFLOW, &assignment) .ok()); EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 0, 3, 1, 3, 2, 0)); EXPECT_THAT(assignment.object_sizes, ElementsAre(32, 64, 8, 8)); ASSERT_TRUE(AssignObjectsToTensors( usage_records, MemoryStrategy::GREEDY_IN_ORDER, &assignment) .ok()); EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 0, 2, 3, 1, 3, 2, 0)); EXPECT_THAT(assignment.object_sizes, ElementsAre(32, 64, 16, 8)); ASSERT_TRUE(AssignObjectsToTensors( usage_records, MemoryStrategy::GREEDY_BY_BREADTH, &assignment) .ok()); EXPECT_THAT(assignment.object_ids, ElementsAre(0, 4, 2, 1, 3, 0, 2, 3, 1)); EXPECT_THAT(assignment.object_sizes, ElementsAre(64, 16, 8, 8, 32)); ASSERT_TRUE(AssignObjectsToTensors( usage_records, MemoryStrategy::GREEDY_BY_SIZE, &assignment) .ok()); EXPECT_THAT(assignment.object_ids, ElementsAre(1, 0, 2, 1, 3, 0, 1, 2, 0)); EXPECT_THAT(assignment.object_sizes, ElementsAre(64, 32, 8, 8)); OffsetsAssignment offsets_assignment; ASSERT_TRUE(AssignOffsetsToTensors(usage_records, MemoryStrategy::GREEDY_BY_SIZE, &offsets_assignment) .ok()); EXPECT_THAT(offsets_assignment.offsets, ElementsAre(0, 32, 80, 64, 88, 0, 64, 72, 0)); EXPECT_EQ(offsets_assignment.total_size, 96); } TEST(Model, BHWCRecords) { std::vector<TensorUsageRecord<BHWC>> usage_records{ {BHWC(1, 1, 2, 8), 0, 1}, {BHWC(1, 1, 2, 8), 1, 2}, {BHWC(1, 1, 1, 16), 2, 4}, {BHWC(1, 1, 2, 8), 3, 5}, {BHWC(1, 1, 8, 2), 4, 5}, {BHWC(1, 1, 2, 8), 5, 7}, {BHWC(1, 16, 1, 1), 6, 8}, {BHWC(16, 1, 1, 1), 7, 8}, {BHWC(1, 1, 1, 16), 8, 9}}; ObjectsAssignment<BHWC> assignment; ASSERT_TRUE( AssignObjectsToTensors(usage_records, MemoryStrategy::NAIVE, &assignment) .ok()); EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8)); EXPECT_THAT( assignment.object_sizes, ElementsAre(BHWC(1, 1, 2, 8), BHWC(1, 1, 2, 8), BHWC(1, 1, 1, 16), BHWC(1, 1, 2, 8), BHWC(1, 1, 8, 2), BHWC(1, 1, 2, 8), BHWC(1, 16, 1, 1), BHWC(16, 1, 1, 1), BHWC(1, 1, 1, 16))); ASSERT_TRUE(AssignObjectsToTensors(usage_records, MemoryStrategy::EQUALITY, &assignment) .ok()); EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 1, 3, 0, 4, 5, 2)); EXPECT_THAT( assignment.object_sizes, ElementsAre(BHWC(1, 1, 2, 8), BHWC(1, 1, 2, 8), BHWC(1, 1, 1, 16), BHWC(1, 1, 8, 2), BHWC(1, 16, 1, 1), BHWC(16, 1, 1, 1))); } TEST(Model, UInt2Records) { std::vector<TensorUsageRecord<uint2>> usage_records{ {uint2(2, 8), 0, 1}, {uint2(2, 8), 1, 2}, {uint2(1, 12), 2, 4}, {uint2(2, 8), 3, 5}, {uint2(8, 2), 4, 5}, {uint2(2, 8), 5, 7}, {uint2(1, 8), 6, 8}, {uint2(2, 8), 7, 8}, {uint2(4, 1), 8, 9}}; ObjectsAssignment<uint2> assignment; ASSERT_TRUE( AssignObjectsToTensors(usage_records, MemoryStrategy::NAIVE, &assignment) .ok()); EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8)); EXPECT_THAT(assignment.object_sizes, ElementsAre(uint2(2, 8), uint2(2, 8), uint2(1, 12), uint2(2, 8), uint2(8, 2), uint2(2, 8), uint2(1, 8), uint2(2, 8), uint2(4, 1))); ASSERT_TRUE(AssignObjectsToTensors(usage_records, MemoryStrategy::EQUALITY, &assignment) .ok()); EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 0, 3, 1, 4, 0, 5)); EXPECT_THAT(assignment.object_sizes, ElementsAre(uint2(2, 8), uint2(2, 8), uint2(1, 12), uint2(8, 2), uint2(1, 8), uint2(4, 1))); ASSERT_TRUE(AssignObjectsToTensors( usage_records, MemoryStrategy::GREEDY_IN_ORDER, &assignment) .ok()); EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 0, 3, 1, 2, 0, 3)); EXPECT_THAT(assignment.object_sizes, ElementsAre(uint2(2, 8), uint2(2, 8), uint2(1, 12), uint2(8, 2))); } TEST(Model, 
UInt3Records) { std::vector<TensorUsageRecord<uint3>> usage_records{ {uint3(1, 2, 8), 0, 1}, {uint3(4, 3, 2), 1, 2}, {uint3(1, 1, 1), 2, 4}, {uint3(2, 4, 1), 3, 5}, {uint3(2, 2, 2), 4, 5}, {uint3(8, 1, 2), 5, 7}, {uint3(1, 2, 1), 6, 8}, {uint3(1, 1, 1), 7, 8}, {uint3(2, 2, 2), 8, 9}}; ObjectsAssignment<uint3> assignment; ASSERT_TRUE( AssignObjectsToTensors(usage_records, MemoryStrategy::NAIVE, &assignment) .ok()); EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 3, 4, 5, 6, 7, 8)); EXPECT_THAT(assignment.object_sizes, ElementsAre(uint3(1, 2, 8), uint3(4, 3, 2), uint3(1, 1, 1), uint3(2, 4, 1), uint3(2, 2, 2), uint3(8, 1, 2), uint3(1, 2, 1), uint3(1, 1, 1), uint3(2, 2, 2))); ASSERT_TRUE(AssignObjectsToTensors(usage_records, MemoryStrategy::EQUALITY, &assignment) .ok()); EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 2, 3, 4, 5, 6, 2, 4)); EXPECT_THAT(assignment.object_sizes, ElementsAre(uint3(1, 2, 8), uint3(4, 3, 2), uint3(1, 1, 1), uint3(2, 4, 1), uint3(2, 2, 2), uint3(8, 1, 2), uint3(1, 2, 1))); ASSERT_TRUE(AssignObjectsToTensors( usage_records, MemoryStrategy::GREEDY_IN_ORDER, &assignment) .ok()); EXPECT_THAT(assignment.object_ids, ElementsAre(0, 1, 0, 2, 1, 3, 2, 0, 1)); EXPECT_THAT(assignment.object_sizes, ElementsAre(uint3(1, 2, 8), uint3(4, 3, 2), uint3(2, 4, 1), uint3(8, 1, 2))); } TEST(Model, OffsetAssignmentWithAlignment) { std::vector<TensorUsageRecord<size_t>> usage_records{ {16, 0, 1}, {8, 1, 2}, {64, 2, 3}, {32, 3, 4}, {8, 4, 5}, }; OffsetsAssignment offsets_assignment; ASSERT_TRUE(AssignOffsetsToTensors(usage_records, MemoryStrategy::GREEDY_BY_SIZE, &offsets_assignment, 128) .ok()); EXPECT_THAT(offsets_assignment.offsets, ElementsAre(0, 128, 0, 128, 0)); EXPECT_EQ(offsets_assignment.total_size, 160); } } } }
ID: 1,001
Language: cpp
Repository Name: tensorflow/tensorflow
File Name: model_builder_helper
File Path in Repository: tensorflow/lite/delegates/gpu/common/model_builder_helper.cc
File Path for Unit Test: tensorflow/lite/delegates/gpu/common/model_builder_helper_test.cc
Code, then Unit Test - (Ground Truth):
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_MODEL_BUILDER_HELPER_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_MODEL_BUILDER_HELPER_H_ #include <stddef.h> #include <cstdint> #include <cstring> #include "absl/strings/str_cat.h" #include "tensorflow/lite/core/c/builtin_op_data.h" #include "tensorflow/lite/core/c/common.h" #include "tensorflow/lite/delegates/gpu/common/data_type.h" #include "tensorflow/lite/delegates/gpu/common/model.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/tensor.h" #include "tensorflow/lite/kernels/internal/reference/dequantize.h" #include "tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "tensorflow/lite/kernels/internal/types.h" #include "tensorflow/lite/kernels/kernel_util.h" namespace tflite { namespace gpu { absl::Status GetNodeAndRegistration(TfLiteContext* context, int node_id, TfLiteNode** tflite_node, TfLiteRegistration** registration); DataType ToDataType(TfLiteType type); absl::Status ExtractTensorShape(const TfLiteTensor& tflite_tensor, BHWC* bhwc); absl::Status ExtractAxisFromIndex(const TfLiteTensor& tflite_tensor, int index, Axis* axis); absl::Status ConvertTfLiteTensorToTensorRef(const TfLiteTensor& tflite_tensor, TensorRef<BHWC>* tensor_ref); absl::Status PopulateQuantParams(const TfLiteTensor& tensor, QuantizationParams* quant_params); int GetNumberOfRuntimeInputsForNode(const TfLiteContext* context, const TfLiteNode* tflite_node); int GetNumberOfConstInputsForNode(const TfLiteContext* context, const TfLiteNode* tflite_node); absl::Status CheckInputsOutputs(const TfLiteContext* context, const TfLiteNode* tflite_node, int runtime_inputs, int outputs); absl::Status CheckInputsConstsOutputs(const TfLiteContext* context, const TfLiteNode* tflite_node, int runtime_inputs, int const_inputs, int outputs); void ConvertFloat16ToFloat32(size_t num_elements, const uint16_t* src, float* dst); template <typename T> inline void DequantizeConstantTensor(const TfLiteTensor& tensor, const T* source_data, float* dequantized_data) { TfLiteAffineQuantization* quant_params = static_cast<TfLiteAffineQuantization*>(tensor.quantization.params); if (quant_params->scale->size > 1) { PerChannelDequantizationParams op_params; op_params.zero_point = quant_params->zero_point->data; op_params.scale = quant_params->scale->data; op_params.quantized_dimension = quant_params->quantized_dimension; reference_ops::PerChannelDequantize(op_params, GetTensorShape(&tensor), source_data, GetTensorShape(&tensor), dequantized_data); } else { DequantizationParams op_params; op_params.zero_point = tensor.params.zero_point; op_params.scale = tensor.params.scale; reference_ops::Dequantize(op_params, GetTensorShape(&tensor), source_data, GetTensorShape(&tensor), dequantized_data); } } template <typename T> absl::Status CreateVectorCopyData(const TfLiteTensor& src, T* dst) { if (src.bytes % sizeof(T) != 0) { return absl::InvalidArgumentError( absl::StrCat("Input data size ", src.bytes, " is not aligned to expected type: ", sizeof(T))); } if (const int n = tflite::NumElements(&src); n * sizeof(T) == src.bytes) { std::memcpy(dst, src.data.raw_const, src.bytes); return absl::OkStatus(); } else { switch (src.type) { case kTfLiteNoType: return absl::InvalidArgumentError("src has no type."); case kTfLiteFloat32: for (int i = 0; i < n; ++i) { dst[i] = tflite::GetTensorData<float>(&src)[i]; } return absl::OkStatus(); case kTfLiteInt32: for (int i = 0; i < n; ++i) { dst[i] = 
tflite::GetTensorData<int32_t>(&src)[i]; } return absl::OkStatus(); case kTfLiteUInt8: for (int i = 0; i < n; ++i) { dst[i] = tflite::GetTensorData<uint8_t>(&src)[i]; } return absl::OkStatus(); case kTfLiteInt64: for (int i = 0; i < n; ++i) { dst[i] = tflite::GetTensorData<int64_t>(&src)[i]; } return absl::OkStatus(); case kTfLiteString: return absl::UnimplementedError("src can't be string."); case kTfLiteBool: for (int i = 0; i < n; ++i) { dst[i] = tflite::GetTensorData<bool>(&src)[i]; } return absl::OkStatus(); case kTfLiteInt16: for (int i = 0; i < n; ++i) { dst[i] = tflite::GetTensorData<int16_t>(&src)[i]; } return absl::OkStatus(); case kTfLiteComplex64: return absl::UnimplementedError("src can't be complex64."); case kTfLiteInt8: for (int i = 0; i < n; ++i) { dst[i] = tflite::GetTensorData<int8_t>(&src)[i]; } return absl::OkStatus(); case kTfLiteFloat16: return absl::UnimplementedError("src can't be float16."); case kTfLiteBFloat16: return absl::UnimplementedError("src can't be bfloat16."); case kTfLiteFloat64: for (int i = 0; i < n; ++i) { dst[i] = tflite::GetTensorData<double>(&src)[i]; } return absl::OkStatus(); case kTfLiteComplex128: return absl::UnimplementedError("src can't be complex128."); case kTfLiteUInt64: for (int i = 0; i < n; ++i) { dst[i] = tflite::GetTensorData<uint64_t>(&src)[i]; } return absl::OkStatus(); case kTfLiteResource: return absl::UnimplementedError("src can't be resource."); case kTfLiteVariant: return absl::UnimplementedError("src can't be variant."); case kTfLiteUInt32: for (int i = 0; i < n; ++i) { dst[i] = tflite::GetTensorData<uint32_t>(&src)[i]; } return absl::OkStatus(); case kTfLiteUInt16: for (int i = 0; i < n; ++i) { dst[i] = tflite::GetTensorData<uint16_t>(&src)[i]; } return absl::OkStatus(); case kTfLiteInt4: return absl::UnimplementedError("src can't be int4."); } } } template <> absl::Status CreateVectorCopyData<float>(const TfLiteTensor& src, float* dst); absl::Status SetAllDimensions(const TfLiteIntArray* dimensions, Scalar* shape); absl::Status CheckIfLinearConvertible(const TfLiteIntArray* dimensions); absl::Status SetAllDimensions(const TfLiteIntArray* dimensions, Linear* shape); absl::Status SetAllDimensions(const TfLiteIntArray* dimensions, HWC* shape); absl::Status SetAllDimensions(const TfLiteIntArray* dimensions, HW* shape); absl::Status SetAllDimensions(const TfLiteIntArray* dimensions, OHWI* shape); absl::Status SetAllDimensions(const TfLiteIntArray* dimensions, BHWC* shape); absl::Status MaybeFuseActivation(TfLiteFusedActivation fused_activation, GraphFloat32* graph, Node* node); } } #endif #include "tensorflow/lite/delegates/gpu/common/model_builder_helper.h" #include <stddef.h> #include <cstdint> #include <cstring> #include <limits> #include <string> #include <vector> #include "fp16.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_join.h" #include "tensorflow/lite/context_util.h" #include "tensorflow/lite/core/c/builtin_op_data.h" #include "tensorflow/lite/core/c/common.h" #include "tensorflow/lite/delegates/gpu/common/data_type.h" #include "tensorflow/lite/delegates/gpu/common/model.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/tensor.h" #include "tensorflow/lite/kernels/kernel_util.h" namespace tflite { namespace gpu { namespace { absl::Status NewPassthroughNode(GraphFloat32* graph, Node* node, const Value* output, Node** 
passthru_node) { *passthru_node = graph->NewNode(); RETURN_IF_ERROR(graph->SetProducer((*passthru_node)->id, output->id)); Value* copy_output = graph->NewValue(); RETURN_IF_ERROR(graph->SetProducer(node->id, copy_output->id)); RETURN_IF_ERROR(graph->AddConsumer((*passthru_node)->id, copy_output->id)); copy_output->tensor = output->tensor; copy_output->tensor.ref = -1; return absl::OkStatus(); } } absl::Status GetNodeAndRegistration(TfLiteContext* context, int node_id, TfLiteNode** tflite_node, TfLiteRegistration** registration) { if (context->GetNodeAndRegistration(context, node_id, tflite_node, registration) != kTfLiteOk) { return absl::InvalidArgumentError(absl::StrCat( "Couldn't get node and registration info for op: ", node_id)); } return absl::OkStatus(); } DataType ToDataType(TfLiteType type) { switch (type) { case kTfLiteFloat32: return DataType::FLOAT32; case kTfLiteInt32: return DataType::INT32; case kTfLiteInt64: return DataType::INT64; case kTfLiteInt8: return DataType::INT8; case kTfLiteUInt8: return DataType::UINT8; case kTfLiteBool: return DataType::BOOL; default: return DataType::UNKNOWN; } } absl::Status ExtractTensorShape(const TfLiteTensor& tflite_tensor, BHWC* bhwc) { const TfLiteIntArray* dims = tflite_tensor.dims; switch (dims->size) { case 1: *bhwc = BHWC(dims->data[0], 1, 1, 1); return absl::OkStatus(); case 2: *bhwc = BHWC(dims->data[0], 1, 1, dims->data[1]); return absl::OkStatus(); case 3: *bhwc = BHWC(dims->data[0], 1, dims->data[1], dims->data[2]); return absl::OkStatus(); case 4: *bhwc = BHWC(dims->data[0], dims->data[1], dims->data[2], dims->data[3]); return absl::OkStatus(); default: return absl::InvalidArgumentError(absl::StrCat( "Tensor \"", tflite_tensor.name ? tflite_tensor.name : "nullptr", "\" has bad input dims size: ", dims->size, ".")); } } absl::Status ExtractAxisFromIndex(const TfLiteTensor& tflite_tensor, int index, Axis* axis) { const TfLiteIntArray* dims = tflite_tensor.dims; if (index < 0) { index = dims->size + index; } if (index < 0 || index >= dims->size) { return absl::OutOfRangeError("Index for axis out of range"); } std::vector<Axis> index_to_axis; switch (dims->size) { case 1: index_to_axis = {Axis::BATCH}; break; case 2: index_to_axis = {Axis::BATCH, Axis::CHANNELS}; break; case 3: index_to_axis = {Axis::BATCH, Axis::WIDTH, Axis::CHANNELS}; break; case 4: index_to_axis = {Axis::BATCH, Axis::HEIGHT, Axis::WIDTH, Axis::CHANNELS}; break; default: return absl::UnavailableError("Unknown layout."); } *axis = index_to_axis[index]; return absl::OkStatus(); } absl::Status ConvertTfLiteTensorToTensorRef(const TfLiteTensor& tflite_tensor, TensorRef<BHWC>* tensor_ref) { tensor_ref->type = ToDataType(tflite_tensor.type); return ExtractTensorShape(tflite_tensor, &tensor_ref->shape); } absl::Status PopulateQuantParams(const TfLiteTensor& tensor, QuantizationParams* quant_params) { const TfLiteQuantization& quant = tensor.quantization; if (quant.type != TfLiteQuantizationType::kTfLiteAffineQuantization) { return absl::InvalidArgumentError( absl::StrCat("Tensor not quantized: ", std::string(tensor.name))); } const TfLiteAffineQuantization* params = static_cast<const TfLiteAffineQuantization*>(quant.params); if (params->scale->size > 1) { return absl::InvalidArgumentError( absl::StrCat("Non-constant per-channel quantized tensor: ", std::string(tensor.name))); } const float scale = params->scale->data[0]; const float zero_point = static_cast<float>(params->zero_point->data[0]); float qmin_value = 0; float qmax_value = 0; if (tensor.type == kTfLiteUInt8) { 
qmin_value = static_cast<float>(std::numeric_limits<uint8_t>::min()); qmax_value = static_cast<float>(std::numeric_limits<uint8_t>::max()); } else if (tensor.type == kTfLiteInt8) { qmin_value = static_cast<float>(std::numeric_limits<int8_t>::min()); qmax_value = static_cast<float>(std::numeric_limits<int8_t>::max()); } else { return absl::InvalidArgumentError(absl::StrCat( "Type invalid for quantized tensor: ", std::string(tensor.name))); } quant_params->min = scale * (static_cast<float>(qmin_value) - zero_point); quant_params->max = scale * (static_cast<float>(qmax_value) - zero_point); quant_params->scale = scale; return absl::OkStatus(); } int GetNumberOfRuntimeInputsForNode(const TfLiteContext* context, const TfLiteNode* tflite_node) { int number_of_runtime_inputs = 0; for (int i = 0; i < NumInputs(tflite_node); i++) { const TfLiteTensor* tensor = GetOptionalInputTensor(context, tflite_node, i); if (tensor != nullptr && !IsConstantTensor(tensor)) { number_of_runtime_inputs++; } } return number_of_runtime_inputs; } int GetNumberOfConstInputsForNode(const TfLiteContext* context, const TfLiteNode* tflite_node) { return NumInputs(tflite_node) - GetNumberOfRuntimeInputsForNode(context, tflite_node); } absl::Status CheckInputsOutputs(const TfLiteContext* context, const TfLiteNode* tflite_node, int runtime_inputs, int outputs) { const int runtime_inputs_from_model = GetNumberOfRuntimeInputsForNode(context, tflite_node); if (runtime_inputs_from_model != runtime_inputs) { return absl::InternalError(absl::StrCat( "Expected ", runtime_inputs, " runtime input tensor(s), but node has ", runtime_inputs_from_model, " runtime input(s).")); } const int outputs_from_model = NumOutputs(tflite_node); if (outputs_from_model != outputs) { return absl::InternalError(absl::StrCat("Expected ", outputs, " output tensor(s), but node has ", outputs_from_model, " output(s).")); } return absl::OkStatus(); } absl::Status CheckInputsConstsOutputs(const TfLiteContext* context, const TfLiteNode* tflite_node, int runtime_inputs, int const_inputs, int outputs) { const int const_inputs_from_model = GetNumberOfConstInputsForNode(context, tflite_node); if (const_inputs_from_model != const_inputs) { return absl::InternalError(absl::StrCat( "Expected ", const_inputs, " const input tensor(s), but node has ", const_inputs_from_model, " const input(s).")); } return CheckInputsOutputs(context, tflite_node, runtime_inputs, outputs); } void ConvertFloat16ToFloat32(size_t num_elements, const uint16_t* src, float* dst) { for (size_t i = 0; i < num_elements; i++) { *dst++ = fp16_ieee_to_fp32_value(*src++); } } template <> absl::Status CreateVectorCopyData<float>(const TfLiteTensor& src, float* dst) { switch (src.type) { case kTfLiteFloat32: std::memcpy(dst, src.data.f, src.bytes); return absl::OkStatus(); case kTfLiteFloat16: ConvertFloat16ToFloat32(NumElements(&src), reinterpret_cast<uint16_t const*>(src.data.f16), dst); return absl::OkStatus(); case kTfLiteInt8: DequantizeConstantTensor(src, src.data.int8, dst); return absl::OkStatus(); case kTfLiteUInt8: DequantizeConstantTensor(src, src.data.uint8, dst); return absl::OkStatus(); case kTfLiteInt32: DequantizeConstantTensor(src, src.data.i32, dst); return absl::OkStatus(); default: return absl::InvalidArgumentError( "Unsupported data type for float32 tensor"); } } std::string GetDimensionString(const TfLiteIntArray* dimensions) { return absl::StrJoin(TfLiteIntArrayView(dimensions), "x"); } absl::Status SetAllDimensions(const TfLiteIntArray* dimensions, Scalar* shape) { if 
(dimensions->size < 0) { return absl::InvalidArgumentError("Invalid Scalar dimensions"); } for (int i = 0; i < dimensions->size; ++i) { if (dimensions->data[i] != 1) { return absl::InvalidArgumentError(absl::StrCat( GetDimensionString(dimensions), " cannot be reduced to scalar.")); } } shape->v = 1; return absl::OkStatus(); } absl::Status CheckIfLinearConvertible(const TfLiteIntArray* dimensions) { if (dimensions->size <= 0) { return absl::InvalidArgumentError("Dimension is empty."); } for (int i = 0; i < dimensions->size - 1; ++i) { if (dimensions->data[i] != 1) { return absl::InvalidArgumentError(absl::StrCat( GetDimensionString(dimensions), " cannot be reduced to linear.")); } } return absl::OkStatus(); } absl::Status SetAllDimensions(const TfLiteIntArray* dimensions, Linear* shape) { RETURN_IF_ERROR(CheckIfLinearConvertible(dimensions)); shape->v = dimensions->data[dimensions->size - 1]; return absl::OkStatus(); } absl::Status SetAllDimensions(const TfLiteIntArray* dimensions, HWC* shape) { if (dimensions->size == 3) { shape->h = dimensions->data[0]; shape->w = dimensions->data[1]; shape->c = dimensions->data[2]; return absl::OkStatus(); } if (dimensions->size == 4) { if (dimensions->data[0] != 1) { return absl::UnimplementedError("Batch size is not equal to 1."); } shape->h = dimensions->data[1]; shape->w = dimensions->data[2]; shape->c = dimensions->data[3]; return absl::OkStatus(); } return absl::InvalidArgumentError( absl::StrCat("Expected a 3D tensor of shape HxWxC or a 4D tensor of " "shape 1xHxWxC but got ", GetDimensionString(dimensions))); } absl::Status SetAllDimensions(const TfLiteIntArray* dimensions, HW* shape) { if (dimensions->size != 2) { return absl::InvalidArgumentError( absl::StrCat("Expected a 2D tensor of shape HxW but got ", GetDimensionString(dimensions))); } shape->h = dimensions->data[0]; shape->w = dimensions->data[1]; return absl::OkStatus(); } absl::Status SetAllDimensions(const TfLiteIntArray* dimensions, OHWI* shape) { if (dimensions->size != 4) { return absl::InvalidArgumentError( absl::StrCat("Expected a 4D tensor of shape OxHxWxI but got ", GetDimensionString(dimensions))); } shape->o = dimensions->data[0]; shape->h = dimensions->data[1]; shape->w = dimensions->data[2]; shape->i = dimensions->data[3]; return absl::OkStatus(); } absl::Status SetAllDimensions(const TfLiteIntArray* dimensions, BHWC* shape) { if (dimensions->size != 4) { return absl::InvalidArgumentError( absl::StrCat("Expected a 4D tensor of shape BxHxWxC but got ", GetDimensionString(dimensions))); } shape->b = dimensions->data[0]; shape->h = dimensions->data[1]; shape->w = dimensions->data[2]; shape->c = dimensions->data[3]; return absl::OkStatus(); } absl::Status MaybeFuseActivation(TfLiteFusedActivation fused_activation, GraphFloat32* graph, Node* node) { const auto outputs = graph->FindOutputs(node->id); if (outputs.size() != 1) { return absl::InternalError("Number of outputs != 1"); } switch (fused_activation) { case kTfLiteActNone: return absl::OkStatus(); case kTfLiteActRelu: case kTfLiteActReluN1To1: case kTfLiteActRelu6: { ReLUAttributes attr; attr.activation_max = fused_activation == kTfLiteActRelu ? 0.0f : (fused_activation == kTfLiteActReluN1To1 ? 1.0f : 6.0f); attr.activation_min = fused_activation == kTfLiteActReluN1To1 ? 
-1.0f : 0.0f; Node* activation_node; RETURN_IF_ERROR( NewPassthroughNode(graph, node, outputs[0], &activation_node)); activation_node->operation.type = ToString(OperationType::RELU); activation_node->operation.attributes = attr; return absl::OkStatus(); } case kTfLiteActTanh: { Node* activation_node; RETURN_IF_ERROR( NewPassthroughNode(graph, node, outputs[0], &activation_node)); activation_node->operation.type = ToString(OperationType::TANH); return absl::OkStatus(); } case kTfLiteActSigmoid: { Node* activation_node; RETURN_IF_ERROR( NewPassthroughNode(graph, node, outputs[0], &activation_node)); activation_node->operation.type = ToString(OperationType::SIGMOID); return absl::OkStatus(); } break; default: return absl::NotFoundError( absl::StrCat("Unsupported fused activation: ", fused_activation)); } } } }
#include "tensorflow/lite/delegates/gpu/common/model_builder_helper.h" #include <cstdint> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/lite/core/c/common.h" namespace tflite { namespace gpu { namespace { using ::testing::ElementsAre; TEST(ModelBuilderHelperTest, CreateVectorCopyDataDifferentSize) { TfLiteTensor tflite_tensor; tflite_tensor.type = kTfLiteInt32; int32_t src_data[4] = {1, 2, 3, 4}; tflite_tensor.data.i32 = src_data; tflite_tensor.dims = TfLiteIntArrayCreate(1); tflite_tensor.dims->data[0] = sizeof(src_data) / sizeof(src_data[0]); tflite_tensor.bytes = sizeof(src_data); int16_t dst[4]; ASSERT_OK(CreateVectorCopyData(tflite_tensor, dst)); EXPECT_THAT(dst, ElementsAre(1, 2, 3, 4)); TfLiteIntArrayFree(tflite_tensor.dims); } } } }
ID: 1,002
Language: cpp
Repository Name: tensorflow/tensorflow
File Name: internal
File Path in Repository: tensorflow/lite/delegates/gpu/common/memory_management/internal.cc
File Path for Unit Test: tensorflow/lite/delegates/gpu/common/memory_management/internal_test.cc
Code, then Unit Test - (Ground Truth):
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_MEMORY_MANAGEMENT_INTERNAL_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_MEMORY_MANAGEMENT_INTERNAL_H_ #include <stddef.h> #include <limits> #include <vector> #include "absl/memory/memory.h" #include "tensorflow/lite/delegates/gpu/common/memory_management/types.h" #include "tensorflow/lite/delegates/gpu/common/types.h" namespace tflite { namespace gpu { const size_t kNotAssigned = std::numeric_limits<size_t>::max(); template <typename TensorSizeT> struct TensorUsageWithIndex { const TensorUsageRecord<TensorSizeT>* usage_record; size_t idx; TensorUsageWithIndex(const TensorUsageRecord<TensorSizeT>* usage_record, size_t idx) : usage_record(usage_record), idx(idx) {} }; bool CompareBySize(const TensorUsageWithIndex<size_t>& first, const TensorUsageWithIndex<size_t>& second); using TaskProfile = std::vector<TensorUsageWithIndex<size_t>>; bool IsCoveringObject(const uint2& first_object, const uint2& second_object); bool IsCoveringObject(const uint3& first_object, const uint3& second_object); size_t AbsDiffInElements(const size_t first_size, const size_t second_size); size_t AbsDiffInElements(const uint2& first_size, const uint2& second_size); size_t AbsDiffInElements(const uint3& first_size, const uint3& second_size); template <typename ObjectSizeT> struct PoolRecord { PoolRecord(ObjectSizeT size, size_t obj_id) : object_size(size), object_id(obj_id) {} bool operator<(const PoolRecord& other) const { return (object_size < other.object_size) || (object_size == other.object_size && object_id < other.object_id); } ObjectSizeT object_size; size_t object_id; }; struct QueueRecord { QueueRecord(TaskId task_id, size_t obj_id) : last_task(task_id), object_id(obj_id) {} bool operator<(const QueueRecord& other) const { return (last_task > other.last_task) || (last_task == other.last_task && object_id > other.object_id); } TaskId last_task; size_t object_id; }; std::vector<TaskProfile> CalculateTaskProfiles( const std::vector<TensorUsageRecord<size_t>>& usage_records); std::vector<size_t> CalculatePositionalMaximums( const std::vector<TensorUsageRecord<size_t>>& usage_records); } } #endif #include "tensorflow/lite/delegates/gpu/common/memory_management/internal.h" #include <algorithm> #include <cstddef> #include <vector> #include "tensorflow/lite/delegates/gpu/common/memory_management/types.h" #include "tensorflow/lite/delegates/gpu/common/types.h" namespace tflite { namespace gpu { bool CompareBySize(const TensorUsageWithIndex<size_t>& first, const TensorUsageWithIndex<size_t>& second) { return first.usage_record->tensor_size > second.usage_record->tensor_size; } bool IsCoveringObject(const uint2& first_object, const uint2& second_object) { return first_object.x >= second_object.x && first_object.y >= second_object.y; } bool IsCoveringObject(const uint3& first_object, const uint3& second_object) { return first_object.x >= second_object.x && first_object.y >= second_object.y && first_object.z >= second_object.z; } size_t AbsDiffInElements(const size_t first_size, const size_t second_size) { return first_size >= second_size ? first_size - second_size : second_size - first_size; } size_t AbsDiffInElements(const uint2& first_size, const uint2& second_size) { const size_t first_elements_cnt = first_size.y * first_size.x; const size_t second_elements_cnt = second_size.y * second_size.x; return first_elements_cnt >= second_elements_cnt ? 
first_elements_cnt - second_elements_cnt : second_elements_cnt - first_elements_cnt; } size_t AbsDiffInElements(const uint3& first_size, const uint3& second_size) { const size_t first_elements_cnt = first_size.z * first_size.y * first_size.x; const size_t second_elements_cnt = second_size.z * second_size.y * second_size.x; return first_elements_cnt >= second_elements_cnt ? first_elements_cnt - second_elements_cnt : second_elements_cnt - first_elements_cnt; } std::vector<TaskProfile> CalculateTaskProfiles( const std::vector<TensorUsageRecord<size_t>>& usage_records) { TaskId num_tasks = 0; for (size_t i = 0; i < usage_records.size(); ++i) { num_tasks = std::max(num_tasks, usage_records[i].last_task + 1); } std::vector<TaskProfile> task_profiles(num_tasks); for (size_t rec_id = 0; rec_id < usage_records.size(); ++rec_id) { for (TaskId task_id = usage_records[rec_id].first_task; task_id <= usage_records[rec_id].last_task; ++task_id) { task_profiles[task_id].emplace_back(&usage_records[rec_id], rec_id); } } for (auto& task_profile : task_profiles) { std::stable_sort(task_profile.begin(), task_profile.end(), CompareBySize); } return task_profiles; } std::vector<size_t> CalculatePositionalMaximums( const std::vector<TensorUsageRecord<size_t>>& usage_records) { std::vector<TaskProfile> task_profiles = CalculateTaskProfiles(usage_records); std::vector<size_t> positional_max; for (const auto& task_profile : task_profiles) { size_t i = 0; for (; i < task_profile.size() && i < positional_max.size(); ++i) { positional_max[i] = std::max(positional_max[i], task_profile[i].usage_record->tensor_size); } for (; i < task_profile.size(); ++i) { positional_max.push_back(task_profile[i].usage_record->tensor_size); } } return positional_max; } } }
#include "tensorflow/lite/delegates/gpu/common/memory_management/internal.h" #include <cstddef> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/lite/delegates/gpu/common/memory_management/types.h" namespace tflite { namespace gpu { namespace { using ::testing::ElementsAre; TEST(TaskProfileTest, EmptyRecords) { std::vector<TaskProfile> task_profiles = CalculateTaskProfiles({}); EXPECT_TRUE(task_profiles.empty()); std::vector<size_t> positional_max = CalculatePositionalMaximums({}); EXPECT_TRUE(positional_max.empty()); } TEST(TaskProfileTest, OneRecord) { std::vector<TensorUsageRecord<size_t>> usage_records{ {16, 0, 1}}; const std::vector<std::vector<size_t>> correct_idx = {{0}, {0}}; std::vector<TaskProfile> task_profiles = CalculateTaskProfiles(usage_records); ASSERT_EQ(task_profiles.size(), correct_idx.size()); for (size_t i = 0; i < task_profiles.size(); ++i) { ASSERT_EQ(task_profiles[i].size(), correct_idx[i].size()); for (size_t j = 0; j < task_profiles[i].size(); ++j) { ASSERT_EQ(task_profiles[i][j].usage_record, &usage_records[correct_idx[i][j]]); ASSERT_EQ(task_profiles[i][j].idx, correct_idx[i][j]); } } std::vector<size_t> positional_max = CalculatePositionalMaximums(usage_records); EXPECT_THAT(positional_max, ElementsAre(16)); } TEST(TaskProfileTest, ChainRecords) { std::vector<TensorUsageRecord<size_t>> usage_records{ {16, 0, 1}, {8, 1, 2}, {64, 2, 3}, {32, 3, 4}, {8, 4, 5}, }; const std::vector<std::vector<size_t>> correct_idx = {{0}, {0, 1}, {2, 1}, {2, 3}, {3, 4}, {4}}; std::vector<TaskProfile> task_profiles = CalculateTaskProfiles(usage_records); ASSERT_EQ(task_profiles.size(), correct_idx.size()); for (size_t i = 0; i < task_profiles.size(); ++i) { ASSERT_EQ(task_profiles[i].size(), correct_idx[i].size()); for (size_t j = 0; j < task_profiles[i].size(); ++j) { ASSERT_EQ(task_profiles[i][j].usage_record, &usage_records[correct_idx[i][j]]); ASSERT_EQ(task_profiles[i][j].idx, correct_idx[i][j]); } } std::vector<size_t> positional_max = CalculatePositionalMaximums(usage_records); EXPECT_THAT(positional_max, ElementsAre(64, 32)); } TEST(TaskProfileTest, ComplexRecords) { std::vector<TensorUsageRecord<size_t>> usage_records{ {32, 0, 1}, {32, 1, 4}, {8, 2, 5}, {16, 3, 5}, {8, 4, 5}, {64, 5, 7}, {8, 6, 8}, {8, 7, 8}, {16, 8, 9}}; const std::vector<std::vector<size_t>> correct_idx = { {0}, {0, 1}, {1, 2}, {1, 3, 2}, {1, 3, 2, 4}, {5, 3, 2, 4}, {5, 6}, {5, 6, 7}, {8, 6, 7}, {8}}; std::vector<TaskProfile> task_profiles = CalculateTaskProfiles(usage_records); ASSERT_EQ(task_profiles.size(), correct_idx.size()); for (size_t i = 0; i < task_profiles.size(); ++i) { ASSERT_EQ(task_profiles[i].size(), correct_idx[i].size()); for (size_t j = 0; j < task_profiles[i].size(); ++j) { ASSERT_EQ(task_profiles[i][j].usage_record, &usage_records[correct_idx[i][j]]); ASSERT_EQ(task_profiles[i][j].idx, correct_idx[i][j]); } } std::vector<size_t> positional_max = CalculatePositionalMaximums(usage_records); EXPECT_THAT(positional_max, ElementsAre(64, 32, 8, 8)); } } } }
ID: 1,003
Language: cpp
Repository Name: tensorflow/tensorflow
File Name: make_padding
File Path in Repository: tensorflow/lite/delegates/gpu/common/transformations/make_padding.cc
File Path for Unit Test: tensorflow/lite/delegates/gpu/common/transformations/make_padding_test.cc
Code, then Unit Test - (Ground Truth):
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TRANSFORMATIONS_MAKE_PADDING_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TRANSFORMATIONS_MAKE_PADDING_H_ #include <memory> #include "tensorflow/lite/delegates/gpu/common/model_transformer.h" namespace tflite { namespace gpu { std::unique_ptr<NodeTransformation> NewMakePaddingFromConcat(); } } #endif #include "tensorflow/lite/delegates/gpu/common/transformations/make_padding.h" #include <memory> #include <string> #include <vector> #include "absl/memory/memory.h" #include "absl/strings/string_view.h" #include "absl/types/any.h" #include "tensorflow/lite/delegates/gpu/common/model.h" #include "tensorflow/lite/delegates/gpu/common/model_transformer.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/tensor.h" namespace tflite { namespace gpu { namespace { bool IsConstZeros(const Node& node) { if (node.operation.type != ToString(OperationType::CONSTANT)) { return false; } auto& attr = absl::any_cast<const ConstTensorAttributes&>(node.operation.attributes); for (auto f : attr.tensor.data) { if (f != 0) { return false; } } return true; } class MakePaddingFromZerosConcat : public NodeTransformation { public: TransformResult ApplyToNode(Node* node, GraphFloat32* graph) final { if (node->operation.type != ToString(OperationType::CONCAT)) { return {TransformStatus::SKIPPED, ""}; } auto inputs = graph->FindInputs(node->id); if (inputs.size() != 2) { return {TransformStatus::SKIPPED, ""}; } bool first = true; for (auto input : inputs) { auto dep = graph->FindProducer(input->id); if (dep != nullptr && IsConstZeros(*dep)) { auto& concat_attr = absl::any_cast<const ConcatAttributes&>(node->operation.attributes); PadAttributes pad_attr; pad_attr.type = PaddingContentType::ZEROS; pad_attr.appended = BHWC(0, 0, 0, 0); pad_attr.prepended = BHWC(0, 0, 0, 0); BHWC* p = first ? &pad_attr.prepended : &pad_attr.appended; switch (concat_attr.axis) { case Axis::HEIGHT: p->h = input->tensor.shape.h; break; case Axis::WIDTH: p->w = input->tensor.shape.w; break; case Axis::CHANNELS: p->c = input->tensor.shape.c; break; default: return {TransformStatus::DECLINED, "Padding for concat axis is unsupported: " + ToString(concat_attr.axis)}; } absl::Status status = RemovePrecedingNode(graph, dep, node); if (!status.ok()) { return {TransformStatus::INVALID, "Unable to remove const node: " + std::string(status.message())}; } node->operation.attributes = pad_attr; node->operation.type = ToString(OperationType::PAD); return {TransformStatus::APPLIED, "Replaced concat with padding"}; } first = false; } return {TransformStatus::SKIPPED, ""}; } }; } std::unique_ptr<NodeTransformation> NewMakePaddingFromConcat() { return absl::make_unique<MakePaddingFromZerosConcat>(); } } }
#include "tensorflow/lite/delegates/gpu/common/transformations/make_padding.h" #include <memory> #include <string> #include <vector> #include <gtest/gtest.h> #include "absl/status/status.h" #include "absl/types/any.h" #include "tensorflow/lite/delegates/gpu/common/model.h" #include "tensorflow/lite/delegates/gpu/common/model_transformer.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/common/tensor.h" namespace tflite { namespace gpu { namespace { TEST(MakePadding, Smoke) { GraphFloat32 graph; auto input = graph.NewValue(); input->tensor.shape = BHWC(1, 2, 3, 5); auto concat_node = graph.NewNode(); ASSERT_TRUE(graph.AddConsumer(concat_node->id, input->id).ok()); concat_node->operation.type = ToString(OperationType::CONCAT); ConcatAttributes attr; attr.axis = Axis::HEIGHT; concat_node->operation.attributes = attr; Value* output = nullptr; ASSERT_TRUE(AddOutput(&graph, concat_node, &output).ok()); output->tensor.shape = BHWC(1, 7, 3, 5); auto const_node = graph.NewNode(); const_node->operation.type = ToString(OperationType::CONSTANT); ConstTensorAttributes const_attr; const_attr.tensor.shape = BHWC(1, 5, 3, 5); const_attr.tensor.data = std::vector<float>(const_attr.tensor.shape.DimensionsProduct(), 0); const_node->operation.attributes = const_attr; Value* const_link = nullptr; ASSERT_TRUE( ConnectTwoNodes(&graph, const_node, concat_node, &const_link).ok()); const_link->tensor.shape = const_attr.tensor.shape; ASSERT_EQ(2, graph.nodes().size()); auto transformation = NewMakePaddingFromConcat(); ModelTransformer transformer(&graph); transformer.Apply("make_padding", transformation.get()); ASSERT_EQ(1, graph.nodes().size()); ASSERT_EQ(2, graph.values().size()); auto pad_node = graph.nodes()[0]; ASSERT_EQ(ToString(OperationType::PAD), pad_node->operation.type); auto pad_attr = absl::any_cast<PadAttributes>(pad_node->operation.attributes); EXPECT_EQ(BHWC(0, 0, 0, 0), pad_attr.prepended); EXPECT_EQ(BHWC(0, 5, 0, 0), pad_attr.appended); } } } }
ID: 1,004
Language: cpp
Repository Name: tensorflow/tensorflow
File Name: fuse_add_to_conv
File Path in Repository: tensorflow/lite/delegates/gpu/common/transformations/fuse_add_to_conv.cc
File Path for Unit Test: tensorflow/lite/delegates/gpu/common/transformations/fuse_add_to_conv_test.cc
Code, then Unit Test - (Ground Truth):
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TRANSFORMATIONS_FUSE_ADD_TO_CONV_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TRANSFORMATIONS_FUSE_ADD_TO_CONV_H_ #include <memory> #include "tensorflow/lite/delegates/gpu/common/model_transformer.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" namespace tflite { namespace gpu { std::unique_ptr<SequenceTransformation> NewMergeConvolutionWithAdd(); std::unique_ptr<SequenceTransformation> NewMergeAddWithConvolution(); void FuseConvolution2DWithAdd(const ElementwiseAttributes& add_attr, Convolution2DAttributes* attr); void FuseDepthwiseConvolution2DWithAdd(const ElementwiseAttributes& add_attr, DepthwiseConvolution2DAttributes* attr); void FuseConvolutionTransposedWithAdd(const ElementwiseAttributes& add_attr, ConvolutionTransposedAttributes* attr); void FuseFullyConnectedWithAdd(const ElementwiseAttributes& add_attr, FullyConnectedAttributes* attr); } } #endif #include "tensorflow/lite/delegates/gpu/common/transformations/fuse_add_to_conv.h" #include <any> #include <memory> #include <string> #include <variant> #include <vector> #include "absl/strings/string_view.h" #include "tensorflow/lite/delegates/gpu/common/data_type.h" #include "tensorflow/lite/delegates/gpu/common/model.h" #include "tensorflow/lite/delegates/gpu/common/model_transformer.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/tensor.h" namespace tflite { namespace gpu { namespace { void FuseBiasWithAddAttributes(const ElementwiseAttributes& add_attr, const int channels, Tensor<Linear, DataType::FLOAT32>* bias) { auto add = absl::get_if<Tensor<Linear, DataType::FLOAT32>>(&add_attr.param); auto add_scalar = absl::get_if<float>(&add_attr.param); if (bias->data.empty()) { *bias = MakeZeroTensor<Linear, DataType::FLOAT32>(Linear(channels)); } for (int d = 0; d < channels; ++d) { bias->data[d] += add ? 
add->data[d] : *add_scalar; } } class MergeConvolutionWithAdd : public SequenceTransformation { public: int ExpectedSequenceLength() const final { return 2; } TransformResult ApplyToNodesSequence(const std::vector<Node*>& sequence, GraphFloat32* graph) final { auto& conv_node = *sequence[0]; if (graph->FindInputs(conv_node.id).size() != 1) { return {TransformStatus::DECLINED, "This fusion is only applicable to ops with one runtime input."}; } auto& add_node = *sequence[1]; if (add_node.operation.type != ToString(OperationType::ADD)) { return {TransformStatus::SKIPPED, ""}; } ElementwiseAttributes add_attr = absl::any_cast<ElementwiseAttributes>(add_node.operation.attributes); if (!absl::holds_alternative<Tensor<Linear, DataType::FLOAT32>>( add_attr.param) && !absl::holds_alternative<float>(add_attr.param)) { return {TransformStatus::DECLINED, "This fuse applicable only for broadcast or scalar addition."}; } if (conv_node.operation.type == ToString(OperationType::CONVOLUTION_2D)) { Convolution2DAttributes* conv_attr = absl::any_cast<Convolution2DAttributes>( &conv_node.operation.attributes); FuseConvolution2DWithAdd(add_attr, conv_attr); } else if (conv_node.operation.type == ToString(OperationType::CONVOLUTION_TRANSPOSED)) { ConvolutionTransposedAttributes* conv_attr = absl::any_cast<ConvolutionTransposedAttributes>( &conv_node.operation.attributes); FuseConvolutionTransposedWithAdd(add_attr, conv_attr); } else if (conv_node.operation.type == ToString(OperationType::DEPTHWISE_CONVOLUTION)) { DepthwiseConvolution2DAttributes* conv_attr = absl::any_cast<DepthwiseConvolution2DAttributes>( &conv_node.operation.attributes); FuseDepthwiseConvolution2DWithAdd(add_attr, conv_attr); } else if (conv_node.operation.type == ToString(OperationType::FULLY_CONNECTED)) { FullyConnectedAttributes* conv_attr = absl::any_cast<FullyConnectedAttributes>( &conv_node.operation.attributes); FuseFullyConnectedWithAdd(add_attr, conv_attr); } else { return {TransformStatus::SKIPPED, ""}; } absl::Status status = RemoveFollowingNode(graph, &add_node, &conv_node); if (!status.ok()) { return {TransformStatus::INVALID, "Unable to remove add node after convolution: " + std::string(status.message())}; } return {TransformStatus::APPLIED, ""}; } }; void FuseAddWithConvolution2D(const ElementwiseAttributes& add_attr, Convolution2DAttributes* attr) { auto add = absl::get_if<Tensor<Linear, DataType::FLOAT32>>(&add_attr.param); auto add_scalar = absl::get_if<float>(&add_attr.param); if (attr->bias.data.empty()) { attr->bias = MakeZeroTensor<Linear, DataType::FLOAT32>( Linear(attr->weights.shape.o)); } for (int d = 0; d < attr->weights.shape.o; ++d) { float sum = 0.0f; for (int s = 0; s < attr->weights.shape.i; ++s) { const float add_value = add ? 
add->data[s] : *add_scalar; for (int k_y = 0; k_y < attr->weights.shape.h; ++k_y) { for (int k_x = 0; k_x < attr->weights.shape.w; ++k_x) { const int index = attr->weights.shape.LinearIndex({{d, k_y, k_x, s}}); sum += add_value * attr->weights.data[index]; } } } attr->bias.data[d] += sum; } } class MergeAddWithConvolution : public SequenceTransformation { public: int ExpectedSequenceLength() const final { return 2; } TransformResult ApplyToNodesSequence(const std::vector<Node*>& sequence, GraphFloat32* graph) final { auto& conv_node = *sequence[1]; if (graph->FindInputs(conv_node.id).size() != 1) { return {TransformStatus::DECLINED, "This fusion is only applicable to ops with one runtime input."}; } auto& add_node = *sequence[0]; if (add_node.operation.type != ToString(OperationType::ADD)) { return {TransformStatus::SKIPPED, ""}; } ElementwiseAttributes add_attr = absl::any_cast<ElementwiseAttributes>(add_node.operation.attributes); if (!absl::holds_alternative<Tensor<Linear, DataType::FLOAT32>>( add_attr.param) && !absl::holds_alternative<float>(add_attr.param)) { return {TransformStatus::DECLINED, "This fuse applicable only for broadcast or scalar addition."}; } if (conv_node.operation.type == ToString(OperationType::CONVOLUTION_2D)) { Convolution2DAttributes* conv_attr = absl::any_cast<Convolution2DAttributes>( &conv_node.operation.attributes); if (conv_attr->groups != 1) { return {TransformStatus::DECLINED, "This fuse not applicable for grouped convolution."}; } if (conv_attr->padding.appended.w != 0 || conv_attr->padding.appended.h != 0 || conv_attr->padding.prepended.w != 0 || conv_attr->padding.prepended.h != 0) { return {TransformStatus::DECLINED, "This fuse applicable only for convolution that do not read " "out of bound elements."}; } FuseAddWithConvolution2D(add_attr, conv_attr); } else { return {TransformStatus::SKIPPED, ""}; } absl::Status status = RemovePrecedingNode(graph, &add_node, &conv_node); if (!status.ok()) { return {TransformStatus::INVALID, "Unable to remove mul node after convolution: " + std::string(status.message())}; } return {TransformStatus::APPLIED, ""}; } }; } std::unique_ptr<SequenceTransformation> NewMergeConvolutionWithAdd() { return absl::make_unique<MergeConvolutionWithAdd>(); } std::unique_ptr<SequenceTransformation> NewMergeAddWithConvolution() { return absl::make_unique<MergeAddWithConvolution>(); } void FuseConvolution2DWithAdd(const ElementwiseAttributes& add_attr, Convolution2DAttributes* attr) { FuseBiasWithAddAttributes(add_attr, attr->weights.shape.o, &attr->bias); } void FuseDepthwiseConvolution2DWithAdd(const ElementwiseAttributes& add_attr, DepthwiseConvolution2DAttributes* attr) { FuseBiasWithAddAttributes( add_attr, attr->weights.shape.o * attr->weights.shape.i, &attr->bias); } void FuseConvolutionTransposedWithAdd(const ElementwiseAttributes& add_attr, ConvolutionTransposedAttributes* attr) { FuseBiasWithAddAttributes(add_attr, attr->weights.shape.o, &attr->bias); } void FuseFullyConnectedWithAdd(const ElementwiseAttributes& add_attr, FullyConnectedAttributes* attr) { FuseBiasWithAddAttributes(add_attr, attr->weights.shape.o, &attr->bias); } } }
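The bias-folding arithmetic behind MergeAddWithConvolution can be checked in isolation: adding a per-channel constant to the convolution input is equivalent to running the original convolution with bias[d] increased by sum over s, k_y, k_x of add[s] * W[d, k_y, k_x, s]. Below is a minimal standalone sketch (plain C++, no TFLite headers, 1x1 kernel, illustrative names and values only) that verifies this identity; it is not part of the transformation source above.

// Sketch: conv(x + add) == conv(x) once the add is folded into the bias.
// (For the add-after-conv case handled by FuseConvolution2DWithAdd the rule
// is even simpler: bias[d] += add[d].)
#include <cassert>
#include <cmath>
#include <vector>

int main() {
  const int O = 2, I = 3;                        // output / input channels
  std::vector<float> w = {0.1f, 0.2f, 0.3f,      // W[o][i], 1x1 kernel
                          0.4f, 0.5f, 0.6f};
  std::vector<float> bias = {1.0f, -1.0f};
  std::vector<float> add = {0.3f, 0.7f, -0.2f};  // added to the conv *input*
  std::vector<float> x = {2.0f, -1.0f, 0.5f};    // one spatial position

  // Reference: conv(x + add).
  std::vector<float> ref(O), fused(O);
  for (int o = 0; o < O; ++o) {
    ref[o] = bias[o];
    for (int i = 0; i < I; ++i) ref[o] += w[o * I + i] * (x[i] + add[i]);
  }

  // Fused: fold the add into the bias, then run conv(x) unchanged.
  std::vector<float> fused_bias = bias;
  for (int o = 0; o < O; ++o)
    for (int i = 0; i < I; ++i) fused_bias[o] += w[o * I + i] * add[i];
  for (int o = 0; o < O; ++o) {
    fused[o] = fused_bias[o];
    for (int i = 0; i < I; ++i) fused[o] += w[o * I + i] * x[i];
  }

  for (int o = 0; o < O; ++o) assert(std::fabs(ref[o] - fused[o]) < 1e-6f);
  return 0;
}

This is also why the transformation declines convolutions with non-zero padding: padded positions read zeros rather than the added constant, so the identity above would no longer hold there.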
#include "tensorflow/lite/delegates/gpu/common/transformations/fuse_add_to_conv.h" #include <any> #include <memory> #include <string> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/status/status.h" #include "tensorflow/lite/delegates/gpu/common/data_type.h" #include "tensorflow/lite/delegates/gpu/common/model.h" #include "tensorflow/lite/delegates/gpu/common/model_transformer.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/common/tensor.h" using ::testing::FloatNear; using ::testing::Pointwise; namespace tflite { namespace gpu { namespace { TEST(MergeConvolutionWithAddTest, Smoke) { GraphFloat32 graph; auto input = graph.NewValue(); input->tensor.shape = BHWC(1, 4, 4, 8); Convolution2DAttributes conv_attr; conv_attr.padding.prepended = HW(0, 0); conv_attr.padding.appended = HW(0, 0); conv_attr.strides = HW(1, 1); conv_attr.dilations = HW(1, 1); conv_attr.weights.shape = OHWI(16, 3, 2, 8); conv_attr.weights.data.resize(conv_attr.weights.shape.DimensionsProduct()); conv_attr.bias.shape = Linear(16); conv_attr.bias.data.resize(16); Tensor<Linear, DataType::FLOAT32> add_tensor; add_tensor.shape = Linear(16); add_tensor.data.resize(16); ElementwiseAttributes add_attr; add_attr.param = add_tensor; auto conv_node = graph.NewNode(); conv_node->operation.type = ToString(OperationType::CONVOLUTION_2D); conv_node->operation.attributes = conv_attr; auto add_node = graph.NewNode(); add_node->operation.type = ToString(OperationType::ADD); add_node->operation.attributes = add_attr; ASSERT_TRUE(graph.AddConsumer(conv_node->id, input->id).ok()); Value* output = nullptr; ASSERT_TRUE(AddOutput(&graph, add_node, &output).ok()); output->tensor.shape = BHWC(1, 4, 4, 16); Value* link1 = nullptr; ASSERT_TRUE(ConnectTwoNodes(&graph, conv_node, add_node, &link1).ok()); link1->tensor.shape = BHWC(1, 4, 4, 16); ASSERT_EQ(2, graph.nodes().size()); ASSERT_EQ(3, graph.values().size()); auto transformation = NewMergeConvolutionWithAdd(); ModelTransformer transformer(&graph); transformer.Apply("merge_convolution_with_add", transformation.get()); EXPECT_EQ(1, graph.nodes().size()); EXPECT_EQ(2, graph.values().size()); EXPECT_EQ(ToString(OperationType::CONVOLUTION_2D), graph.nodes()[0]->operation.type); } TEST(FuseAddAfterConvolution2DTest, Smoke) { Convolution2DAttributes attr; attr.weights.shape = OHWI(2, 1, 2, 2); attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f}; attr.bias.shape = Linear(2); attr.bias.data = {1.1f, 1.2f}; Tensor<Linear, DataType::FLOAT32> add_tensor; add_tensor.shape = Linear(2); add_tensor.data = {0.3f, 0.7f}; ElementwiseAttributes add_attr; add_attr.param = add_tensor; FuseConvolution2DWithAdd(add_attr, &attr); EXPECT_THAT(attr.weights.data, Pointwise(FloatNear(1e-6), {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f})); EXPECT_THAT(attr.bias.data, Pointwise(FloatNear(1e-6), {1.4f, 1.9f})); } TEST(FuseAddAfterDepthwiseConvolution2DTest, Smoke) { DepthwiseConvolution2DAttributes attr; attr.weights.shape = OHWI(2, 1, 2, 2); attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f}; attr.bias.shape = Linear(4); attr.bias.data = {1.1f, 1.2f, 1.3f, 1.4f}; Tensor<Linear, DataType::FLOAT32> add_tensor; add_tensor.shape = Linear(4); add_tensor.data = {0.3f, 0.7f, 0.5f, 0.1f}; ElementwiseAttributes add_attr; add_attr.param = add_tensor; FuseDepthwiseConvolution2DWithAdd(add_attr, &attr); EXPECT_THAT(attr.weights.data, 
Pointwise(FloatNear(1e-6), {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f})); EXPECT_THAT(attr.bias.data, Pointwise(FloatNear(1e-6), {1.4f, 1.9f, 1.8f, 1.5f})); } TEST(FuseAddAfterConvolutionTransposedTest, Smoke) { ConvolutionTransposedAttributes attr; attr.weights.shape = OHWI(2, 1, 2, 2); attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f}; attr.bias.shape = Linear(2); attr.bias.data = {1.1f, 1.2f}; Tensor<Linear, DataType::FLOAT32> add_tensor; add_tensor.shape = Linear(2); add_tensor.data = {0.3f, 0.7f}; ElementwiseAttributes add_attr; add_attr.param = add_tensor; FuseConvolutionTransposedWithAdd(add_attr, &attr); EXPECT_THAT(attr.weights.data, Pointwise(FloatNear(1e-6), {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f})); EXPECT_THAT(attr.bias.data, Pointwise(FloatNear(1e-6), {1.4f, 1.9f})); } TEST(FuseAddAfterFullyConnectedTest, Smoke) { FullyConnectedAttributes attr; attr.weights.shape = OHWI(2, 1, 1, 2); attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f}; attr.bias.shape = Linear(2); attr.bias.data = {1.1f, 1.2f}; Tensor<Linear, DataType::FLOAT32> add_tensor; add_tensor.shape = Linear(2); add_tensor.data = {0.3f, 0.7f}; ElementwiseAttributes add_attr; add_attr.param = add_tensor; FuseFullyConnectedWithAdd(add_attr, &attr); EXPECT_THAT(attr.weights.data, Pointwise(FloatNear(1e-6), {0.1f, 0.2f, 0.3f, 0.4f})); EXPECT_THAT(attr.bias.data, Pointwise(FloatNear(1e-6), {1.4f, 1.9f})); } TEST(MergeAddWithConvolutionTest, Smoke) { GraphFloat32 graph; auto input = graph.NewValue(); input->tensor.shape = BHWC(1, 4, 4, 2); Tensor<Linear, DataType::FLOAT32> add_tensor; add_tensor.shape = Linear(2); add_tensor.data = {1.0f, 2.0f}; ElementwiseAttributes add_attr; add_attr.param = add_tensor; Convolution2DAttributes conv_attr; conv_attr.padding.prepended = HW(0, 0); conv_attr.padding.appended = HW(0, 0); conv_attr.strides = HW(1, 1); conv_attr.dilations = HW(1, 1); conv_attr.weights.shape = OHWI(2, 1, 2, 2); conv_attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f}; conv_attr.bias.shape = Linear(2); conv_attr.bias.data = {1.1f, 1.2f}; auto conv_node = graph.NewNode(); conv_node->operation.type = ToString(OperationType::CONVOLUTION_2D); conv_node->operation.attributes = conv_attr; auto add_node = graph.NewNode(); add_node->operation.type = ToString(OperationType::ADD); add_node->operation.attributes = add_attr; ASSERT_TRUE(graph.AddConsumer(add_node->id, input->id).ok()); Value* output = nullptr; ASSERT_TRUE(AddOutput(&graph, conv_node, &output).ok()); output->tensor.shape = BHWC(1, 4, 3, 2); Value* link1 = nullptr; ASSERT_TRUE(ConnectTwoNodes(&graph, add_node, conv_node, &link1).ok()); link1->tensor.shape = BHWC(1, 4, 4, 2); ASSERT_EQ(2, graph.nodes().size()); ASSERT_EQ(3, graph.values().size()); auto transformation = NewMergeAddWithConvolution(); ModelTransformer transformer(&graph); transformer.Apply("merge_add_with_convolution", transformation.get()); EXPECT_EQ(1, graph.nodes().size()); EXPECT_EQ(2, graph.values().size()); EXPECT_EQ(ToString(OperationType::CONVOLUTION_2D), graph.nodes()[0]->operation.type); Convolution2DAttributes* conv_attr_new = absl::any_cast<Convolution2DAttributes>( &graph.nodes()[0]->operation.attributes); EXPECT_THAT(conv_attr_new->bias.data, Pointwise(FloatNear(1e-6), {2.7f, 5.2f})); } } } }
1005
cpp
tensorflow/tensorflow
global_pooling_to_reduce_op
tensorflow/lite/delegates/gpu/common/transformations/global_pooling_to_reduce_op.cc
tensorflow/lite/delegates/gpu/common/transformations/global_pooling_to_reduce_op_test.cc
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TRANSFORMATIONS_GLOBAL_POOLING_TO_REDUCE_OP_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TRANSFORMATIONS_GLOBAL_POOLING_TO_REDUCE_OP_H_ #include <memory> #include "tensorflow/lite/delegates/gpu/common/model_transformer.h" namespace tflite { namespace gpu { std::unique_ptr<NodeTransformation> NewGlobalPoolingToReduceOp(); } } #endif #include "tensorflow/lite/delegates/gpu/common/transformations/global_pooling_to_reduce_op.h" #include <memory> #include <string> #include <vector> #include "absl/memory/memory.h" #include "absl/types/any.h" #include "tensorflow/lite/delegates/gpu/common/model.h" #include "tensorflow/lite/delegates/gpu/common/model_transformer.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/common/tensor.h" namespace tflite { namespace gpu { namespace { bool IsGlobalPooling(const Pooling2DAttributes& attr, const BHWC& src_shape, const BHWC& dst_shape) { return dst_shape.w == 1 && dst_shape.h == 1 && attr.kernel.w == src_shape.w && attr.kernel.h == src_shape.h && attr.padding.appended.w == 0 && attr.padding.appended.h == 0 && attr.padding.prepended.w == 0 && attr.padding.prepended.h == 0; } bool IsGlobalAveragePooling(const Pooling2DAttributes& attr, const BHWC& src_shape, const BHWC& dst_shape) { return attr.type == tflite::gpu::PoolingType::AVERAGE && attr.output_indices == false && IsGlobalPooling(attr, src_shape, dst_shape); } class GlobalPoolingToReduceOp : public NodeTransformation { public: TransformResult ApplyToNode(Node* node, GraphFloat32* graph) final { if (node->operation.type != ToString(OperationType::POOLING_2D)) { return {TransformStatus::SKIPPED, ""}; } auto inputs = graph->FindInputs(node->id); auto outputs = graph->FindOutputs(node->id); const auto& pool_attr = absl::any_cast<const Pooling2DAttributes&>(node->operation.attributes); if (!IsGlobalAveragePooling(pool_attr, inputs[0]->tensor.shape, outputs[0]->tensor.shape)) { return {TransformStatus::SKIPPED, ""}; } MeanAttributes mean_attr; mean_attr.dims = {Axis::WIDTH, Axis::HEIGHT}; node->operation.attributes = mean_attr; node->operation.type = ToString(OperationType::MEAN); return {TransformStatus::APPLIED, "Replaced global average pooling with mean."}; } }; } std::unique_ptr<NodeTransformation> NewGlobalPoolingToReduceOp() { return absl::make_unique<GlobalPoolingToReduceOp>(); } } }
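GlobalPoolingToReduceOp only rewrites a pooling node when it provably reduces the whole spatial plane to a single value per channel. The following standalone sketch restates that precondition with simplified stand-in types (HW2, Shape and Pooling below are illustrative, not the real attribute structs) so the check is easy to read on its own.

// Sketch of the condition under which AVERAGE POOLING_2D becomes MEAN over
// {HEIGHT, WIDTH}: kernel spans the whole input, output is 1x1 spatially,
// no padding, and no output indices are requested.
#include <cassert>

struct HW2 { int h, w; };
struct Shape { int b, h, w, c; };
struct Pooling {
  HW2 kernel;
  HW2 prepended, appended;  // padding
  bool average, output_indices;
};

bool IsRewritableAsMean(const Pooling& p, const Shape& src, const Shape& dst) {
  return p.average && !p.output_indices &&
         dst.h == 1 && dst.w == 1 &&
         p.kernel.h == src.h && p.kernel.w == src.w &&
         p.prepended.h == 0 && p.prepended.w == 0 &&
         p.appended.h == 0 && p.appended.w == 0;
}

int main() {
  Shape src{1, 4, 4, 8}, dst{1, 1, 1, 8};
  Pooling global_pool{{4, 4}, {0, 0}, {0, 0}, /*average=*/true, false};
  Pooling local_pool{{2, 2}, {0, 0}, {0, 0}, /*average=*/true, false};
  assert(IsRewritableAsMean(global_pool, src, dst));
  assert(!IsRewritableAsMean(local_pool, src, Shape{1, 2, 2, 8}));
  return 0;
}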
#include "tensorflow/lite/delegates/gpu/common/transformations/global_pooling_to_reduce_op.h" #include <memory> #include <string> #include <vector> #include <gtest/gtest.h> #include "absl/status/status.h" #include "absl/types/any.h" #include "tensorflow/lite/delegates/gpu/common/model.h" #include "tensorflow/lite/delegates/gpu/common/model_transformer.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/common/tensor.h" namespace tflite { namespace gpu { namespace { TEST(MakeMeanFromGlobalAveragePooling, Smoke) { GraphFloat32 graph; auto input = graph.NewValue(); input->tensor.shape = BHWC(1, 4, 4, 8); Pooling2DAttributes attr; attr.padding.prepended = tflite::gpu::HW(0, 0); attr.padding.appended = tflite::gpu::HW(0, 0); attr.strides = tflite::gpu::HW(4, 4); attr.kernel = tflite::gpu::HW(4, 4); attr.type = tflite::gpu::PoolingType::AVERAGE; attr.output_indices = false; auto pool_node = graph.NewNode(); pool_node->operation.type = ToString(OperationType::POOLING_2D); pool_node->operation.attributes = attr; ASSERT_TRUE(graph.AddConsumer(pool_node->id, input->id).ok()); Value* output = nullptr; ASSERT_TRUE(AddOutput(&graph, pool_node, &output).ok()); output->tensor.shape = BHWC(1, 1, 1, 8); ASSERT_EQ(1, graph.nodes().size()); ASSERT_EQ(2, graph.values().size()); auto transformation = NewGlobalPoolingToReduceOp(); ModelTransformer transformer(&graph); transformer.Apply("global_average_pooling_to_mean", transformation.get()); ASSERT_EQ(1, graph.nodes().size()); ASSERT_EQ(2, graph.values().size()); ASSERT_EQ(ToString(OperationType::MEAN), graph.nodes()[0]->operation.type); } } } }
1006
cpp
tensorflow/tensorflow
make_fully_connected
tensorflow/lite/delegates/gpu/common/transformations/make_fully_connected.cc
tensorflow/lite/delegates/gpu/common/transformations/make_fully_connected_test.cc
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TRANSFORMATIONS_MAKE_FULLY_CONNECTED_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TRANSFORMATIONS_MAKE_FULLY_CONNECTED_H_ #include <memory> #include "tensorflow/lite/delegates/gpu/common/model_transformer.h" namespace tflite { namespace gpu { std::unique_ptr<NodeTransformation> NewMakeFullyConnectedFromConvolution(); } } #endif #include "tensorflow/lite/delegates/gpu/common/transformations/make_fully_connected.h" #include <memory> #include <string> #include <vector> #include "absl/memory/memory.h" #include "absl/types/any.h" #include "tensorflow/lite/delegates/gpu/common/model.h" #include "tensorflow/lite/delegates/gpu/common/model_transformer.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/common/tensor.h" namespace tflite { namespace gpu { namespace { bool IsConvEquivalentToFullyConnected(const Convolution2DAttributes& attr) { return attr.weights.shape.w == 1 && attr.weights.shape.h == 1 && attr.strides == HW(1, 1) && attr.dilations == HW(1, 1) && attr.padding.prepended == HW(0, 0) && attr.padding.appended == HW(0, 0); } class MakeFullyConnectedFromConvolution : public NodeTransformation { public: TransformResult ApplyToNode(Node* node, GraphFloat32* graph) final { if (node->operation.type != ToString(OperationType::CONVOLUTION_2D)) { return {TransformStatus::SKIPPED, ""}; } auto inputs = graph->FindInputs(node->id); if (inputs.size() != 1) { return {TransformStatus::SKIPPED, ""}; } const auto& input_shape = inputs[0]->tensor.shape; if (input_shape.w != 1 || input_shape.h != 1) { return {TransformStatus::SKIPPED, ""}; } const auto& conv_attr = absl::any_cast<const Convolution2DAttributes&>( node->operation.attributes); if (!IsConvEquivalentToFullyConnected(conv_attr)) { return {TransformStatus::SKIPPED, ""}; } FullyConnectedAttributes fc_attr; fc_attr.weights = conv_attr.weights; fc_attr.bias = conv_attr.bias; node->operation.attributes = fc_attr; node->operation.type = ToString(OperationType::FULLY_CONNECTED); return {TransformStatus::APPLIED, "Replaced convolution with fully connected."}; } }; } std::unique_ptr<NodeTransformation> NewMakeFullyConnectedFromConvolution() { return absl::make_unique<MakeFullyConnectedFromConvolution>(); } } }
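MakeFullyConnectedFromConvolution relies on the fact that a 1x1 convolution with unit stride and dilation and no padding, applied to a 1x1 spatial input, computes exactly a matrix-vector product, so the weights and bias can be carried over to FULLY_CONNECTED unchanged. A minimal sketch of that precondition follows; the types and values are simplified stand-ins, not the real attribute structs.

// Sketch of the check behind replacing CONVOLUTION_2D with FULLY_CONNECTED.
#include <cassert>

struct HW2 { int h, w; };
struct ConvAttr {
  HW2 kernel, strides, dilations, prepended, appended;  // appended/prepended = padding
};

bool IsEquivalentToFullyConnected(const ConvAttr& a, int input_h, int input_w) {
  return input_h == 1 && input_w == 1 &&
         a.kernel.h == 1 && a.kernel.w == 1 &&
         a.strides.h == 1 && a.strides.w == 1 &&
         a.dilations.h == 1 && a.dilations.w == 1 &&
         a.prepended.h == 0 && a.prepended.w == 0 &&
         a.appended.h == 0 && a.appended.w == 0;
}

int main() {
  ConvAttr one_by_one{{1, 1}, {1, 1}, {1, 1}, {0, 0}, {0, 0}};
  ConvAttr four_by_four{{4, 4}, {4, 4}, {1, 1}, {0, 0}, {0, 0}};
  // Only the 1x1 convolution over a 1x1 input can become FULLY_CONNECTED;
  // the OHWI weights and the bias are reused as-is.
  assert(IsEquivalentToFullyConnected(one_by_one, 1, 1));
  assert(!IsEquivalentToFullyConnected(four_by_four, 4, 4));
  assert(!IsEquivalentToFullyConnected(one_by_one, 4, 4));
  return 0;
}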
#include "tensorflow/lite/delegates/gpu/common/transformations/make_fully_connected.h" #include <memory> #include <string> #include <vector> #include <gtest/gtest.h> #include "absl/status/status.h" #include "absl/types/any.h" #include "tensorflow/lite/delegates/gpu/common/model.h" #include "tensorflow/lite/delegates/gpu/common/model_transformer.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/common/tensor.h" namespace tflite { namespace gpu { namespace { TEST(MakeFullyConnected, Smoke) { GraphFloat32 graph; auto input = graph.NewValue(); input->tensor.shape = BHWC(1, 4, 4, 8); Convolution2DAttributes attr0; attr0.padding.prepended = HW(0, 0); attr0.padding.appended = HW(0, 0); attr0.strides = HW(1, 1); attr0.dilations = HW(1, 1); attr0.weights.shape = OHWI(16, 1, 1, 8); attr0.bias.shape = Linear(16); Convolution2DAttributes attr1; attr1.padding.prepended = HW(0, 0); attr1.padding.appended = HW(0, 0); attr1.strides = HW(4, 4); attr1.dilations = HW(1, 1); attr1.weights.shape = OHWI(16, 4, 4, 16); attr1.bias.shape = Linear(16); Convolution2DAttributes attr2; attr2.padding.prepended = HW(0, 0); attr2.padding.appended = HW(0, 0); attr2.strides = HW(1, 1); attr2.dilations = HW(1, 1); attr2.weights.shape = OHWI(32, 1, 1, 16); attr2.bias.shape = Linear(32); auto conv1x1_node0 = graph.NewNode(); conv1x1_node0->operation.type = ToString(OperationType::CONVOLUTION_2D); conv1x1_node0->operation.attributes = attr0; auto conv4x4_node1 = graph.NewNode(); conv4x4_node1->operation.type = ToString(OperationType::CONVOLUTION_2D); conv4x4_node1->operation.attributes = attr1; auto conv1x1_node2 = graph.NewNode(); conv1x1_node2->operation.type = ToString(OperationType::CONVOLUTION_2D); conv1x1_node2->operation.attributes = attr2; ASSERT_TRUE(graph.AddConsumer(conv1x1_node0->id, input->id).ok()); Value* output = nullptr; ASSERT_TRUE(AddOutput(&graph, conv1x1_node2, &output).ok()); output->tensor.shape = BHWC(1, 1, 1, 32); Value* link1 = nullptr; ASSERT_TRUE( ConnectTwoNodes(&graph, conv1x1_node0, conv4x4_node1, &link1).ok()); link1->tensor.shape = BHWC(1, 4, 4, 16); Value* link2 = nullptr; ASSERT_TRUE( ConnectTwoNodes(&graph, conv4x4_node1, conv1x1_node2, &link2).ok()); link2->tensor.shape = BHWC(1, 1, 1, 16); ASSERT_EQ(3, graph.nodes().size()); ASSERT_EQ(4, graph.values().size()); auto transformation = NewMakeFullyConnectedFromConvolution(); ModelTransformer transformer(&graph); transformer.Apply("make_fully_connected", transformation.get()); ASSERT_EQ(3, graph.nodes().size()); ASSERT_EQ(4, graph.values().size()); ASSERT_EQ(ToString(OperationType::CONVOLUTION_2D), graph.nodes()[0]->operation.type); ASSERT_EQ(ToString(OperationType::CONVOLUTION_2D), graph.nodes()[1]->operation.type); ASSERT_EQ(ToString(OperationType::FULLY_CONNECTED), graph.nodes()[2]->operation.type); auto fc_attr = absl::any_cast<FullyConnectedAttributes>( graph.nodes()[2]->operation.attributes); EXPECT_EQ(OHWI(32, 1, 1, 16), fc_attr.weights.shape); EXPECT_EQ(Linear(32), fc_attr.bias.shape); } } } }
1007
cpp
tensorflow/tensorflow
merge_padding_with
tensorflow/lite/delegates/gpu/common/transformations/merge_padding_with.cc
tensorflow/lite/delegates/gpu/common/transformations/merge_padding_with_test.cc
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TRANSFORMATIONS_MERGE_PADDING_WITH_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TRANSFORMATIONS_MERGE_PADDING_WITH_H_ #include <memory> #include "tensorflow/lite/delegates/gpu/common/model_transformer.h" namespace tflite { namespace gpu { std::unique_ptr<SequenceTransformation> NewMergePaddingWithPooling(); std::unique_ptr<SequenceTransformation> NewMergePaddingWithConvolution2D(); std::unique_ptr<SequenceTransformation> NewMergePaddingWithDepthwiseConvolution(); std::unique_ptr<NodeTransformation> NewMergePaddingWithAdd(); } } #endif #include "tensorflow/lite/delegates/gpu/common/transformations/merge_padding_with.h" #include <memory> #include <string> #include <variant> #include <vector> #include "absl/memory/memory.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "absl/types/any.h" #include "tensorflow/lite/delegates/gpu/common/data_type.h" #include "tensorflow/lite/delegates/gpu/common/model.h" #include "tensorflow/lite/delegates/gpu/common/model_transformer.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/tensor.h" #include "tensorflow/lite/delegates/gpu/common/transformations/matching.h" namespace tflite { namespace gpu { namespace { template <typename Attr> class MergePaddingWith2DOperation : public SequenceTransformation { public: explicit MergePaddingWith2DOperation(OperationType operation_type) : operations_to_match_( {ToString(OperationType::PAD), ToString(operation_type)}) {} int ExpectedSequenceLength() const final { return 2; } TransformResult ApplyToNodesSequence(const std::vector<Node*>& sequence, GraphFloat32* graph) final { if (!MatchesByOperationType(sequence, operations_to_match_)) { return {TransformStatus::SKIPPED, ""}; } Node* pad_node = sequence.front(); Node* op_node = sequence.back(); PadAttributes pad_attr = absl::any_cast<PadAttributes>(pad_node->operation.attributes); if (pad_attr.type != PaddingContentType::ZEROS) { return {TransformStatus::DECLINED, "Only Zero padding is supported."}; } if (pad_attr.appended.c != 0 || pad_attr.prepended.c != 0 || pad_attr.appended.b != 0 || pad_attr.prepended.b != 0) { return {TransformStatus::DECLINED, "Pad has non-zero padding on non HW axis."}; } Attr* node_attr = absl::any_cast<Attr>(&op_node->operation.attributes); absl::Status status = RemovePrecedingNode(graph, pad_node, op_node); if (!status.ok()) { return {TransformStatus::INVALID, "Unable to remove Pad node with Operation node: " + std::string(status.message())}; } node_attr->padding.appended.h += pad_attr.appended.h; node_attr->padding.appended.w += pad_attr.appended.w; node_attr->padding.prepended.h += pad_attr.prepended.h; node_attr->padding.prepended.w += pad_attr.prepended.w; return { TransformStatus::APPLIED, absl::StrCat("Added padding: prepended = {h = ", pad_attr.prepended.h, ", w = ", pad_attr.prepended.w, "}, appended = { h = ", pad_attr.appended.h, ", w = ", pad_attr.appended.w, "}")}; } private: const std::vector<std::string> operations_to_match_; }; } std::unique_ptr<SequenceTransformation> NewMergePaddingWithPooling() { return absl::make_unique<MergePaddingWith2DOperation<Pooling2DAttributes>>( OperationType::POOLING_2D); } std::unique_ptr<SequenceTransformation> NewMergePaddingWithConvolution2D() { return absl::make_unique< MergePaddingWith2DOperation<Convolution2DAttributes>>( 
OperationType::CONVOLUTION_2D); } std::unique_ptr<SequenceTransformation> NewMergePaddingWithDepthwiseConvolution() { return absl::make_unique< MergePaddingWith2DOperation<DepthwiseConvolution2DAttributes>>( OperationType::DEPTHWISE_CONVOLUTION); } class MergePaddingWithAddOperation : public NodeTransformation { public: TransformResult ApplyToNode(Node* node, GraphFloat32* graph) final { if (node->operation.type != ToString(OperationType::PAD)) { return {TransformStatus::SKIPPED, ""}; } auto inputs = graph->FindInputs(node->id); if (inputs.size() != 1) { return {TransformStatus::SKIPPED, ""}; } const auto& input_shape = graph->FindInputs(node->id)[0]->tensor.shape; if (input_shape.c % 4 != 0) { return {TransformStatus::DECLINED, "Pad with input where src_channels % 4 != 0"}; } PadAttributes pad_attr = absl::any_cast<PadAttributes>(node->operation.attributes); if (pad_attr.type != PaddingContentType::ZEROS) { return {TransformStatus::DECLINED, "Only Zero padding is supported."}; } if (pad_attr.prepended != BHWC(0, 0, 0, 0) || pad_attr.appended.h != 0 || pad_attr.appended.w != 0 || pad_attr.appended.b != 0) { return {TransformStatus::DECLINED, "Pad has padding not only in appended channels axis."}; } auto pad_output = graph->FindOutputs(node->id)[0]; auto consumer_nodes = graph->FindConsumers(pad_output->id); if (consumer_nodes.size() != 1) { return {TransformStatus::SKIPPED, ""}; } auto add_node = consumer_nodes[0]; auto consumer_type = OperationTypeFromString(add_node->operation.type); if (consumer_type != OperationType::ADD) { return {TransformStatus::SKIPPED, ""}; } ElementwiseAttributes add_attr = absl::any_cast<ElementwiseAttributes>(add_node->operation.attributes); const bool is_add_hwc = absl::holds_alternative<Tensor<HWC, DataType::FLOAT32>>(add_attr.param); const bool is_add_linear = absl::holds_alternative<Tensor<Linear, DataType::FLOAT32>>( add_attr.param); const bool is_add_scalar = absl::holds_alternative<float>(add_attr.param); if (is_add_hwc || is_add_linear || is_add_scalar) { return {TransformStatus::SKIPPED, "Cannot remove padding when ADD has constant argument."}; } absl::Status status = RemovePrecedingNode(graph, node, add_node); if (!status.ok()) { return {TransformStatus::INVALID, "Unable to remove Pad node " + std::string(status.message())}; } return {TransformStatus::APPLIED, "Removed padding with zeroes in appended channels dimension"}; } }; std::unique_ptr<NodeTransformation> NewMergePaddingWithAdd() { return absl::make_unique<MergePaddingWithAddOperation>(); } } }
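The sequence transformation above only touches padding attributes: a zero-filled PAD in front of a convolution or pooling op is absorbed by adding its prepended/appended H and W amounts to the op's own padding. A minimal standalone sketch of that accumulation, using stand-in types and values chosen to mirror a two-PAD chain:

// Sketch: folding successive zero PADs into a 2D op's padding attribute.
#include <cassert>

struct HW2 { int h, w; };
struct Padding2D { HW2 prepended, appended; };

// Fold a zero-filled PAD (given by its own prepended/appended HW amounts)
// into the padding of the following 2D operation.
void FoldPad(const Padding2D& pad, Padding2D* op) {
  op->prepended.h += pad.prepended.h;
  op->prepended.w += pad.prepended.w;
  op->appended.h += pad.appended.h;
  op->appended.w += pad.appended.w;
}

int main() {
  Padding2D conv_padding{{0, 0}, {0, 0}};       // conv starts with no padding
  FoldPad({{1, 1}, {0, 0}}, &conv_padding);     // first PAD: prepend 1x1
  FoldPad({{0, 0}, {2, 2}}, &conv_padding);     // second PAD: append 2x2
  assert(conv_padding.prepended.h == 1 && conv_padding.prepended.w == 1);
  assert(conv_padding.appended.h == 2 && conv_padding.appended.w == 2);
  return 0;
}

The restriction to ZEROS padding and to the H/W axes matters: only zero padding on spatial axes is expressible through the consumer op's own padding fields.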
#include "tensorflow/lite/delegates/gpu/common/transformations/merge_padding_with.h" #include <memory> #include <string> #include <vector> #include <gtest/gtest.h> #include "absl/status/status.h" #include "absl/types/any.h" #include "tensorflow/lite/delegates/gpu/common/data_type.h" #include "tensorflow/lite/delegates/gpu/common/model.h" #include "tensorflow/lite/delegates/gpu/common/model_transformer.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/common/tensor.h" namespace tflite { namespace gpu { namespace { TEST(MergePaddingWith, Smoke) { GraphFloat32 graph; auto input = graph.NewValue(); auto pad_node = graph.NewNode(); ASSERT_TRUE(graph.AddConsumer(pad_node->id, input->id).ok()); pad_node->operation.type = ToString(OperationType::PAD); PadAttributes attr; attr.prepended = BHWC(0, 1, 1, 0); attr.appended = BHWC(0, 2, 2, 0); pad_node->operation.attributes = attr; auto conv_node = graph.NewNode(); Value* temp = nullptr; ASSERT_TRUE(ConnectTwoNodes(&graph, pad_node, conv_node, &temp).ok()); ASSERT_TRUE(AddOutput(&graph, conv_node, &temp).ok()); conv_node->operation.type = ToString(OperationType::CONVOLUTION_2D); Convolution2DAttributes conv_attr; conv_attr.padding.appended = HW(0, 0); conv_attr.padding.prepended = HW(0, 0); conv_node->operation.attributes = conv_attr; ASSERT_EQ(2, graph.nodes().size()); auto transformation = NewMergePaddingWithConvolution2D(); ModelTransformer transformer(&graph); transformer.Apply("merge_padding", transformation.get()); ASSERT_EQ(1, graph.nodes().size()); ASSERT_EQ(2, graph.values().size()); ASSERT_EQ(conv_node, graph.nodes()[0]); conv_attr = absl::any_cast<Convolution2DAttributes>(conv_node->operation.attributes); EXPECT_EQ(HW(1, 1), conv_attr.padding.prepended); EXPECT_EQ(HW(2, 2), conv_attr.padding.appended); } TEST(MergePaddingWith, MergeTwo) { GraphFloat32 graph; auto input = graph.NewValue(); auto pad_node1 = graph.NewNode(); ASSERT_TRUE(graph.AddConsumer(pad_node1->id, input->id).ok()); pad_node1->operation.type = ToString(OperationType::PAD); PadAttributes attr; attr.prepended = BHWC(0, 1, 1, 0); attr.appended = BHWC(0, 0, 0, 0); pad_node1->operation.attributes = attr; auto pad_node2 = graph.NewNode(); Value* temp1 = nullptr; ASSERT_TRUE(ConnectTwoNodes(&graph, pad_node1, pad_node2, &temp1).ok()); pad_node2->operation.type = ToString(OperationType::PAD); attr.prepended = BHWC(0, 0, 0, 0); attr.appended = BHWC(0, 2, 2, 0); pad_node2->operation.attributes = attr; auto conv_node = graph.NewNode(); Value* temp2 = nullptr; ASSERT_TRUE(ConnectTwoNodes(&graph, pad_node2, conv_node, &temp2).ok()); ASSERT_TRUE(AddOutput(&graph, conv_node, &temp2).ok()); conv_node->operation.type = ToString(OperationType::CONVOLUTION_2D); Convolution2DAttributes conv_attr; conv_attr.padding.appended = HW(0, 0); conv_attr.padding.prepended = HW(0, 0); conv_node->operation.attributes = conv_attr; ASSERT_EQ(3, graph.nodes().size()); auto transformation = NewMergePaddingWithConvolution2D(); ModelTransformer transformer(&graph); transformer.Apply("merge_padding", transformation.get()); ASSERT_EQ(1, graph.nodes().size()); ASSERT_EQ(2, graph.values().size()); ASSERT_EQ(conv_node, graph.nodes()[0]); conv_attr = absl::any_cast<Convolution2DAttributes>(conv_node->operation.attributes); EXPECT_EQ(HW(1, 1), conv_attr.padding.prepended); EXPECT_EQ(HW(2, 2), conv_attr.padding.appended); } TEST(MergePaddingWithAdd, MergeAlignedPadding) { GraphFloat32 graph; auto input0 = 
graph.NewValue(); input0->tensor.shape = BHWC(1, 4, 4, 8); auto input1 = graph.NewValue(); auto padded = graph.NewValue(); auto output = graph.NewValue(); auto pad_node = graph.NewNode(); pad_node->operation.type = ToString(OperationType::PAD); PadAttributes pad_attr; pad_attr.prepended = BHWC(0, 0, 0, 0); pad_attr.appended = BHWC(0, 0, 0, 32); pad_node->operation.attributes = pad_attr; ASSERT_TRUE(graph.AddConsumer(pad_node->id, input0->id).ok()); ASSERT_TRUE(graph.SetProducer(pad_node->id, padded->id).ok()); auto add_node = graph.NewNode(); ElementwiseAttributes add_attr; ASSERT_TRUE(graph.AddConsumer(add_node->id, padded->id).ok()); ASSERT_TRUE(graph.AddConsumer(add_node->id, input1->id).ok()); ASSERT_TRUE(graph.SetProducer(add_node->id, output->id).ok()); add_node->operation.type = ToString(OperationType::ADD); add_node->operation.attributes = add_attr; ASSERT_EQ(2, graph.nodes().size()); ASSERT_EQ(4, graph.values().size()); auto transformation = NewMergePaddingWithAdd(); ModelTransformer transformer(&graph); transformer.Apply("merge_padding", transformation.get()); ASSERT_EQ(1, graph.nodes().size()); ASSERT_EQ(3, graph.values().size()); EXPECT_EQ(add_node, graph.nodes()[0]); } TEST(MergePaddingWithAdd, DoNotTrigger_AddWithAttributes) { GraphFloat32 graph; auto input0 = graph.NewValue(); input0->tensor.shape = BHWC(1, 4, 4, 8); auto input1 = graph.NewValue(); auto padded = graph.NewValue(); auto output = graph.NewValue(); auto pad_node = graph.NewNode(); pad_node->operation.type = ToString(OperationType::PAD); PadAttributes pad_attr; pad_attr.prepended = BHWC(0, 0, 0, 0); pad_attr.appended = BHWC(0, 0, 0, 32); pad_node->operation.attributes = pad_attr; ASSERT_TRUE(graph.AddConsumer(pad_node->id, input0->id).ok()); ASSERT_TRUE(graph.SetProducer(pad_node->id, padded->id).ok()); auto add_node = graph.NewNode(); ElementwiseAttributes add_attr; add_attr.param = Tensor<HWC, DataType::FLOAT32>(); ASSERT_TRUE(graph.AddConsumer(add_node->id, padded->id).ok()); ASSERT_TRUE(graph.AddConsumer(add_node->id, input1->id).ok()); ASSERT_TRUE(graph.SetProducer(add_node->id, output->id).ok()); add_node->operation.type = ToString(OperationType::ADD); add_node->operation.attributes = add_attr; ASSERT_EQ(2, graph.nodes().size()); ASSERT_EQ(4, graph.values().size()); auto transformation = NewMergePaddingWithAdd(); ModelTransformer transformer(&graph); transformer.Apply("merge_padding", transformation.get()); ASSERT_EQ(2, graph.nodes().size()); ASSERT_EQ(4, graph.values().size()); EXPECT_EQ(pad_node, graph.nodes()[0]); EXPECT_EQ(add_node, graph.nodes()[1]); } } } }
1008
cpp
tensorflow/tensorflow
fuse_mul_to_conv
tensorflow/lite/delegates/gpu/common/transformations/fuse_mul_to_conv.cc
tensorflow/lite/delegates/gpu/common/transformations/fuse_mul_to_conv_test.cc
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TRANSFORMATIONS_FUSE_MUL_TO_CONV_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TRANSFORMATIONS_FUSE_MUL_TO_CONV_H_ #include <memory> #include "tensorflow/lite/delegates/gpu/common/model_transformer.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" namespace tflite { namespace gpu { std::unique_ptr<SequenceTransformation> NewMergeConvolutionWithMul(); std::unique_ptr<SequenceTransformation> NewMergeMulWithConvolution(); void FuseConvolution2DWithMultiply(const ElementwiseAttributes& mul_attr, Convolution2DAttributes* attr); void FuseDepthwiseConvolution2DWithMultiply( const ElementwiseAttributes& mul_attr, DepthwiseConvolution2DAttributes* attr); void FuseConvolutionTransposedWithMultiply( const ElementwiseAttributes& mul_attr, ConvolutionTransposedAttributes* attr); void FuseFullyConnectedWithMultiply(const ElementwiseAttributes& mul_attr, FullyConnectedAttributes* attr); void FuseMultiplyWithConvolution2D(const ElementwiseAttributes& mul_attr, Convolution2DAttributes* attr); void FuseMultiplyWithDepthwiseConvolution2D( const ElementwiseAttributes& mul_attr, DepthwiseConvolution2DAttributes* attr); void FuseMultiplyWithConvolutionTransposed( const ElementwiseAttributes& mul_attr, ConvolutionTransposedAttributes* attr); void FuseMultiplyWithFullyConnected(const ElementwiseAttributes& mul_attr, FullyConnectedAttributes* attr); } } #endif #include "tensorflow/lite/delegates/gpu/common/transformations/fuse_mul_to_conv.h" #include <any> #include <memory> #include <string> #include <variant> #include <vector> #include "absl/strings/string_view.h" #include "tensorflow/lite/delegates/gpu/common/data_type.h" #include "tensorflow/lite/delegates/gpu/common/model.h" #include "tensorflow/lite/delegates/gpu/common/model_transformer.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/tensor.h" namespace tflite { namespace gpu { namespace { class MergeConvolutionWithMul : public SequenceTransformation { public: int ExpectedSequenceLength() const final { return 2; } TransformResult ApplyToNodesSequence(const std::vector<Node*>& sequence, GraphFloat32* graph) final { auto& conv_node = *sequence[0]; if (graph->FindInputs(conv_node.id).size() != 1) { return {TransformStatus::DECLINED, "This fusion is only applicable to ops with one runtime input."}; } auto& mul_node = *sequence[1]; if (mul_node.operation.type != ToString(OperationType::MUL) || !mul_node.operation.attributes.has_value()) { return {TransformStatus::SKIPPED, ""}; } ElementwiseAttributes mul_attr = absl::any_cast<ElementwiseAttributes>(mul_node.operation.attributes); if (!absl::holds_alternative<Tensor<Linear, DataType::FLOAT32>>( mul_attr.param) && !absl::holds_alternative<float>(mul_attr.param)) { return { TransformStatus::DECLINED, "This fuse applicable only for broadcast or scalar multiplication."}; } if (conv_node.operation.type == ToString(OperationType::CONVOLUTION_2D)) { Convolution2DAttributes* conv_attr = absl::any_cast<Convolution2DAttributes>( &conv_node.operation.attributes); FuseConvolution2DWithMultiply(mul_attr, conv_attr); } else if (conv_node.operation.type == ToString(OperationType::CONVOLUTION_TRANSPOSED)) { ConvolutionTransposedAttributes* conv_attr = absl::any_cast<ConvolutionTransposedAttributes>( &conv_node.operation.attributes); FuseConvolutionTransposedWithMultiply(mul_attr, conv_attr); } 
else if (conv_node.operation.type == ToString(OperationType::DEPTHWISE_CONVOLUTION)) { DepthwiseConvolution2DAttributes* conv_attr = absl::any_cast<DepthwiseConvolution2DAttributes>( &conv_node.operation.attributes); FuseDepthwiseConvolution2DWithMultiply(mul_attr, conv_attr); } else if (conv_node.operation.type == ToString(OperationType::FULLY_CONNECTED)) { FullyConnectedAttributes* conv_attr = absl::any_cast<FullyConnectedAttributes>( &conv_node.operation.attributes); FuseFullyConnectedWithMultiply(mul_attr, conv_attr); } else { return {TransformStatus::SKIPPED, ""}; } absl::Status status = RemoveFollowingNode(graph, &mul_node, &conv_node); if (!status.ok()) { return {TransformStatus::INVALID, "Unable to remove mul node after convolution: " + std::string(status.message())}; } return {TransformStatus::APPLIED, ""}; } }; class MergeMulWithConvolution : public SequenceTransformation { public: int ExpectedSequenceLength() const final { return 2; } TransformResult ApplyToNodesSequence(const std::vector<Node*>& sequence, GraphFloat32* graph) final { auto& conv_node = *sequence[1]; if (graph->FindInputs(conv_node.id).size() != 1) { return {TransformStatus::DECLINED, "This fusion is only applicable to ops with one runtime input."}; } auto& mul_node = *sequence[0]; if (mul_node.operation.type != ToString(OperationType::MUL) || !mul_node.operation.attributes.has_value()) { return {TransformStatus::SKIPPED, ""}; } ElementwiseAttributes mul_attr = absl::any_cast<ElementwiseAttributes>(mul_node.operation.attributes); if (!absl::holds_alternative<Tensor<Linear, DataType::FLOAT32>>( mul_attr.param) && !absl::holds_alternative<float>(mul_attr.param)) { return { TransformStatus::DECLINED, "This fuse applicable only for broadcast or scalar multiplication."}; } if (conv_node.operation.type == ToString(OperationType::CONVOLUTION_2D)) { Convolution2DAttributes* conv_attr = absl::any_cast<Convolution2DAttributes>( &conv_node.operation.attributes); FuseMultiplyWithConvolution2D(mul_attr, conv_attr); } else if (conv_node.operation.type == ToString(OperationType::CONVOLUTION_TRANSPOSED)) { ConvolutionTransposedAttributes* conv_attr = absl::any_cast<ConvolutionTransposedAttributes>( &conv_node.operation.attributes); FuseMultiplyWithConvolutionTransposed(mul_attr, conv_attr); } else if (conv_node.operation.type == ToString(OperationType::DEPTHWISE_CONVOLUTION)) { DepthwiseConvolution2DAttributes* conv_attr = absl::any_cast<DepthwiseConvolution2DAttributes>( &conv_node.operation.attributes); FuseMultiplyWithDepthwiseConvolution2D(mul_attr, conv_attr); } else if (conv_node.operation.type == ToString(OperationType::FULLY_CONNECTED)) { FullyConnectedAttributes* conv_attr = absl::any_cast<FullyConnectedAttributes>( &conv_node.operation.attributes); FuseMultiplyWithFullyConnected(mul_attr, conv_attr); } else { return {TransformStatus::SKIPPED, ""}; } absl::Status status = RemovePrecedingNode(graph, &mul_node, &conv_node); if (!status.ok()) { return {TransformStatus::INVALID, "Unable to remove mul node after convolution: " + std::string(status.message())}; } return {TransformStatus::APPLIED, ""}; } }; } std::unique_ptr<SequenceTransformation> NewMergeConvolutionWithMul() { return absl::make_unique<MergeConvolutionWithMul>(); } std::unique_ptr<SequenceTransformation> NewMergeMulWithConvolution() { return absl::make_unique<MergeMulWithConvolution>(); } void FuseConvolution2DWithMultiply(const ElementwiseAttributes& mul_attr, Convolution2DAttributes* attr) { auto mul = absl::get_if<Tensor<Linear, 
DataType::FLOAT32>>(&mul_attr.param); auto mul_scalar = absl::get_if<float>(&mul_attr.param); for (int d = 0; d < attr->weights.shape.o; ++d) { const float multiplier = mul ? mul->data[d] : *mul_scalar; for (int s = 0; s < attr->weights.shape.i; ++s) { for (int k_y = 0; k_y < attr->weights.shape.h; ++k_y) { for (int k_x = 0; k_x < attr->weights.shape.w; ++k_x) { const int index = attr->weights.shape.LinearIndex({{d, k_y, k_x, s}}); attr->weights.data[index] *= multiplier; } } } if (!attr->bias.data.empty()) { attr->bias.data[d] *= multiplier; } } } void FuseDepthwiseConvolution2DWithMultiply( const ElementwiseAttributes& mul_attr, DepthwiseConvolution2DAttributes* attr) { auto mul = absl::get_if<Tensor<Linear, DataType::FLOAT32>>(&mul_attr.param); auto mul_scalar = absl::get_if<float>(&mul_attr.param); for (int g = 0; g < attr->weights.shape.o; ++g) { for (int s = 0; s < attr->weights.shape.i; ++s) { const int d = s * attr->weights.shape.o + g; const float multiplier = mul ? mul->data[d] : *mul_scalar; for (int k_y = 0; k_y < attr->weights.shape.h; ++k_y) { for (int k_x = 0; k_x < attr->weights.shape.w; ++k_x) { const int index = attr->weights.shape.LinearIndex({{g, k_y, k_x, s}}); attr->weights.data[index] *= multiplier; } } if (!attr->bias.data.empty()) { attr->bias.data[d] *= multiplier; } } } } void FuseConvolutionTransposedWithMultiply( const ElementwiseAttributes& mul_attr, ConvolutionTransposedAttributes* attr) { auto mul = absl::get_if<Tensor<Linear, DataType::FLOAT32>>(&mul_attr.param); auto mul_scalar = absl::get_if<float>(&mul_attr.param); for (int d = 0; d < attr->weights.shape.o; ++d) { const float multiplier = mul ? mul->data[d] : *mul_scalar; for (int s = 0; s < attr->weights.shape.i; ++s) { for (int k_y = 0; k_y < attr->weights.shape.h; ++k_y) { for (int k_x = 0; k_x < attr->weights.shape.w; ++k_x) { const int index = attr->weights.shape.LinearIndex({{d, k_y, k_x, s}}); attr->weights.data[index] *= multiplier; } } } if (!attr->bias.data.empty()) { attr->bias.data[d] *= multiplier; } } } void FuseFullyConnectedWithMultiply(const ElementwiseAttributes& mul_attr, FullyConnectedAttributes* attr) { auto mul = absl::get_if<Tensor<Linear, DataType::FLOAT32>>(&mul_attr.param); auto mul_scalar = absl::get_if<float>(&mul_attr.param); for (int d = 0; d < attr->weights.shape.o; ++d) { const float multiplier = mul ? mul->data[d] : *mul_scalar; for (int s = 0; s < attr->weights.shape.i; ++s) { const int index = attr->weights.shape.LinearIndex({{d, 0, 0, s}}); attr->weights.data[index] *= multiplier; } if (!attr->bias.data.empty()) { attr->bias.data[d] *= multiplier; } } } void FuseMultiplyWithConvolution2D(const ElementwiseAttributes& mul_attr, Convolution2DAttributes* attr) { auto mul = absl::get_if<Tensor<Linear, DataType::FLOAT32>>(&mul_attr.param); auto mul_scalar = absl::get_if<float>(&mul_attr.param); for (int s = 0; s < attr->weights.shape.i; ++s) { const float multiplier = mul ? 
mul->data[s] : *mul_scalar; for (int d = 0; d < attr->weights.shape.o; ++d) { for (int k_y = 0; k_y < attr->weights.shape.h; ++k_y) { for (int k_x = 0; k_x < attr->weights.shape.w; ++k_x) { const int index = attr->weights.shape.LinearIndex({{d, k_y, k_x, s}}); attr->weights.data[index] *= multiplier; } } } } } void FuseMultiplyWithDepthwiseConvolution2D( const ElementwiseAttributes& mul_attr, DepthwiseConvolution2DAttributes* attr) { auto mul = absl::get_if<Tensor<Linear, DataType::FLOAT32>>(&mul_attr.param); auto mul_scalar = absl::get_if<float>(&mul_attr.param); for (int s = 0; s < attr->weights.shape.i; ++s) { const float multiplier = mul ? mul->data[s] : *mul_scalar; for (int g = 0; g < attr->weights.shape.o; ++g) { for (int k_y = 0; k_y < attr->weights.shape.h; ++k_y) { for (int k_x = 0; k_x < attr->weights.shape.w; ++k_x) { const int index = attr->weights.shape.LinearIndex({{g, k_y, k_x, s}}); attr->weights.data[index] *= multiplier; } } } } } void FuseMultiplyWithConvolutionTransposed( const ElementwiseAttributes& mul_attr, ConvolutionTransposedAttributes* attr) { auto mul = absl::get_if<Tensor<Linear, DataType::FLOAT32>>(&mul_attr.param); auto mul_scalar = absl::get_if<float>(&mul_attr.param); for (int s = 0; s < attr->weights.shape.i; ++s) { const float multiplier = mul ? mul->data[s] : *mul_scalar; for (int d = 0; d < attr->weights.shape.o; ++d) { for (int k_y = 0; k_y < attr->weights.shape.h; ++k_y) { for (int k_x = 0; k_x < attr->weights.shape.w; ++k_x) { const int index = attr->weights.shape.LinearIndex({{d, k_y, k_x, s}}); attr->weights.data[index] *= multiplier; } } } } } void FuseMultiplyWithFullyConnected(const ElementwiseAttributes& mul_attr, FullyConnectedAttributes* attr) { auto mul = absl::get_if<Tensor<Linear, DataType::FLOAT32>>(&mul_attr.param); auto mul_scalar = absl::get_if<float>(&mul_attr.param); for (int s = 0; s < attr->weights.shape.i; ++s) { const float multiplier = mul ? mul->data[s] : *mul_scalar; for (int d = 0; d < attr->weights.shape.o; ++d) { const int index = attr->weights.shape.LinearIndex({{d, 0, 0, s}}); attr->weights.data[index] *= multiplier; } } } } }
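The multiply-fusion rules above reduce to two scaling patterns: a MUL after the convolution scales the output-channel slices of the weights together with the bias, while a MUL before the convolution scales the input-channel slices of the weights and leaves the bias alone. The standalone sketch below (plain C++, no TFLite headers, 1x1 kernel, illustrative values) verifies both identities numerically.

// Sketch: mul-after-conv and mul-before-conv folded into the weights.
#include <cassert>
#include <cmath>
#include <vector>

int main() {
  const int O = 2, I = 2;
  const std::vector<float> w = {0.1f, 0.2f, 0.3f, 0.4f};  // W[o][i]
  const std::vector<float> b = {1.5f, 2.5f};
  const std::vector<float> x = {2.0f, -1.0f};
  const std::vector<float> m = {0.5f, 2.0f};  // per-channel multiplier

  auto conv = [&](const std::vector<float>& wt, const std::vector<float>& bias,
                  const std::vector<float>& in) {
    std::vector<float> out(O);
    for (int o = 0; o < O; ++o) {
      out[o] = bias[o];
      for (int i = 0; i < I; ++i) out[o] += wt[o * I + i] * in[i];
    }
    return out;
  };

  // Case 1: conv followed by mul == conv with per-output-channel scaled
  // weights and bias (FuseConvolution2DWithMultiply).
  std::vector<float> w1 = w, b1 = b;
  for (int o = 0; o < O; ++o) {
    b1[o] *= m[o];
    for (int i = 0; i < I; ++i) w1[o * I + i] *= m[o];
  }
  auto ref1 = conv(w, b, x);
  for (int o = 0; o < O; ++o) ref1[o] *= m[o];
  auto fused1 = conv(w1, b1, x);
  for (int o = 0; o < O; ++o) assert(std::fabs(ref1[o] - fused1[o]) < 1e-6f);

  // Case 2: mul followed by conv == conv with per-input-channel scaled
  // weights, bias untouched (FuseMultiplyWithConvolution2D).
  std::vector<float> w2 = w;
  for (int o = 0; o < O; ++o)
    for (int i = 0; i < I; ++i) w2[o * I + i] *= m[i];
  std::vector<float> scaled_x = x;
  for (int i = 0; i < I; ++i) scaled_x[i] *= m[i];
  auto ref2 = conv(w, b, scaled_x);
  auto fused2 = conv(w2, b, x);
  for (int o = 0; o < O; ++o) assert(std::fabs(ref2[o] - fused2[o]) < 1e-6f);
  return 0;
}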
#include "tensorflow/lite/delegates/gpu/common/transformations/fuse_mul_to_conv.h" #include <any> #include <memory> #include <string> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/status/status.h" #include "tensorflow/lite/delegates/gpu/common/data_type.h" #include "tensorflow/lite/delegates/gpu/common/model.h" #include "tensorflow/lite/delegates/gpu/common/model_transformer.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/common/tensor.h" using ::testing::FloatNear; using ::testing::Pointwise; namespace tflite { namespace gpu { namespace { TEST(MergeConvolutionWithMulTest, Smoke) { GraphFloat32 graph; auto input = graph.NewValue(); input->tensor.shape = BHWC(1, 4, 4, 8); Convolution2DAttributes conv_attr; conv_attr.padding.prepended = HW(0, 0); conv_attr.padding.appended = HW(0, 0); conv_attr.strides = HW(1, 1); conv_attr.dilations = HW(1, 1); conv_attr.weights.shape = OHWI(16, 3, 2, 8); conv_attr.weights.data.resize(conv_attr.weights.shape.DimensionsProduct()); conv_attr.bias.shape = Linear(16); conv_attr.bias.data.resize(16); Tensor<Linear, DataType::FLOAT32> mul_tensor; mul_tensor.shape = Linear(16); mul_tensor.data.resize(16); ElementwiseAttributes mul_attr; mul_attr.param = mul_tensor; auto conv_node = graph.NewNode(); conv_node->operation.type = ToString(OperationType::CONVOLUTION_2D); conv_node->operation.attributes = conv_attr; auto mul_node = graph.NewNode(); mul_node->operation.type = ToString(OperationType::MUL); mul_node->operation.attributes = mul_attr; ASSERT_TRUE(graph.AddConsumer(conv_node->id, input->id).ok()); Value* output = nullptr; ASSERT_TRUE(AddOutput(&graph, mul_node, &output).ok()); output->tensor.shape = BHWC(1, 4, 4, 16); Value* link1 = nullptr; ASSERT_TRUE(ConnectTwoNodes(&graph, conv_node, mul_node, &link1).ok()); link1->tensor.shape = BHWC(1, 4, 4, 16); ASSERT_EQ(2, graph.nodes().size()); ASSERT_EQ(3, graph.values().size()); auto transformation = NewMergeConvolutionWithMul(); ModelTransformer transformer(&graph); transformer.Apply("merge_convolution_with_mul", transformation.get()); EXPECT_EQ(1, graph.nodes().size()); EXPECT_EQ(2, graph.values().size()); EXPECT_EQ(ToString(OperationType::CONVOLUTION_2D), graph.nodes()[0]->operation.type); } TEST(MergeMulWithConvolutionTest, Smoke) { GraphFloat32 graph; auto input = graph.NewValue(); input->tensor.shape = BHWC(1, 4, 4, 8); Tensor<Linear, DataType::FLOAT32> mul_tensor; mul_tensor.shape = Linear(8); mul_tensor.data.resize(8); ElementwiseAttributes mul_attr; mul_attr.param = mul_tensor; Convolution2DAttributes conv_attr; conv_attr.padding.prepended = HW(0, 0); conv_attr.padding.appended = HW(0, 0); conv_attr.strides = HW(1, 1); conv_attr.dilations = HW(1, 1); conv_attr.weights.shape = OHWI(16, 3, 2, 8); conv_attr.weights.data.resize(conv_attr.weights.shape.DimensionsProduct()); conv_attr.bias.shape = Linear(16); conv_attr.bias.data.resize(16); auto conv_node = graph.NewNode(); conv_node->operation.type = ToString(OperationType::CONVOLUTION_2D); conv_node->operation.attributes = conv_attr; auto mul_node = graph.NewNode(); mul_node->operation.type = ToString(OperationType::MUL); mul_node->operation.attributes = mul_attr; ASSERT_TRUE(graph.AddConsumer(mul_node->id, input->id).ok()); Value* output = nullptr; ASSERT_TRUE(AddOutput(&graph, conv_node, &output).ok()); output->tensor.shape = BHWC(1, 4, 4, 16); Value* link1 = nullptr; ASSERT_TRUE(ConnectTwoNodes(&graph, 
mul_node, conv_node, &link1).ok()); link1->tensor.shape = BHWC(1, 4, 4, 16); ASSERT_EQ(2, graph.nodes().size()); ASSERT_EQ(3, graph.values().size()); auto transformation = NewMergeMulWithConvolution(); ModelTransformer transformer(&graph); transformer.Apply("merge_mul_with_convolution", transformation.get()); EXPECT_EQ(1, graph.nodes().size()); EXPECT_EQ(2, graph.values().size()); EXPECT_EQ(ToString(OperationType::CONVOLUTION_2D), graph.nodes()[0]->operation.type); } TEST(FuseMulAfterConvolution2DTest, Smoke) { Convolution2DAttributes attr; attr.weights.shape = OHWI(2, 1, 2, 2); attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f}; attr.bias.shape = Linear(2); attr.bias.data = {1.5f, 2.5f}; Tensor<Linear, DataType::FLOAT32> mul_tensor; mul_tensor.shape = Linear(2); mul_tensor.data = {0.5f, 2.0f}; ElementwiseAttributes mul_attr; mul_attr.param = mul_tensor; FuseConvolution2DWithMultiply(mul_attr, &attr); EXPECT_THAT(attr.weights.data, Pointwise(FloatNear(1e-6), {0.05f, 0.1f, 0.15f, 0.2f, 1.0f, 1.2f, 1.4f, 1.6f})); EXPECT_THAT(attr.bias.data, Pointwise(FloatNear(1e-6), {0.75f, 5.0f})); } TEST(FuseMulAfterDepthwiseConvolution2DTest, Smoke) { DepthwiseConvolution2DAttributes attr; attr.weights.shape = OHWI(2, 1, 2, 2); attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f}; attr.bias.shape = Linear(4); attr.bias.data = {1.5f, 2.5f, 1.0f, 2.0f}; Tensor<Linear, DataType::FLOAT32> mul_tensor; mul_tensor.shape = Linear(4); mul_tensor.data = {0.5f, 2.0f, 4.0f, 0.25f}; ElementwiseAttributes mul_attr; mul_attr.param = mul_tensor; FuseDepthwiseConvolution2DWithMultiply(mul_attr, &attr); EXPECT_THAT(attr.weights.data, Pointwise(FloatNear(1e-6), {0.05f, 0.8f, 0.15f, 1.6f, 1.0f, 0.15f, 1.4f, 0.2f})); EXPECT_THAT(attr.bias.data, Pointwise(FloatNear(1e-6), {0.75f, 5.0f, 4.0f, 0.5f})); } TEST(FuseMulAfterConvolutionTransposedTest, Smoke) { ConvolutionTransposedAttributes attr; attr.weights.shape = OHWI(2, 1, 2, 2); attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f}; attr.bias.shape = Linear(2); attr.bias.data = {1.5f, 2.5f}; Tensor<Linear, DataType::FLOAT32> mul_tensor; mul_tensor.shape = Linear(2); mul_tensor.data = {0.5f, 2.0f}; ElementwiseAttributes mul_attr; mul_attr.param = mul_tensor; FuseConvolutionTransposedWithMultiply(mul_attr, &attr); EXPECT_THAT(attr.weights.data, Pointwise(FloatNear(1e-6), {0.05f, 0.1f, 0.15f, 0.2f, 1.0f, 1.2f, 1.4f, 1.6f})); EXPECT_THAT(attr.bias.data, Pointwise(FloatNear(1e-6), {0.75f, 5.0f})); } TEST(FuseMulAfterFullyConnectedTest, Smoke) { FullyConnectedAttributes attr; attr.weights.shape = OHWI(2, 1, 1, 2); attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f}; attr.bias.shape = Linear(2); attr.bias.data = {1.5f, 2.5f}; Tensor<Linear, DataType::FLOAT32> mul_tensor; mul_tensor.shape = Linear(2); mul_tensor.data = {0.5f, 2.0f}; ElementwiseAttributes mul_attr; mul_attr.param = mul_tensor; FuseFullyConnectedWithMultiply(mul_attr, &attr); EXPECT_THAT(attr.weights.data, Pointwise(FloatNear(1e-6), {0.05f, 0.1f, 0.6f, 0.8f})); EXPECT_THAT(attr.bias.data, Pointwise(FloatNear(1e-6), {0.75f, 5.0f})); } TEST(FuseMulBeforeConvolution2DTest, Smoke) { Convolution2DAttributes attr; attr.weights.shape = OHWI(2, 1, 2, 2); attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f}; attr.bias.shape = Linear(2); attr.bias.data = {1.5f, 2.5f}; Tensor<Linear, DataType::FLOAT32> mul_tensor; mul_tensor.shape = Linear(2); mul_tensor.data = {0.5f, 2.0f}; ElementwiseAttributes mul_attr; mul_attr.param = mul_tensor; 
FuseMultiplyWithConvolution2D(mul_attr, &attr); EXPECT_THAT(attr.weights.data, Pointwise(FloatNear(1e-6), {0.05f, 0.4f, 0.15f, 0.8f, 0.25f, 1.2f, 0.35f, 1.6f})); EXPECT_THAT(attr.bias.data, Pointwise(FloatNear(1e-6), {1.5f, 2.5f})); } TEST(FuseMulBeforeDepthwiseConvolution2DTest, Smoke) { DepthwiseConvolution2DAttributes attr; attr.weights.shape = OHWI(2, 1, 2, 2); attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f}; attr.bias.shape = Linear(4); attr.bias.data = {1.5f, 2.5f, 1.0f, 2.0f}; Tensor<Linear, DataType::FLOAT32> mul_tensor; mul_tensor.shape = Linear(4); mul_tensor.data = {0.5f, 2.0f, 4.0f, 0.25f}; ElementwiseAttributes mul_attr; mul_attr.param = mul_tensor; FuseMultiplyWithDepthwiseConvolution2D(mul_attr, &attr); EXPECT_THAT(attr.weights.data, Pointwise(FloatNear(1e-6), {0.05f, 0.4f, 0.15f, 0.8f, 0.25f, 1.2f, 0.35f, 1.6f})); EXPECT_THAT(attr.bias.data, Pointwise(FloatNear(1e-6), {1.5f, 2.5f, 1.0f, 2.0f})); } TEST(FuseMulBeforeConvolutionTransposedTest, Smoke) { ConvolutionTransposedAttributes attr; attr.weights.shape = OHWI(2, 1, 2, 2); attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f, 0.8f}; attr.bias.shape = Linear(2); attr.bias.data = {1.5f, 2.5f}; Tensor<Linear, DataType::FLOAT32> mul_tensor; mul_tensor.shape = Linear(2); mul_tensor.data = {0.5f, 2.0f}; ElementwiseAttributes mul_attr; mul_attr.param = mul_tensor; FuseMultiplyWithConvolutionTransposed(mul_attr, &attr); EXPECT_THAT(attr.weights.data, Pointwise(FloatNear(1e-6), {0.05f, 0.4f, 0.15f, 0.8f, 0.25f, 1.2f, 0.35f, 1.6f})); EXPECT_THAT(attr.bias.data, Pointwise(FloatNear(1e-6), {1.5f, 2.5f})); } TEST(FuseMulBeforeFullyConnectedTest, Smoke) { FullyConnectedAttributes attr; attr.weights.shape = OHWI(2, 1, 1, 2); attr.weights.data = {0.1f, 0.2f, 0.3f, 0.4f}; attr.bias.shape = Linear(2); attr.bias.data = {1.5f, 2.5f}; Tensor<Linear, DataType::FLOAT32> mul_tensor; mul_tensor.shape = Linear(2); mul_tensor.data = {0.5f, 2.0f}; ElementwiseAttributes mul_attr; mul_attr.param = mul_tensor; FuseMultiplyWithFullyConnected(mul_attr, &attr); EXPECT_THAT(attr.weights.data, Pointwise(FloatNear(1e-6), {0.05f, 0.4f, 0.15f, 0.8f})); EXPECT_THAT(attr.bias.data, Pointwise(FloatNear(1e-6), {1.5f, 2.5f})); } } } }
1009
cpp
tensorflow/tensorflow
remove_noop
tensorflow/lite/delegates/gpu/common/transformations/remove_noop.cc
tensorflow/lite/delegates/gpu/common/transformations/remove_noop_test.cc
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TRANSFORMATIONS_REMOVE_NOOP_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TRANSFORMATIONS_REMOVE_NOOP_H_ #include <memory> #include "tensorflow/lite/delegates/gpu/common/model_transformer.h" namespace tflite { namespace gpu { std::unique_ptr<SequenceTransformation> NewRemoveSingleInputConcat(); std::unique_ptr<SequenceTransformation> NewRemoveSingleInputAdd(); std::unique_ptr<SequenceTransformation> NewRemoveDegenerateUpsampling(); std::unique_ptr<NodeTransformation> NewRemoveIdentityReshape(); std::unique_ptr<NodeTransformation> NewRemoveIdentityStridedSlice(); } } #endif #include "tensorflow/lite/delegates/gpu/common/transformations/remove_noop.h" #include <algorithm> #include <any> #include <functional> #include <iterator> #include <memory> #include <string> #include <utility> #include <variant> #include <vector> #include "absl/memory/memory.h" #include "absl/strings/string_view.h" #include "tensorflow/lite/delegates/gpu/common/data_type.h" #include "tensorflow/lite/delegates/gpu/common/model.h" #include "tensorflow/lite/delegates/gpu/common/model_transformer.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/tensor.h" namespace tflite { namespace gpu { namespace { using ShouldRemoveOperation = std::function<bool(GraphFloat32* graph, Node*)>; class RemoveOperation : public SequenceTransformation { public: explicit RemoveOperation(ShouldRemoveOperation remove_predicate) : remove_predicate_(std::move(remove_predicate)) {} int ExpectedSequenceLength() const final { return 2; } TransformResult ApplyToNodesSequence(const std::vector<Node*>& sequence, GraphFloat32* graph) final { Node* prev_op_node = sequence.front(); Node* op_node = sequence.back(); if (!remove_predicate_(graph, op_node)) { return {TransformStatus::SKIPPED, ""}; } absl::Status status = RemoveFollowingNode(graph, op_node, prev_op_node); if (!status.ok()) { return {TransformStatus::INVALID, "Unable to remove a node: " + std::string(status.message())}; } return {TransformStatus::APPLIED, ""}; } private: ShouldRemoveOperation remove_predicate_; }; } std::unique_ptr<SequenceTransformation> NewRemoveSingleInputConcat() { auto type = ToString(OperationType::CONCAT); return absl::make_unique<RemoveOperation>( [type](GraphFloat32* graph, Node* node) { return type == node->operation.type; }); } std::unique_ptr<SequenceTransformation> NewRemoveSingleInputAdd() { auto type = ToString(OperationType::ADD); return absl::make_unique<RemoveOperation>( [type](GraphFloat32* graph, Node* node) { if (node->operation.type != type) { return false; } auto& attr = absl::any_cast<const ElementwiseAttributes&>( node->operation.attributes); return !absl::holds_alternative<Tensor<HWC, DataType::FLOAT32>>( attr.param) && !absl::holds_alternative<Tensor<Linear, DataType::FLOAT32>>( attr.param) && !absl::holds_alternative<float>(attr.param); }); } std::unique_ptr<SequenceTransformation> NewRemoveDegenerateUpsampling() { auto type = ToString(OperationType::RESIZE); return absl::make_unique<RemoveOperation>( [type](GraphFloat32* graph, Node* node) { if (node->operation.type != type) { return false; } auto inputs = graph->FindInputs(node->id); auto outputs = graph->FindOutputs(node->id); return inputs.size() == 1 && outputs.size() == 1 && inputs[0]->tensor.shape == outputs[0]->tensor.shape; }); } class RemoveIdentityReshape : public 
NodeTransformation { public: TransformResult ApplyToNode(Node* node, GraphFloat32* graph) final { if (node->operation.type != ToString(OperationType::RESHAPE)) { return {TransformStatus::SKIPPED, ""}; } auto input_shape = graph->FindInputs(node->id)[0]->tensor.shape; const auto& reshape_attr = absl::any_cast<const ReshapeAttributes&>(node->operation.attributes); if (input_shape != reshape_attr.new_shape) { return {TransformStatus::SKIPPED, ""}; } auto output = graph->FindOutputs(node->id)[0]; const auto& graph_outputs = graph->outputs(); if (std::find(graph_outputs.begin(), graph_outputs.end(), output) != graph_outputs.end()) { return {TransformStatus::SKIPPED, "Can not apply transformation when node output is graph output"}; } absl::Status status = RemoveSimpleNodeKeepInput(graph, node); if (!status.ok()) { return {TransformStatus::INVALID, "Unable to remove a node: " + std::string(status.message())}; } return {TransformStatus::APPLIED, "Removed reshape with input_shape == output_shape."}; } }; std::unique_ptr<NodeTransformation> NewRemoveIdentityReshape() { return absl::make_unique<RemoveIdentityReshape>(); } class RemoveIdentityStridedSlice : public NodeTransformation { public: TransformResult ApplyToNode(Node* node, GraphFloat32* graph) final { if (node->operation.type != ToString(OperationType::SLICE)) { return {TransformStatus::SKIPPED, ""}; } auto input = graph->FindInputs(node->id)[0]; auto output = graph->FindOutputs(node->id)[0]; const auto& slice_attr = absl::any_cast<const SliceAttributes&>(node->operation.attributes); if (input->tensor.shape != output->tensor.shape) { return {TransformStatus::SKIPPED, ""}; } if (slice_attr.starts != BHWC(0, 0, 0, 0)) { return {TransformStatus::SKIPPED, ""}; } if (slice_attr.strides != BHWC(1, 1, 1, 1)) { return {TransformStatus::SKIPPED, ""}; } if (slice_attr.ends != output->tensor.shape) { return {TransformStatus::SKIPPED, ""}; } const auto& graph_outputs = graph->outputs(); const auto& graph_inputs = graph->inputs(); const bool input_is_graph_input = std::find(graph_inputs.begin(), graph_inputs.end(), input) != graph_inputs.end(); const bool output_is_graph_output = std::find(graph_outputs.begin(), graph_outputs.end(), output) != graph_outputs.end(); if (input_is_graph_input && output_is_graph_output) { return {TransformStatus::SKIPPED, "Can not apply transformation when node input is graph input and " "node output is graph output"}; } if (output_is_graph_output) { if (graph->FindConsumers(input->id).size() != 1) { return {TransformStatus::SKIPPED, "Can not apply transformation when node output is graph output " "and input consumed by other nodes."}; } absl::Status status = RemoveSimpleNodeKeepOutput(graph, node); if (!status.ok()) { return {TransformStatus::INVALID, "Unable to remove a node: " + std::string(status.message())}; } return {TransformStatus::APPLIED, "Removed identity strided slice."}; } absl::Status status = RemoveSimpleNodeKeepInput(graph, node); if (!status.ok()) { return {TransformStatus::INVALID, "Unable to remove a node: " + std::string(status.message())}; } return {TransformStatus::APPLIED, "Removed identity strided slice."}; } }; std::unique_ptr<NodeTransformation> NewRemoveIdentityStridedSlice() { return absl::make_unique<RemoveIdentityStridedSlice>(); } } }
#include "tensorflow/lite/delegates/gpu/common/transformations/remove_noop.h" #include <any> #include <memory> #include <string> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/status/status.h" #include "tensorflow/lite/delegates/gpu/common/data_type.h" #include "tensorflow/lite/delegates/gpu/common/model.h" #include "tensorflow/lite/delegates/gpu/common/model_transformer.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/common/tensor.h" namespace tflite { namespace gpu { namespace { using ::testing::UnorderedElementsAre; TEST(RemoveSingleInputAdd, Smoke) { GraphFloat32 graph; auto input = graph.NewValue(); auto first_node = graph.NewNode(); ASSERT_TRUE(graph.AddConsumer(first_node->id, input->id).ok()); auto add_node = graph.NewNode(); Value* output = nullptr; ASSERT_TRUE(AddOutput(&graph, add_node, &output).ok()); add_node->operation.type = ToString(OperationType::ADD); add_node->operation.attributes = ElementwiseAttributes(); Value* temp = nullptr; ASSERT_TRUE(ConnectTwoNodes(&graph, first_node, add_node, &temp).ok()); ASSERT_EQ(2, graph.nodes().size()); ASSERT_EQ(3, graph.values().size()); auto transformation = NewRemoveSingleInputAdd(); ModelTransformer transformer(&graph); transformer.Apply("noop", transformation.get()); EXPECT_EQ(1, graph.nodes().size()); ASSERT_EQ(2, graph.values().size()); ASSERT_EQ(first_node, graph.nodes()[0]); ASSERT_EQ(input, graph.values()[0]); ASSERT_EQ(output, graph.values()[1]); } TEST(RemoveSingleInputAdd, DoNotTrigger_TensorHWC) { GraphFloat32 graph; auto input = graph.NewValue(); auto first_node = graph.NewNode(); ASSERT_TRUE(graph.AddConsumer(first_node->id, input->id).ok()); auto add_node = graph.NewNode(); Value* output = nullptr; ASSERT_TRUE(AddOutput(&graph, add_node, &output).ok()); add_node->operation.type = ToString(OperationType::ADD); ElementwiseAttributes attr; attr.param = Tensor<HWC, DataType::FLOAT32>(); add_node->operation.attributes = attr; Value* temp = nullptr; ASSERT_TRUE(ConnectTwoNodes(&graph, first_node, add_node, &temp).ok()); ASSERT_EQ(2, graph.nodes().size()); ASSERT_EQ(3, graph.values().size()); auto transformation = NewRemoveSingleInputAdd(); ModelTransformer transformer(&graph); transformer.Apply("noop", transformation.get()); EXPECT_EQ(2, graph.nodes().size()); ASSERT_EQ(3, graph.values().size()); } TEST(RemoveSingleInputAdd, DoNotTrigger_LinearTensor) { GraphFloat32 graph; auto input = graph.NewValue(); auto first_node = graph.NewNode(); ASSERT_TRUE(graph.AddConsumer(first_node->id, input->id).ok()); auto add_node = graph.NewNode(); Value* output = nullptr; ASSERT_TRUE(AddOutput(&graph, add_node, &output).ok()); add_node->operation.type = ToString(OperationType::ADD); ElementwiseAttributes attr; attr.param = Tensor<Linear, DataType::FLOAT32>(); add_node->operation.attributes = attr; Value* temp = nullptr; ASSERT_TRUE(ConnectTwoNodes(&graph, first_node, add_node, &temp).ok()); ASSERT_EQ(2, graph.nodes().size()); ASSERT_EQ(3, graph.values().size()); auto transformation = NewRemoveSingleInputAdd(); ModelTransformer transformer(&graph); transformer.Apply("noop", transformation.get()); EXPECT_EQ(2, graph.nodes().size()); ASSERT_EQ(3, graph.values().size()); } TEST(RemoveSingleInputAdd, DoNotTrigger_Scalar) { GraphFloat32 graph; auto input = graph.NewValue(); auto first_node = graph.NewNode(); ASSERT_TRUE(graph.AddConsumer(first_node->id, input->id).ok()); auto add_node = 
graph.NewNode(); Value* output = nullptr; ASSERT_TRUE(AddOutput(&graph, add_node, &output).ok()); add_node->operation.type = ToString(OperationType::ADD); ElementwiseAttributes attr; attr.param = 0.5f; add_node->operation.attributes = attr; Value* temp = nullptr; ASSERT_TRUE(ConnectTwoNodes(&graph, first_node, add_node, &temp).ok()); ASSERT_EQ(2, graph.nodes().size()); ASSERT_EQ(3, graph.values().size()); auto transformation = NewRemoveSingleInputAdd(); ModelTransformer transformer(&graph); transformer.Apply("noop", transformation.get()); EXPECT_EQ(2, graph.nodes().size()); ASSERT_EQ(3, graph.values().size()); } TEST(RemoveSingleInputAdd, DoNotTrigger_Multiple) { GraphFloat32 graph; auto input = graph.NewValue(); auto node_a = graph.NewNode(); auto node_b = graph.NewNode(); ASSERT_TRUE(graph.AddConsumer(node_a->id, input->id).ok()); ASSERT_TRUE(graph.AddConsumer(node_b->id, input->id).ok()); auto add_node = graph.NewNode(); Value* output = nullptr; ASSERT_TRUE(AddOutput(&graph, add_node, &output).ok()); add_node->operation.type = ToString(OperationType::ADD); Value* temp_a = nullptr; Value* temp_b = nullptr; ASSERT_TRUE(ConnectTwoNodes(&graph, node_a, add_node, &temp_a).ok()); ASSERT_TRUE(ConnectTwoNodes(&graph, node_b, add_node, &temp_b).ok()); ASSERT_EQ(3, graph.nodes().size()); ASSERT_EQ(4, graph.values().size()); auto transformation = NewRemoveSingleInputAdd(); ModelTransformer transformer(&graph); transformer.Apply("noop", transformation.get()); ASSERT_EQ(3, graph.nodes().size()); ASSERT_EQ(4, graph.values().size()); } TEST(RemoveDegenerateUpsampling, Smoke) { GraphFloat32 graph; auto input = graph.NewValue(); auto first_node = graph.NewNode(); ASSERT_TRUE(graph.AddConsumer(first_node->id, input->id).ok()); auto node_to_remove = graph.NewNode(); Value* output = nullptr; ASSERT_TRUE(AddOutput(&graph, node_to_remove, &output).ok()); output->tensor.shape = BHWC(1, 5, 5, 1); node_to_remove->operation.type = ToString(OperationType::RESIZE); Resize2DAttributes attr; attr.new_shape = HW(5, 5); attr.type = SamplingType::BILINEAR; node_to_remove->operation.attributes = attr; Value* link = nullptr; ASSERT_TRUE(ConnectTwoNodes(&graph, first_node, node_to_remove, &link).ok()); link->tensor.shape = output->tensor.shape; ASSERT_EQ(2, graph.nodes().size()); ASSERT_EQ(3, graph.values().size()); auto transformation = NewRemoveDegenerateUpsampling(); ModelTransformer transformer(&graph); transformer.Apply("noop", transformation.get()); ASSERT_EQ(1, graph.nodes().size()); ASSERT_EQ(2, graph.values().size()); EXPECT_EQ(first_node, graph.nodes()[0]); EXPECT_EQ(input, graph.values()[0]); EXPECT_EQ(output, graph.values()[1]); } TEST(RemoveIdentityReshape, Smoke) { GraphFloat32 graph; Node* simple_node = graph.NewNode(); Node* producer_node = graph.NewNode(); Node* consumer_node = graph.NewNode(); Value* graph_input = graph.NewValue(); Value* graph_output = graph.NewValue(); Value* value0 = graph.NewValue(); Value* value1 = graph.NewValue(); value0->tensor.shape = BHWC(1, 1, 1, 11); simple_node->operation.type = ToString(OperationType::RESHAPE); ReshapeAttributes attr; attr.new_shape = BHWC(1, 1, 1, 11); simple_node->operation.attributes = attr; ASSERT_TRUE(graph.AddConsumer(producer_node->id, graph_input->id).ok()); ASSERT_TRUE(graph.SetProducer(producer_node->id, value0->id).ok()); ASSERT_TRUE(graph.AddConsumer(simple_node->id, value0->id).ok()); ASSERT_TRUE(graph.SetProducer(simple_node->id, value1->id).ok()); ASSERT_TRUE(graph.AddConsumer(consumer_node->id, value1->id).ok()); 
ASSERT_TRUE(graph.SetProducer(consumer_node->id, graph_output->id).ok()); EXPECT_THAT(graph.inputs(), UnorderedElementsAre(graph_input)); EXPECT_THAT(graph.outputs(), UnorderedElementsAre(graph_output)); EXPECT_THAT(graph.nodes(), UnorderedElementsAre(simple_node, producer_node, consumer_node)); auto transformation = NewRemoveIdentityReshape(); ModelTransformer transformer(&graph); transformer.Apply("noop", transformation.get()); EXPECT_THAT(graph.inputs(), UnorderedElementsAre(graph_input)); EXPECT_THAT(graph.outputs(), UnorderedElementsAre(graph_output)); EXPECT_THAT(graph.nodes(), UnorderedElementsAre(producer_node, consumer_node)); EXPECT_THAT(graph.values(), UnorderedElementsAre(graph_input, graph_output, value0)); } TEST(RemoveIdentityStridedSlice, Smoke) { GraphFloat32 graph; Node* simple_node = graph.NewNode(); Node* producer_node = graph.NewNode(); Node* consumer_node = graph.NewNode(); Value* graph_input = graph.NewValue(); Value* graph_output = graph.NewValue(); Value* value0 = graph.NewValue(); Value* value1 = graph.NewValue(); value0->tensor.shape = BHWC(1, 1, 1, 11); value1->tensor.shape = BHWC(1, 1, 1, 11); simple_node->operation.type = ToString(OperationType::SLICE); SliceAttributes attr; attr.starts = BHWC(0, 0, 0, 0); attr.strides = BHWC(1, 1, 1, 1); attr.ends = BHWC(1, 1, 1, 11); simple_node->operation.attributes = attr; ASSERT_TRUE(graph.AddConsumer(producer_node->id, graph_input->id).ok()); ASSERT_TRUE(graph.SetProducer(producer_node->id, value0->id).ok()); ASSERT_TRUE(graph.AddConsumer(simple_node->id, value0->id).ok()); ASSERT_TRUE(graph.SetProducer(simple_node->id, value1->id).ok()); ASSERT_TRUE(graph.AddConsumer(consumer_node->id, value1->id).ok()); ASSERT_TRUE(graph.SetProducer(consumer_node->id, graph_output->id).ok()); EXPECT_THAT(graph.inputs(), UnorderedElementsAre(graph_input)); EXPECT_THAT(graph.outputs(), UnorderedElementsAre(graph_output)); EXPECT_THAT(graph.nodes(), UnorderedElementsAre(simple_node, producer_node, consumer_node)); auto transformation = NewRemoveIdentityStridedSlice(); ModelTransformer transformer(&graph); transformer.Apply("noop", transformation.get()); EXPECT_THAT(graph.inputs(), UnorderedElementsAre(graph_input)); EXPECT_THAT(graph.outputs(), UnorderedElementsAre(graph_output)); EXPECT_THAT(graph.nodes(), UnorderedElementsAre(producer_node, consumer_node)); EXPECT_THAT(graph.values(), UnorderedElementsAre(graph_input, graph_output, value0)); } TEST(RemoveIdentityStridedSlice, OutputIsGraphOutputInputConsumedByFewNodes) { GraphFloat32 graph; Node* first_node = graph.NewNode(); Node* slice_node = graph.NewNode(); Node* second_node = graph.NewNode(); Value* value0 = graph.NewValue(); Value* value1 = graph.NewValue(); Value* value2 = graph.NewValue(); Value* value3 = graph.NewValue(); value0->tensor.shape = BHWC(1, 1, 1, 11); value1->tensor.shape = BHWC(1, 1, 1, 11); value2->tensor.shape = BHWC(1, 1, 1, 11); value3->tensor.shape = BHWC(1, 1, 1, 11); slice_node->operation.type = ToString(OperationType::SLICE); SliceAttributes attr; attr.starts = BHWC(0, 0, 0, 0); attr.strides = BHWC(1, 1, 1, 1); attr.ends = BHWC(1, 1, 1, 11); slice_node->operation.attributes = attr; ASSERT_TRUE(graph.AddConsumer(first_node->id, value0->id).ok()); ASSERT_TRUE(graph.SetProducer(first_node->id, value1->id).ok()); ASSERT_TRUE(graph.AddConsumer(slice_node->id, value1->id).ok()); ASSERT_TRUE(graph.AddConsumer(second_node->id, value1->id).ok()); ASSERT_TRUE(graph.SetProducer(slice_node->id, value2->id).ok()); ASSERT_TRUE(graph.SetProducer(second_node->id, 
value3->id).ok()); EXPECT_THAT(graph.inputs(), UnorderedElementsAre(value0)); EXPECT_THAT(graph.outputs(), UnorderedElementsAre(value2, value3)); EXPECT_THAT(graph.nodes(), UnorderedElementsAre(first_node, slice_node, second_node)); auto transformation = NewRemoveIdentityStridedSlice(); ModelTransformer transformer(&graph); transformer.Apply("noop", transformation.get()); EXPECT_THAT(graph.inputs(), UnorderedElementsAre(value0)); EXPECT_THAT(graph.outputs(), UnorderedElementsAre(value2, value3)); EXPECT_THAT(graph.nodes(), UnorderedElementsAre(first_node, slice_node, second_node)); EXPECT_THAT(graph.values(), UnorderedElementsAre(value0, value1, value2, value3)); } } } }
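Usage sketch (not part of the repository files above; the helper name RemoveAllNoops is hypothetical): it chains the no-op removal passes declared in remove_noop.h over a GraphFloat32, using only the ModelTransformer::Apply call already exercised in the tests.

// Hypothetical helper; assumes only the factories from remove_noop.h and the
// ModelTransformer API shown in the tests above.
#include <memory>
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/common/transformations/remove_noop.h"

namespace tflite {
namespace gpu {

// Runs every no-op removal pass once over the graph.
void RemoveAllNoops(GraphFloat32* graph) {
  ModelTransformer transformer(graph);
  auto single_input_concat = NewRemoveSingleInputConcat();
  transformer.Apply("remove_single_input_concat", single_input_concat.get());
  auto single_input_add = NewRemoveSingleInputAdd();
  transformer.Apply("remove_single_input_add", single_input_add.get());
  auto degenerate_upsampling = NewRemoveDegenerateUpsampling();
  transformer.Apply("remove_degenerate_upsampling", degenerate_upsampling.get());
  auto identity_reshape = NewRemoveIdentityReshape();
  transformer.Apply("remove_identity_reshape", identity_reshape.get());
  auto identity_slice = NewRemoveIdentityStridedSlice();
  transformer.Apply("remove_identity_strided_slice", identity_slice.get());
}

}  // namespace gpu
}  // namespace tflite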
1010
cpp
tensorflow/tensorflow
add_quant_adjustments
tensorflow/lite/delegates/gpu/common/transformations/add_quant_adjustments.cc
tensorflow/lite/delegates/gpu/common/transformations/add_quant_adjustments_test.cc
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TRANSFORMATIONS_ADD_QUANT_ADJUSTMENTS_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TRANSFORMATIONS_ADD_QUANT_ADJUSTMENTS_H_ #include <memory> #include "tensorflow/lite/delegates/gpu/common/model_transformer.h" namespace tflite { namespace gpu { std::unique_ptr<NodeTransformation> NewAddQuantAdjustments(); } } #endif #include "tensorflow/lite/delegates/gpu/common/transformations/add_quant_adjustments.h" #include <memory> #include <optional> #include <string> #include <vector> #include "absl/memory/memory.h" #include "absl/strings/str_cat.h" #include "absl/types/any.h" #include "tensorflow/lite/delegates/gpu/common/model.h" #include "tensorflow/lite/delegates/gpu/common/model_transformer.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/tensor.h" namespace tflite { namespace gpu { class AddQuantAdjustments : public NodeTransformation { public: TransformResult ApplyToNode(Node* node, GraphFloat32* graph) final { if (node->operation.type == ToString(OperationType::QUANTIZE_AND_DEQUANTIZE)) { return {TransformStatus::SKIPPED, ""}; } bool transform_applied = false; auto node_outputs = graph->FindOutputs(node->id); for (auto output_value : node_outputs) { if (!output_value->quant_params) continue; auto consumers = graph->FindConsumers(output_value->id); if (consumers.empty()) { continue; } Node* quant_and_dequant_node; absl::Status status = graph->InsertNodeAfter(node->id, &quant_and_dequant_node); if (!status.ok()) { return {TransformStatus::INVALID, "Could not insert new node."}; } quant_and_dequant_node->operation.type = ToString(OperationType::QUANTIZE_AND_DEQUANTIZE); QuantizeAndDequantizeAttributes attr; attr.min = output_value->quant_params.value().min; attr.max = output_value->quant_params.value().max; attr.scale = output_value->quant_params.value().scale; quant_and_dequant_node->operation.attributes = attr; Value* adjusted_value = graph->NewValue(); adjusted_value->tensor = output_value->tensor; status = graph->SetProducer(quant_and_dequant_node->id, adjusted_value->id); if (!status.ok()) { return {TransformStatus::INVALID, "Could not create QuantizeAndDequantize node."}; } for (auto& consumer : consumers) { status = graph->ReplaceInput(consumer->id, output_value->id, adjusted_value->id); if (!status.ok()) { return {TransformStatus::INVALID, absl::StrCat( "Failed to associate quant-adjusted value for consumer: ", status.message())}; } } status = graph->AddConsumer(quant_and_dequant_node->id, output_value->id); if (!status.ok()) { return {TransformStatus::INVALID, absl::StrCat( "Could not associate output to QuantizeAndDequantize: ", status.message())}; } output_value->quant_params.reset(); transform_applied = true; } if (transform_applied) { return {TransformStatus::APPLIED, ""}; } return {TransformStatus::SKIPPED, ""}; } }; std::unique_ptr<NodeTransformation> NewAddQuantAdjustments() { return std::make_unique<AddQuantAdjustments>(); } } }
#include "tensorflow/lite/delegates/gpu/common/transformations/add_quant_adjustments.h" #include <memory> #include <string> #include <vector> #include <gtest/gtest.h> #include "absl/status/status.h" #include "absl/types/any.h" #include "absl/types/optional.h" #include "tensorflow/lite/delegates/gpu/common/data_type.h" #include "tensorflow/lite/delegates/gpu/common/model.h" #include "tensorflow/lite/delegates/gpu/common/model_transformer.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/common/tensor.h" namespace tflite { namespace gpu { namespace { void AddQuantParams(absl::optional<QuantizationParams>* params, float min, float max, float scale) { params->emplace(); params->value().min = min; params->value().max = max; params->value().scale = scale; } TEST(AddQuantAdjustments, OneNode) { GraphFloat32 graph; auto input = graph.NewValue(); input->tensor.shape = BHWC(1, 4, 4, 8); AddQuantParams(&input->quant_params, 0.0, 1.0, 0.004); Tensor<Linear, DataType::FLOAT32> add_tensor; add_tensor.shape = Linear(8); add_tensor.data.resize(8); ElementwiseAttributes add_attr; add_attr.param = add_tensor; auto add_node = graph.NewNode(); add_node->operation.type = ToString(OperationType::ADD); add_node->operation.attributes = add_attr; ASSERT_TRUE(graph.AddConsumer(add_node->id, input->id).ok()); Value* output = nullptr; AddQuantParams(&input->quant_params, 0.0, 2.0, 0.008); ASSERT_TRUE(AddOutput(&graph, add_node, &output).ok()); output->tensor.shape = BHWC(1, 4, 4, 8); ASSERT_EQ(1, graph.nodes().size()); ASSERT_EQ(2, graph.values().size()); auto transformation = NewAddQuantAdjustments(); ModelTransformer transformer(&graph); transformer.Apply("add_quant_adjustments", transformation.get()); EXPECT_EQ(1, graph.nodes().size()); EXPECT_EQ(2, graph.values().size()); } TEST(AddQuantAdjustments, GeneralCase) { GraphFloat32 graph; auto input = graph.NewValue(); input->tensor.shape = BHWC(1, 4, 4, 8); AddQuantParams(&input->quant_params, 0.0, 1.0, 0.004); Tensor<Linear, DataType::FLOAT32> add_tensor; add_tensor.shape = Linear(8); add_tensor.data.resize(8); ElementwiseAttributes add_attr; add_attr.param = add_tensor; auto add1_node = graph.NewNode(); add1_node->operation.type = ToString(OperationType::ADD); add1_node->operation.attributes = add_attr; QuantizeAndDequantizeAttributes quant_attr; quant_attr.min = -1.0; quant_attr.max = 1.0; quant_attr.scale = 0.008; auto quant_node = graph.NewNode(); quant_node->operation.type = ToString(OperationType::QUANTIZE_AND_DEQUANTIZE); quant_node->operation.attributes = quant_attr; auto add2_node = graph.NewNode(); add2_node->operation.type = ToString(OperationType::ADD); ASSERT_TRUE(graph.AddConsumer(add1_node->id, input->id).ok()); Value* link1 = nullptr; ASSERT_TRUE(ConnectTwoNodes(&graph, add1_node, quant_node, &link1).ok()); AddQuantParams(&link1->quant_params, 0.0, 2.0, 0.008); link1->tensor.shape = BHWC(1, 4, 4, 8); ASSERT_TRUE(graph.AddConsumer(add2_node->id, link1->id).ok()); Value* link2 = nullptr; ASSERT_TRUE(ConnectTwoNodes(&graph, quant_node, add2_node, &link2).ok()); AddQuantParams(&link2->quant_params, -1.0, 1.0, 0.008); link2->tensor.shape = BHWC(1, 4, 4, 8); Value* output = nullptr; ASSERT_TRUE(AddOutput(&graph, add2_node, &output).ok()); AddQuantParams(&output->quant_params, -1.0, 1.0, 0.008); output->tensor.shape = BHWC(1, 4, 4, 8); ASSERT_EQ(3, graph.nodes().size()); ASSERT_EQ(4, graph.values().size()); auto transformation = 
NewAddQuantAdjustments(); ModelTransformer transformer(&graph); transformer.Apply("add_quant_adjustments", transformation.get()); EXPECT_EQ(4, graph.nodes().size()); EXPECT_EQ(5, graph.values().size()); EXPECT_EQ(ToString(OperationType::ADD), graph.nodes()[0]->operation.type); EXPECT_EQ(ToString(OperationType::QUANTIZE_AND_DEQUANTIZE), graph.nodes()[1]->operation.type); EXPECT_EQ(ToString(OperationType::QUANTIZE_AND_DEQUANTIZE), graph.nodes()[2]->operation.type); EXPECT_EQ(quant_node->id, graph.nodes()[2]->id); EXPECT_EQ(ToString(OperationType::ADD), graph.nodes()[3]->operation.type); auto new_quant_attr = absl::any_cast<QuantizeAndDequantizeAttributes>( graph.nodes()[1]->operation.attributes); EXPECT_EQ(0.0, new_quant_attr.min); EXPECT_EQ(2.0, new_quant_attr.max); const auto& new_quant_consumers = graph.FindConsumers(graph.values()[4]->id); EXPECT_EQ(2, new_quant_consumers.size()); EXPECT_EQ(quant_node, new_quant_consumers[0]); EXPECT_EQ(add2_node, new_quant_consumers[1]); transformer.Apply("add_quant_adjustments", transformation.get()); EXPECT_EQ(4, graph.nodes().size()); EXPECT_EQ(5, graph.values().size()); } } } }
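A minimal driving sketch (the wrapper name AddQuantAdjustmentsToGraph is hypothetical, not from the source above); it reuses only the NewAddQuantAdjustments() factory and the ModelTransformer::Apply call already shown in the test.

// Sketch only: applies the quant-adjustment pass to a graph whose values may
// carry quant_params, mirroring the calls in the tests above.
#include "tensorflow/lite/delegates/gpu/common/model.h"
#include "tensorflow/lite/delegates/gpu/common/model_transformer.h"
#include "tensorflow/lite/delegates/gpu/common/transformations/add_quant_adjustments.h"

namespace tflite {
namespace gpu {

void AddQuantAdjustmentsToGraph(GraphFloat32* graph) {
  auto transformation = NewAddQuantAdjustments();
  ModelTransformer transformer(graph);
  // Each output value with quant_params gets a QUANTIZE_AND_DEQUANTIZE node
  // inserted after its producer; re-running the pass is a no-op.
  transformer.Apply("add_quant_adjustments", transformation.get());
}

}  // namespace gpu
}  // namespace tflite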
1011
cpp
tensorflow/tensorflow
convolution_transposed_3x3
tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_3x3.cc
tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_3x3_test.cc
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_CONVOLUTION_TRANSPOSED_3X3_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_CONVOLUTION_TRANSPOSED_3X3_H_ #include <string> #include <vector> #include "tensorflow/lite/delegates/gpu/common/data_type.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/task/buffer_desc.h" #include "tensorflow/lite/delegates/gpu/common/task/gpu_operation.h" #include "tensorflow/lite/delegates/gpu/common/task/tensor_desc.h" #include "tensorflow/lite/delegates/gpu/common/task/weights_conversion.h" #include "tensorflow/lite/delegates/gpu/common/task/weights_layout.h" #include "tensorflow/lite/delegates/gpu/common/tensor.h" #include "tensorflow/lite/delegates/gpu/common/types.h" namespace tflite { namespace gpu { class ConvolutionTransposed3x3 : public GPUOperation { public: ConvolutionTransposed3x3() = default; void GetPossibleKernelWorkGroups( TuningType tuning_type, const GpuInfo& gpu_info, const KernelInfo& kernel_info, std::vector<int3>* work_groups) const override; absl::Status BindArguments(ArgumentsBinder* args) override; int3 GetGridSize() const override; ConvolutionTransposed3x3(ConvolutionTransposed3x3&& operation) = default; ConvolutionTransposed3x3& operator=(ConvolutionTransposed3x3&& operation) = default; ConvolutionTransposed3x3(const ConvolutionTransposed3x3&) = delete; ConvolutionTransposed3x3& operator=(const ConvolutionTransposed3x3&) = delete; WeightsDescription GetWeightsDescription() const { WeightsDescription desc; desc.type = DeduceDataTypeFromPrecision(definition_.precision); desc.layout = weights_layout_; desc.spatial_remap = GetSpatialWeightsRemap(); return desc; } enum class WeightsUploadType { LOCAL_MEM_ASYNC, LOCAL_MEM_BY_THREADS, GLOBAL_MEM, CONSTANT_MEM, }; private: ConvolutionTransposed3x3(const OperationDef& definition, const GpuInfo& gpu_info, int2 padding); friend ConvolutionTransposed3x3 CreateConvolutionTransposed3x3( const GpuInfo& gpu_info, const OperationDef& definition, const ConvolutionTransposedAttributes& attr); friend ConvolutionTransposed3x3 CreateConvolutionTransposed3x3DynamicWeights( const GpuInfo& gpu_info, const OperationDef& definition, const ConvolutionTransposedAttributes& attr); void UploadWeights( const tflite::gpu::Tensor<OHWI, DataType::FLOAT32>& weights); std::vector<int> GetSpatialWeightsRemap() const; std::string GenerateConvolutionTransposedCode( const GpuInfo& gpu_info, const OperationDef& op_def, ConvolutionTransposed3x3::WeightsUploadType weights_upload_type, int2 padding, int3 work_group_launch_order); int2 padding_; WeightsUploadType weights_upload_type_; WeightsLayout weights_layout_; }; bool IsConvolutionTransposed3x3Supported( const OperationDef& definition, const ConvolutionTransposedAttributes& attr); ConvolutionTransposed3x3 CreateConvolutionTransposed3x3( const GpuInfo& gpu_info, const OperationDef& definition, const ConvolutionTransposedAttributes& attr); ConvolutionTransposed3x3 CreateConvolutionTransposed3x3DynamicWeights( const GpuInfo& gpu_info, const OperationDef& definition, const ConvolutionTransposedAttributes& attr); } } #endif #include "tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_3x3.h" #include <memory> #include <string> #include <utility> #include <vector> #include "tensorflow/lite/delegates/gpu/common/task/work_group_picking.h" namespace tflite { namespace gpu { 
ConvolutionTransposed3x3::ConvolutionTransposed3x3( const OperationDef& definition, const GpuInfo& gpu_info, int2 padding) : GPUOperation(definition), padding_(padding) { work_group_size_ = int3(8, 4, 1); work_group_launch_order_ = int3(2, 0, 1); if (gpu_info.IsApple()) { if (gpu_info.apple_info.IsBionic()) { weights_upload_type_ = WeightsUploadType::GLOBAL_MEM; } else { weights_upload_type_ = WeightsUploadType::LOCAL_MEM_BY_THREADS; } } else if (gpu_info.IsPowerVR()) { weights_upload_type_ = WeightsUploadType::LOCAL_MEM_ASYNC; } else if (gpu_info.IsNvidia() || gpu_info.IsIntel()) { weights_upload_type_ = WeightsUploadType::LOCAL_MEM_BY_THREADS; } else if (gpu_info.IsAMD()) { weights_upload_type_ = WeightsUploadType::CONSTANT_MEM; } else { weights_upload_type_ = WeightsUploadType::GLOBAL_MEM; } if (gpu_info.IsApple()) { weights_layout_ = WeightsLayout::kOICustomSpatialO4I4; } else { weights_layout_ = WeightsLayout::kOICustomSpatialI4O4; } code_ = GenerateConvolutionTransposedCode(gpu_info, definition_, weights_upload_type_, padding_, work_group_launch_order_); if (definition_.precision == CalculationsPrecision::F16 && gpu_info.IsPowerVR()) { compiler_options_.push_back(CompilerOptions::kClFastRelaxedMath); } } std::string ConvolutionTransposed3x3::GenerateConvolutionTransposedCode( const GpuInfo& gpu_info, const OperationDef& op_def, ConvolutionTransposed3x3::WeightsUploadType weights_upload_type, int2 padding, int3 work_group_launch_order) { auto src_desc = op_def.src_tensors[0]; AddSrcTensor("src_tensor", src_desc); AddDstTensor("dst_tensor", op_def.src_tensors[0]); if (op_def.src_tensors.size() == 2) { BufferDescriptor desc; desc.element_type = op_def.src_tensors[1].GetDataType(); desc.element_size = 4; desc.memory_type = weights_upload_type == ConvolutionTransposed3x3::WeightsUploadType::CONSTANT_MEM ? MemoryType::CONSTANT : MemoryType::GLOBAL; AddSrcBuffer("weights", desc); } args_.AddInt("filter_offset"); args_.AddInt("padding_x"); args_.AddInt("padding_y"); const bool need_local_mem = weights_upload_type == ConvolutionTransposed3x3::WeightsUploadType::LOCAL_MEM_BY_THREADS || weights_upload_type == ConvolutionTransposed3x3::WeightsUploadType::LOCAL_MEM_ASYNC; std::string c; if (GetWeightsDescription().IsI4O4()) { switch (op_def.precision) { case CalculationsPrecision::F32: case CalculationsPrecision::F16: c += "#define CONV(R, SRC, F) \\\n"; c += " R += SRC.x * weights_cache[F]; \\\n"; c += " R += SRC.y * weights_cache[F + 1]; \\\n"; c += " R += SRC.z * weights_cache[F + 2]; \\\n"; c += " R += SRC.w * weights_cache[F + 3]; \n"; break; case CalculationsPrecision::F32_F16: c += "#define CONV(R, SRC, F) \\\n"; c += " R += TO_ACCUM_TYPE(SRC.x * weights_cache[F] + SRC.y * " "weights_cache[F + 1] + SRC.z * weights_cache[F + 2] + SRC.w * " "weights_cache[F + 3]);\n"; break; } } else { c += "#define CONV(R, SRC, F) \\\n"; c += " R.x += dot(SRC, weights_cache[F]); \\\n"; c += " R.y += dot(SRC, weights_cache[F + 1]); \\\n"; c += " R.z += dot(SRC, weights_cache[F + 2]); \\\n"; c += " R.w += dot(SRC, weights_cache[F + 3]); \n"; } const int wg_total_size = work_group_size_.x * work_group_size_.y * work_group_size_.z; const std::string barrier = wg_total_size == 32 && gpu_info.IsWaveSizeEqualTo32() ? "SIMD_LOCAL_MEM_BARRIER" : "LOCAL_MEM_BARRIER"; const std::string weights_space = weights_upload_type == ConvolutionTransposed3x3::WeightsUploadType::CONSTANT_MEM ? 
"__constant" : "__global"; if (gpu_info.IsApiOpenCl()) { c += "__attribute__((reqd_work_group_size(8, 4, 1)))\n"; } c += "MAIN_FUNCTION($0) {\n"; int3 launch_remap; launch_remap[work_group_launch_order.x] = 0; launch_remap[work_group_launch_order.y] = 1; launch_remap[work_group_launch_order.z] = 2; auto GetGlobalID = [&](int id) { std::string result; const std::string sid = std::to_string(id); if (work_group_launch_order[id] == id) { return "GLOBAL_ID_" + sid; } else { return "GROUP_ID_" + std::to_string(launch_remap[id]) + " * GROUP_SIZE_" + sid + " + LOCAL_ID_" + sid; } }; if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) { c += " int linear_id = " + GetGlobalID(0) + ";\n"; c += " int X = linear_id / args.dst_tensor.Batch();\n"; c += " int B = linear_id % args.dst_tensor.Batch();\n"; c += " args.src_tensor.SetBatchRef(B);\n"; c += " args.dst_tensor.SetBatchRef(B);\n"; } else { c += " int X = " + GetGlobalID(0) + ";\n"; } c += " int DST_X = X * 2;\n"; c += " int SRC_X = X + args.padding_x;\n"; c += " int Y = " + GetGlobalID(1) + ";\n"; c += " int DST_Y = Y * 2;\n"; c += " int SRC_Y = Y + args.padding_y;\n"; c += " int Z = " + GetGlobalID(2) + ";\n"; if (!need_local_mem) { c += " if (DST_X >= args.dst_tensor.Width() || DST_Y >= " "args.dst_tensor.Height() || Z >= args.dst_tensor.Slices()) return;\n"; } c += " ACCUM_FLT4 r0 = INIT_ACCUM_FLT4(0.0f);\n"; c += " ACCUM_FLT4 r1 = INIT_ACCUM_FLT4(0.0f);\n"; c += " ACCUM_FLT4 r2 = INIT_ACCUM_FLT4(0.0f);\n"; c += " ACCUM_FLT4 r3 = INIT_ACCUM_FLT4(0.0f);\n"; c += " int f_offset = Z * args.filter_offset;\n"; if (need_local_mem) { c += " __local FLT4 weights_cache[36];\n"; } if (weights_upload_type == ConvolutionTransposed3x3::WeightsUploadType::LOCAL_MEM_BY_THREADS) { c += " int local_id = LOCAL_ID_1 * 8 + LOCAL_ID_0;\n"; } if (!src_desc.SupportsZeroClamp(Axis::WIDTH, gpu_info)) { c += " bool in_x0 = SRC_X >= 0 && SRC_X < args.src_tensor.Width();\n"; c += " bool in_x1 = SRC_X + 1 >= 0 && SRC_X + 1 < " "args.src_tensor.Width();\n"; } if (!src_desc.SupportsZeroClamp(Axis::HEIGHT, gpu_info)) { c += " bool in_y0 = SRC_Y >= 0 && SRC_Y < args.src_tensor.Height();\n"; c += " bool in_y1 = SRC_Y + 1 >= 0 && SRC_Y + 1 < " "args.src_tensor.Height();\n"; } auto generate_check = [&](int x, int y) { std::string check; const std::vector<Axis> axes{Axis::WIDTH, Axis::HEIGHT}; const std::vector<std::string> names{"in_x" + std::to_string(x), "in_y" + std::to_string(y)}; for (int i = 0; i < axes.size(); ++i) { const auto& axis = axes[i]; if (src_desc.HasAxis(axis) && !src_desc.SupportsZeroClamp(axis, gpu_info)) { if (!check.empty()) { check += " && "; } check += names[i]; } } return check; }; if (src_desc.IsLinear()) { if (src_desc.ReturnsZeroForNegOneRead(gpu_info)) { c += " int addr_0 = args.src_tensor.GetAddress(SRC_X, SRC_Y, 0);\n"; c += " int addr_1 = args.src_tensor.GetAddress(SRC_X + 1, SRC_Y, 0);\n"; c += " int addr_2 = args.src_tensor.GetAddress(SRC_X, SRC_Y + 1, 0);\n"; c += " int addr_3 = args.src_tensor.GetAddress(SRC_X+1, SRC_Y+1, 0);\n"; c += " addr_0 = select(-1, addr_0, (in_x0 && in_y0));\n"; c += " addr_1 = select(-1, addr_1, (in_x1 && in_y0));\n"; c += " addr_2 = select(-1, addr_2, (in_x0 && in_y1));\n"; c += " addr_3 = select(-1, addr_3, (in_x1 && in_y1));\n"; c += " int dz_0 = select(0, args.src_tensor.SliceStride(), (in_x0 && " "in_y0));\n"; c += " int dz_1 = select(0, args.src_tensor.SliceStride(), (in_x1 && " "in_y0));\n"; c += " int dz_2 = select(0, args.src_tensor.SliceStride(), (in_x0 && " "in_y1));\n"; c += " int dz_3 = select(0, 
args.src_tensor.SliceStride(), (in_x1 && " "in_y1));\n"; } else { c += " int xc0 = clamp(SRC_X, 0, args.src_tensor.Width() - 1);\n"; c += " int xc1 = clamp(SRC_X + 1, 0, args.src_tensor.Width() - 1);\n"; c += " int yc0 = clamp(SRC_Y, 0, args.src_tensor.Height() - 1);\n"; c += " int yc1 = clamp(SRC_Y + 1, 0, args.src_tensor.Height() - 1);\n"; c += " int addr_0 = args.src_tensor.GetAddress(xc0, yc0, 0);\n"; c += " int addr_1 = args.src_tensor.GetAddress(xc1, yc0, 0);\n"; c += " int addr_2 = args.src_tensor.GetAddress(xc0, yc1, 0);\n"; c += " int addr_3 = args.src_tensor.GetAddress(xc1, yc1, 0);\n"; c += " int dz = args.src_tensor.SliceStride();\n"; } } auto read_src = [&](int x, int y) { if (src_desc.IsLinear()) { const std::string id = std::to_string(y * 2 + x); const std::string addr = "addr_" + std::to_string(y * 2 + x); if (src_desc.ReturnsZeroForNegOneRead(gpu_info)) { return "args.src_tensor.Read(" + addr + "); " + addr + " += dz_" + id + ";\n"; } else { return "args.src_tensor.Read(" + addr + ") * INIT_FLT(in_x" + std::to_string(x) + " && in_y" + std::to_string(y) + "); " + addr + " += dz;\n"; } } else { std::string check = generate_check(x, y); if (!check.empty()) { check = " * INIT_FLT(" + check + ")"; } return "args.src_tensor.Read(SRC_X + " + std::to_string(x) + ", SRC_Y + " + std::to_string(y) + ", s)" + check + ";\n"; } }; const int padding_x_rem = abs(padding.x) % 2; const int padding_y_rem = abs(padding.y) % 2; std::vector<std::pair<int, int>> permutation; if (padding_x_rem == 1 && padding_y_rem == 1) { permutation = {{0, 0}, {1, 0}, {1, 1}, {2, 0}, {2, 2}, {3, 0}, {3, 1}, {3, 2}, {3, 3}}; } else if (padding_x_rem == 0 && padding_y_rem == 1) { permutation = {{0, 0}, {0, 1}, {1, 1}, {2, 0}, {2, 1}, {2, 2}, {2, 3}, {3, 1}, {3, 3}}; } else if (padding_x_rem == 1 && padding_y_rem == 0) { permutation = {{0, 0}, {0, 2}, {1, 0}, {1, 1}, {1, 2}, {1, 3}, {2, 2}, {3, 2}, {3, 3}}; } else { permutation = {{0, 0}, {0, 1}, {0, 2}, {0, 3}, {1, 1}, {1, 3}, {2, 2}, {2, 3}, {3, 3}}; } c += " for (int s = 0; s < args.src_tensor.Slices(); ++s) {\n"; if (need_local_mem) { c += " " + barrier + ";\n"; } if (weights_upload_type == ConvolutionTransposed3x3::WeightsUploadType::LOCAL_MEM_ASYNC) { c += " async_work_group_copy(weights_cache, " "args.weights.GetPtr(f_offset), 36, " "0);\n"; } else if (weights_upload_type == ConvolutionTransposed3x3::WeightsUploadType:: LOCAL_MEM_BY_THREADS) { c += " weights_cache[local_id] = args.weights.Read(f_offset + " "local_id);\n"; c += " if (local_id < 4) {\n"; c += " weights_cache[local_id + 32] = args.weights.Read(f_offset + " "local_id + " "32);\n"; c += " };\n"; } else { c += " " + weights_space + " FLT4* weights_cache = args.weights.GetPtr(f_offset);\n"; } c += " FLT4 src0 = " + read_src(0, 0); c += " FLT4 src1 = " + read_src(1, 0); c += " FLT4 src2 = " + read_src(0, 1); c += " FLT4 src3 = " + read_src(1, 1); c += " f_offset += 36;\n"; if (need_local_mem) { c += " " + barrier + ";\n"; } for (int i = 0; i < 9; ++i) { const std::string r_name = "r" + std::to_string(permutation[i].first); const std::string s_name = "src" + std::to_string(permutation[i].second); const std::string w_name = std::to_string(i * 4); c += " CONV(" + r_name + ", " + s_name + ", " + w_name + ");\n"; } c += " }\n"; if (need_local_mem) { c += " if (DST_X >= args.dst_tensor.Width() || DST_Y >= " "args.dst_tensor.Height() || Z >= args.dst_tensor.Slices()) return;\n"; } c += " FLT4 bias_val = args.biases.Read(Z);\n"; for (int y = 0; y < 2; ++y) { for (int x = 0; x < 2; ++x) { const std::string 
s_x = std::to_string(x); const std::string s_y = std::to_string(y); const std::string id = std::to_string(y * 2 + x); const std::string x_c = "DST_X + " + s_x; const std::string y_c = "DST_Y + " + s_y; c += " if (" + x_c + " < args.dst_tensor.Width() && " + y_c + " < args.dst_tensor.Height()) {\n"; c += " FLT4 res0 = TO_FLT4(r" + id + ") + bias_val;\n"; c += " args.dst_tensor.Write(res0, " + x_c + ", " + y_c + ", Z);\n"; c += " }\n"; } } c += "}\n"; return c; } absl::Status ConvolutionTransposed3x3::BindArguments(ArgumentsBinder* args) { RETURN_IF_ERROR(args->SetInt("filter_offset", 4 * 9 * src_[0]->Slices())); const int padding_x = padding_.x >= 1 ? (padding_.x - 1) / 2 : (padding_.x - 2) / 2; const int padding_y = padding_.y >= 1 ? (padding_.y - 1) / 2 : (padding_.y - 2) / 2; RETURN_IF_ERROR(args->SetInt("padding_x", padding_x)); return args->SetInt("padding_y", padding_y); } void ConvolutionTransposed3x3::GetPossibleKernelWorkGroups( TuningType tuning_type, const GpuInfo& gpu_info, const KernelInfo& kernel_info, std::vector<int3>* work_groups) const { if (weights_upload_type_ == WeightsUploadType::LOCAL_MEM_ASYNC || weights_upload_type_ == WeightsUploadType::LOCAL_MEM_BY_THREADS) { work_groups->push_back(work_group_size_); return; } GetPossibleWorkGroupsConv(tuning_type, gpu_info, kernel_info, grid_size_, work_groups); } int3 ConvolutionTransposed3x3::GetGridSize() const { const int grid_x = DivideRoundUp(dst_[0]->Width(), 2) * dst_[0]->Batch(); const int grid_y = DivideRoundUp(dst_[0]->Height(), 2); const int grid_z = dst_[0]->Slices(); return int3(grid_x, grid_y, grid_z); } std::vector<int> ConvolutionTransposed3x3::GetSpatialWeightsRemap() const { const int padding_x_rem = abs(padding_.x) % 2; const int padding_y_rem = abs(padding_.y) % 2; std::vector<int> remap; if (padding_x_rem == 1 && padding_y_rem == 1) { return std::vector<int>{4, 5, 3, 7, 1, 8, 6, 2, 0}; } else if (padding_x_rem == 0 && padding_y_rem == 1) { return std::vector<int>{5, 3, 4, 8, 6, 2, 0, 7, 1}; } else if (padding_x_rem == 1 && padding_y_rem == 0) { return std::vector<int>{7, 1, 8, 6, 2, 0, 4, 5, 3}; } else { return std::vector<int>{8, 6, 2, 0, 7, 1, 5, 3, 4}; } } void ConvolutionTransposed3x3::UploadWeights( const tflite::gpu::Tensor<OHWI, DataType::FLOAT32>& weights) { const auto weights_desc = GetWeightsDescription(); const int flt_count = GetTotalElementsCountForLayout(weights_desc, weights.shape); BufferDescriptor desc; desc.element_type = weights_desc.type; desc.element_size = 4; desc.memory_type = weights_upload_type_ == ConvolutionTransposed3x3::WeightsUploadType::CONSTANT_MEM ? 
MemoryType::CONSTANT : MemoryType::GLOBAL; desc.size = flt_count * SizeOf(desc.element_type); desc.data.resize(desc.size); RearrangeWeights(weights, weights_desc, absl::MakeSpan(desc.data)); args_.AddObject("weights", std::make_unique<BufferDescriptor>(std::move(desc))); } bool IsConvolutionTransposed3x3Supported( const OperationDef& definition, const ConvolutionTransposedAttributes& attr) { return attr.weights.shape.w == 3 && attr.weights.shape.h == 3 && attr.stride.w == 2 && attr.stride.h == 2; } ConvolutionTransposed3x3 CreateConvolutionTransposed3x3( const GpuInfo& gpu_info, const OperationDef& definition, const ConvolutionTransposedAttributes& attr) { const int2 padding = int2(attr.padding.prepended.w, attr.padding.prepended.h); ConvolutionTransposed3x3 result(definition, gpu_info, padding); result.UploadWeights(attr.weights); TensorDescriptor bias_tensor_desc = CreateConstantLinearTensorDescriptor( gpu_info, definition.src_tensors[0].GetDataType(), attr.bias); result.args_.AddObject("biases", std::make_unique<TensorDescriptor>( std::move(bias_tensor_desc))); return result; } ConvolutionTransposed3x3 CreateConvolutionTransposed3x3DynamicWeights( const GpuInfo& gpu_info, const OperationDef& definition, const ConvolutionTransposedAttributes& attr) { OperationDef new_def = definition; new_def.src_tensors = { definition.src_tensors[0]}; const DataType weights_type = definition.GetDataType(); new_def.src_tensors.push_back( {weights_type, TensorStorageType::BUFFER, Layout::HWC}); const int2 padding = int2(attr.padding.prepended.w, attr.padding.prepended.h); ConvolutionTransposed3x3 result(new_def, gpu_info, padding); TensorDescriptor bias_tensor_desc = CreateConstantLinearTensorDescriptor( gpu_info, definition.src_tensors[0].GetDataType(), attr.bias); result.args_.AddObject("biases", std::make_unique<TensorDescriptor>( std::move(bias_tensor_desc))); return result; } } }
#include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_3x3_test_util.h" namespace tflite { namespace gpu { namespace cl { TEST_F(OpenCLOperationTest, ConvolutionTransposed3x3) { auto status = ConvolutionTransposed3x3Test(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } } } }
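A hedged creation sketch (the wrapper MaybeCreateConvTransposed3x3 is hypothetical; it assumes the caller already filled in OperationDef and ConvolutionTransposedAttributes): the specialized 3x3 kernel is only built when the support check exported above passes.

// Sketch: picks the specialized 3x3 transposed convolution only when the
// attributes satisfy IsConvolutionTransposed3x3Supported.
#include <memory>
#include <utility>
#include "tensorflow/lite/delegates/gpu/common/operations.h"
#include "tensorflow/lite/delegates/gpu/common/task/gpu_operation.h"
#include "tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_3x3.h"

namespace tflite {
namespace gpu {

std::unique_ptr<GPUOperation> MaybeCreateConvTransposed3x3(
    const GpuInfo& gpu_info, const OperationDef& definition,
    const ConvolutionTransposedAttributes& attr) {
  if (!IsConvolutionTransposed3x3Supported(definition, attr)) {
    return nullptr;  // caller falls back to a generic transposed convolution
  }
  ConvolutionTransposed3x3 op =
      CreateConvolutionTransposed3x3(gpu_info, definition, attr);
  return std::make_unique<ConvolutionTransposed3x3>(std::move(op));
}

}  // namespace gpu
}  // namespace tflite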
1012
cpp
tensorflow/tensorflow
mean_stddev_normalization
tensorflow/lite/delegates/gpu/common/tasks/mean_stddev_normalization.cc
tensorflow/lite/delegates/gpu/cl/kernels/mean_stddev_normalization_test.cc
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_MEAN_STDDEV_NORMALIZATION_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_MEAN_STDDEV_NORMALIZATION_H_ #include <map> #include <set> #include <string> #include <vector> #include "tensorflow/lite/delegates/gpu/common/model.h" #include "tensorflow/lite/delegates/gpu/common/selectors/subgraph.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/task/gpu_operation.h" #include "tensorflow/lite/delegates/gpu/common/task/work_group_picking.h" #include "tensorflow/lite/delegates/gpu/common/types.h" namespace tflite { namespace gpu { class MeanStdDevNormalization : public GPUOperation { public: explicit MeanStdDevNormalization(const OperationDef& definition, const GpuInfo& gpu_info, const BHWC& shape, float variance_bias, bool two_step); void GetPossibleKernelWorkGroups( TuningType tuning_type, const GpuInfo& gpu_info, const KernelInfo& kernel_info, std::vector<int3>* work_groups) const override { if (!work_group_reduction_) { GetPossibleWorkGroups(tuning_type, gpu_info, kernel_info, grid_size_, work_groups); return; } work_groups->push_back(work_group_size_); } int3 GetGridSize() const override; MeanStdDevNormalization(MeanStdDevNormalization&& kernel) = default; MeanStdDevNormalization& operator=(MeanStdDevNormalization&& kernel) = default; MeanStdDevNormalization(const MeanStdDevNormalization&) = delete; MeanStdDevNormalization& operator=(const MeanStdDevNormalization&) = delete; private: std::string GetNormalizationCode(const GpuInfo& gpu_info, bool channels_x4, bool two_step); bool work_group_reduction_ = true; }; MeanStdDevNormalization CreateMeanStdDevNormalization( const OperationDef& definition, const GpuInfo& gpu_info, const BHWC& shape, float variance_bias = 1.0e-8f, bool two_step = true); absl::Status TryMeanStdDevNormalization( const GpuInfo& gpu_info, CalculationsPrecision precision, const GraphFloat32& graph, NodeId first_node_id, const std::map<ValueId, TensorDescriptor>& tensor_descriptors, std::set<NodeId>* consumed_nodes, GPUOperationsSubgraph* gpu_subgraph); class LayerNormalization : public GPUOperation { public: LayerNormalization(const OperationDef& definition, const GpuInfo& gpu_info, const BHWC& shape, float variance_bias, const Tensor<Linear, DataType::FLOAT32>& mul_linear, const Tensor<Linear, DataType::FLOAT32>& sub_linear, bool two_step); void GetPossibleKernelWorkGroups( TuningType tuning_type, const GpuInfo& gpu_info, const KernelInfo& kernel_info, std::vector<int3>* work_groups) const override { if (!work_group_reduction_) { GetPossibleWorkGroups(tuning_type, gpu_info, kernel_info, grid_size_, work_groups); return; } work_groups->push_back(work_group_size_); } int3 GetGridSize() const override; LayerNormalization(LayerNormalization&& kernel) = default; LayerNormalization& operator=(LayerNormalization&& kernel) = default; LayerNormalization(const LayerNormalization&) = delete; LayerNormalization& operator=(const LayerNormalization&) = delete; private: std::string GetNormalizationCode(const GpuInfo& gpu_info, bool channels_x4, bool two_step); bool work_group_reduction_ = true; }; LayerNormalization CreateLayerNormalization( const OperationDef& definition, const GpuInfo& gpu_info, const BHWC& shape, float variance_bias, const Tensor<Linear, DataType::FLOAT32>& mul_linear, const Tensor<Linear, DataType::FLOAT32>& sub_linear, bool two_step); } } #endif #include "tensorflow/lite/delegates/gpu/common/tasks/mean_stddev_normalization.h" #include <algorithm> #include 
<map> #include <memory> #include <set> #include <string> #include <utility> #include "absl/strings/substitute.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/util.h" namespace tflite { namespace gpu { namespace { absl::Status CheckIfValidNodeOfType(const Node* node, OperationType required_type) { if (node == nullptr) { return absl::NotFoundError("Invalid node."); } if (OperationTypeFromString(node->operation.type) != required_type) { return absl::NotFoundError("Type mismatch."); } return absl::OkStatus(); } absl::Status GetElementwiseScalarValue(const Node* node, float* result) { auto attr = absl::any_cast<ElementwiseAttributes>(node->operation.attributes); const float* value = absl::get_if<float>(&attr.param); if (!value) { return absl::NotFoundError("Not a scalar value inside attributes."); } *result = *value; return absl::OkStatus(); } absl::Status GetNextSingleNode(const GraphFloat32& graph, const Node& node, OperationType next_type, Node** next_node) { auto consumers = graph.FindConsumers(graph.FindOutputs(node.id)[0]->id); if (consumers.size() != 1) { return absl::NotFoundError("Not a single consumer."); } RETURN_IF_ERROR(CheckIfValidNodeOfType(consumers[0], next_type)); *next_node = consumers[0]; return absl::OkStatus(); } std::string GetReduceCode(const std::string& value, int3 work_group_size, bool two_step) { int reduction_size = work_group_size.z; std::string mem_name = work_group_size.x * work_group_size.y != 1 ? "shared_mem[LOCAL_ID_1][LOCAL_ID_0]" : "shared_mem"; if (reduction_size <= 8) { std::string result; result += " { result += " " + mem_name + "[local_id] = " + value + ";\n"; result += " LOCAL_MEM_BARRIER;\n"; result += " if (LOCAL_ID_2 == 0) {\n"; result += " " + value + " = " + mem_name + "[0];\n"; for (int i = 1; i < reduction_size; ++i) { result += " " + value + " += " + mem_name + "[" + std::to_string(i) + "];\n"; } result += " " + mem_name + "[0] = " + value + ";\n"; result += " }\n"; result += " LOCAL_MEM_BARRIER;\n"; result += " " + value + " = " + mem_name + "[0];\n"; if (two_step) { result += " LOCAL_MEM_BARRIER;\n"; } result += " }\n"; return result; } else { return absl::Substitute(R"( { $2[local_id] = $1; LOCAL_MEM_BARRIER; int reduction_size = $0; while (reduction_size > 1) { int active_thread_limit = reduction_size / 2; int offset = (reduction_size + 1) / 2; if (local_id < active_thread_limit) { $1 += $2[local_id + offset]; $2[local_id] = $1; } LOCAL_MEM_BARRIER; reduction_size = offset; } $1 = $2[0]; } )", reduction_size, value, mem_name); } } std::string ZeroClampVec4Code(const std::string& slice_name, const std::string& channels_name, const std::string& value_name) { return absl::Substitute(R"( if ($0 * 4 + 1 >= $1) { $2.y = 0.0f; } if ($0 * 4 + 2 >= $1) { $2.z = 0.0f; } if ($0 * 4 + 3 >= $1) { $2.w = 0.0f; } )", slice_name, channels_name, value_name); } bool UseWorkGroupReduction(const GpuInfo& gpu_info, const BHWC& shape) { const int tensor_slices = DivideRoundUp(shape.c, 4); if (gpu_info.IsAdreno() && tensor_slices <= 32 && shape.w * shape.h * shape.b >= 128) { return false; } else { return true; } } int3 GetRecommendedWorkGroupSize(const GpuInfo& gpu_info, const BHWC& shape) { const int tensor_slices = DivideRoundUp(shape.c, 4); int desired_work_group_size = gpu_info.GetMaxWorkGroupSizeForZ(); if (gpu_info.IsMali()) { desired_work_group_size = 64; } if (gpu_info.IsAdreno()) { AdrenoInfo info = gpu_info.adreno_info; desired_work_group_size = 256; if (info.IsAdreno3xx()) { if 
(info.adreno_gpu == AdrenoGpu::kAdreno320 || info.adreno_gpu == AdrenoGpu::kAdreno330) { desired_work_group_size = 128; } else { desired_work_group_size = 64; } } else if (info.IsAdreno4xx()) { if (info.adreno_gpu == AdrenoGpu::kAdreno430) { desired_work_group_size = 256; } else { desired_work_group_size = 128; } } else if (info.IsAdreno5xx()) { if (info.adreno_gpu == AdrenoGpu::kAdreno530 || info.adreno_gpu == AdrenoGpu::kAdreno540) { desired_work_group_size = 256; } else { desired_work_group_size = 128; } } } if (gpu_info.IsPowerVR()) { desired_work_group_size = 64; } if (gpu_info.IsApple()) { desired_work_group_size = 64; } if (gpu_info.IsAMD()) { desired_work_group_size = 512; } int3 work_group_size(1, 1, 1); if (shape.w * shape.h == 1) { desired_work_group_size = std::min(desired_work_group_size, gpu_info.GetMaxWorkGroupSizeForZ()); while (desired_work_group_size >= tensor_slices * 2) { desired_work_group_size /= 2; } work_group_size.x = 1; work_group_size.y = 1; work_group_size.z = desired_work_group_size; } else { if (tensor_slices >= 16) { work_group_size.z = 8; } else if (tensor_slices >= 10) { work_group_size.z = 4; } else { std::map<int, int> slices_to_group_size = { {1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 3}, {6, 3}, {7, 4}, {8, 4}, {9, 3}, }; work_group_size.z = slices_to_group_size[tensor_slices]; } desired_work_group_size = std::min(desired_work_group_size, gpu_info.GetMaxWorkGroupTotalSize()); work_group_size.x = 1; work_group_size.y = desired_work_group_size / AlignByN(work_group_size.z, 4); while (work_group_size.y > work_group_size.x) { work_group_size.y /= 2; work_group_size.x *= 2; } } return work_group_size; } std::string GetVarianceCalculationCode(const GpuInfo& gpu_info, bool work_group_reduction, const int3& work_group_size, bool has_batch, bool channels_x4, bool two_step) { std::string c; if (work_group_reduction && gpu_info.IsApiOpenCl()) { c += "__attribute__((reqd_work_group_size(" + std::to_string(work_group_size.x) + ", " + std::to_string(work_group_size.y) + ", " + std::to_string(work_group_size.z) + ")))\n"; } c += "MAIN_FUNCTION($0) {\n"; if (work_group_reduction) { std::string accum_type = two_step ? 
"float" : "float2"; if (work_group_size.x * work_group_size.y == 1) { c += "__local " + accum_type + " shared_mem[" + std::to_string(work_group_size.z) + "];\n"; } else { c += "__local " + accum_type + " shared_mem[" + std::to_string(work_group_size.y) + "][" + std::to_string(work_group_size.x) + "][" + std::to_string(work_group_size.z) + "];\n"; } } if (has_batch) { c += " int linear_id = GLOBAL_ID_0;\n"; c += " int X = linear_id / args.dst_tensor.Batch();\n"; c += " int B = linear_id % args.dst_tensor.Batch();\n"; c += " args.src_tensor.SetBatchRef(B);\n"; c += " args.dst_tensor.SetBatchRef(B);\n"; } else { c += " int X = GLOBAL_ID_0;\n"; } c += " int Y = GLOBAL_ID_1;\n"; if (!work_group_reduction) { c += " if (X >= args.dst_tensor.Width()) { return; }\n"; c += " if (Y >= args.dst_tensor.Height()) { return; }\n"; } if (!two_step) { c += " float4 private_sum4_sq = INIT_FLOAT4(0.0f);\n"; } if (work_group_reduction) { c += " int local_id = LOCAL_ID_2;\n"; c += " int reduction_group_size = GROUP_SIZE_2;\n"; } else { c += " int local_id = 0;\n"; c += " int reduction_group_size = 1;\n"; } c += R"( float4 private_sum4 = INIT_FLOAT4(0.0f); for (int S = local_id; S < args.src_tensor.Slices(); S += reduction_group_size) { int x_clamped = min(X, args.src_tensor.Width() - 1); int y_clamped = min(Y, args.src_tensor.Height() - 1); float4 t = args.src_tensor.Read<float>(x_clamped, y_clamped, S); )"; if (!channels_x4) { c += ZeroClampVec4Code("S", "args.src_tensor.Channels()", "t"); } if (two_step) { c += " private_sum4 += t;\n"; c += " }\n"; c += " float sum = dot(private_sum4, INIT_FLOAT4(1.0f));\n"; } else { c += " private_sum4 += t;\n"; c += " private_sum4_sq += t * t;\n"; c += " }\n"; c += " float2 sum;\n"; c += " sum.x = dot(private_sum4, INIT_FLOAT4(1.0f));\n"; c += " sum.y = dot(private_sum4_sq, INIT_FLOAT4(1.0f));\n"; } if (work_group_reduction) { c += GetReduceCode("sum", work_group_size, two_step); } if (two_step) { c += R"( float mean = sum * args.inv_ch_count; float4 private_sum_diff_sq4 = INIT_FLOAT4(0.0f); for (int S = local_id; S < args.src_tensor.Slices(); S += reduction_group_size) { int x_clamped = min(X, args.src_tensor.Width() - 1); int y_clamped = min(Y, args.src_tensor.Height() - 1); float4 t = args.src_tensor.Read<float>(x_clamped, y_clamped, S); float4 diff = t - mean;)"; if (!channels_x4) { c += ZeroClampVec4Code("S", "args.src_tensor.Channels()", "diff"); } c += R"( private_sum_diff_sq4 += diff * diff; } float sum_diff_sq = dot(private_sum_diff_sq4, INIT_FLOAT4(1.0f)); )"; if (work_group_reduction) { c += GetReduceCode("sum_diff_sq", work_group_size, two_step); } c += " float variance = sum_diff_sq * args.inv_ch_count;\n"; } else { c += " float mean = sum.x * args.inv_ch_count;\n"; c += " float mean_sq = sum.y * args.inv_ch_count;\n"; c += " float variance = mean_sq - mean * mean;\n"; } if (work_group_reduction) { c += " c += " if (X >= args.dst_tensor.Width()) { return; }\n"; c += " if (Y >= args.dst_tensor.Height()) { return; }\n"; } return c; } } MeanStdDevNormalization::MeanStdDevNormalization(const OperationDef& definition, const GpuInfo& gpu_info, const BHWC& shape, float variance_bias, bool two_step) : GPUOperation(definition) { work_group_reduction_ = UseWorkGroupReduction(gpu_info, shape); if (work_group_reduction_) { work_group_size_ = GetRecommendedWorkGroupSize(gpu_info, shape); } else { work_group_size_ = int3(8, 8, 1); } args_.AddFloat("variance_bias", variance_bias); args_.AddFloat("inv_ch_count", 1.0f / shape.c); AddSrcTensor("src_tensor", 
definition_.src_tensors[0]); AddDstTensor("dst_tensor", definition_.dst_tensors[0]); code_ = GetNormalizationCode(gpu_info, shape.c % 4 == 0, two_step); } std::string MeanStdDevNormalization::GetNormalizationCode( const GpuInfo& gpu_info, bool channels_x4, bool two_step) { std::string c = GetVarianceCalculationCode( gpu_info, work_group_reduction_, work_group_size_, definition_.dst_tensors[0].HasAxis(Axis::BATCH), channels_x4, two_step); c += R"( float stddev_inv = rsqrt(variance + args.variance_bias); for (int S = local_id; S < args.src_tensor.Slices(); S += reduction_group_size) { float4 t = args.src_tensor.Read<float>(X, Y, S); FLT4 result = TO_FLT4((t - mean) * stddev_inv); args.dst_tensor.Write(result, X, Y, S); } })"; return c; } int3 MeanStdDevNormalization::GetGridSize() const { const int grid_x = dst_[0]->Width() * dst_[0]->Batch(); const int grid_y = dst_[0]->Height(); const int grid_z = work_group_reduction_ ? work_group_size_.z : 1; return int3(grid_x, grid_y, grid_z); } MeanStdDevNormalization CreateMeanStdDevNormalization( const OperationDef& definition, const GpuInfo& gpu_info, const BHWC& shape, float variance_bias, bool two_step) { return MeanStdDevNormalization(definition, gpu_info, shape, variance_bias, two_step); } absl::Status TryMeanStdDevNormalization( const GpuInfo& gpu_info, CalculationsPrecision precision, const GraphFloat32& graph, NodeId first_node_id, const std::map<ValueId, TensorDescriptor>& tensor_descriptors, std::set<NodeId>* consumed_nodes, GPUOperationsSubgraph* gpu_subgraph) { Node* first_mean_node = graph.GetNode(first_node_id); RETURN_IF_ERROR(CheckIfValidNodeOfType(first_mean_node, OperationType::MEAN)); auto first_mean_attr = absl::any_cast<MeanAttributes>(first_mean_node->operation.attributes); if (first_mean_attr.dims != std::set<Axis>{Axis::CHANNELS}) { return absl::NotFoundError("MeanStdDevNormalization not suitable."); } Node* sub_node; RETURN_IF_ERROR(GetNextSingleNode(graph, *first_mean_node, OperationType::SUB, &sub_node)); auto sub_inputs = graph.FindInputs(sub_node->id); if (sub_inputs.size() != 2) { return absl::NotFoundError("MeanStdDevNormalization not suitable."); } else { Node* sub_first_parent = graph.FindProducer(sub_inputs[0]->id); Node* sub_second_parent = graph.FindProducer(sub_inputs[1]->id); if (sub_second_parent != first_mean_node) { return absl::NotFoundError("MeanStdDevNormalization not suitable."); } auto mean_inputs = graph.FindInputs(first_mean_node->id); Node* mean_parent = graph.FindProducer(mean_inputs[0]->id); if (mean_parent != sub_first_parent) { return absl::NotFoundError("MeanStdDevNormalization not suitable."); } } auto sub_output = graph.FindOutputs(sub_node->id)[0]->id; auto consumers = graph.FindConsumers(sub_output); if (consumers.size() != 2) { return absl::NotFoundError("MeanStdDevNormalization not suitable."); } Node* square_node = consumers[0]; Node* sub_child_mul_node = consumers[1]; if (!CheckIfValidNodeOfType(square_node, OperationType::SQUARE).ok()) { square_node = consumers[1]; sub_child_mul_node = consumers[0]; } RETURN_IF_ERROR(CheckIfValidNodeOfType(square_node, OperationType::SQUARE)); RETURN_IF_ERROR( CheckIfValidNodeOfType(sub_child_mul_node, OperationType::MUL)); Node* second_mean_node; RETURN_IF_ERROR(GetNextSingleNode(graph, *square_node, OperationType::MEAN, &second_mean_node)); auto second_mean_attr = absl::any_cast<MeanAttributes>(second_mean_node->operation.attributes); if (second_mean_attr.dims != std::set<Axis>{Axis::CHANNELS}) { return absl::NotFoundError("MeanStdDevNormalization not 
suitable."); } Node* add_node; RETURN_IF_ERROR(GetNextSingleNode(graph, *second_mean_node, OperationType::ADD, &add_node)); float add_value; RETURN_IF_ERROR(GetElementwiseScalarValue(add_node, &add_value)); Node* rsqrt_node; RETURN_IF_ERROR( GetNextSingleNode(graph, *add_node, OperationType::RSQRT, &rsqrt_node)); Node* mul_node; RETURN_IF_ERROR( GetNextSingleNode(graph, *rsqrt_node, OperationType::MUL, &mul_node)); if (sub_child_mul_node != mul_node) { return absl::NotFoundError("MeanStdDevNormalization not suitable."); } OperationDef op_def; op_def.precision = precision; auto input_id = graph.FindInputs(first_mean_node->id)[0]->id; auto it = tensor_descriptors.find(input_id); if (it != tensor_descriptors.end()) { op_def.src_tensors.push_back(it->second); } auto output_id = graph.FindOutputs(mul_node->id)[0]->id; it = tensor_descriptors.find(output_id); if (it != tensor_descriptors.end()) { op_def.dst_tensors.push_back(it->second); } auto subgraph_inputs = graph.FindInputs(first_mean_node->id); auto subgraph_outputs = graph.FindOutputs(mul_node->id); std::unique_ptr<GPUOperation>* gpu_op = InitSingleOpSubgraph(subgraph_inputs, subgraph_outputs, gpu_subgraph); *gpu_op = std::make_unique<MeanStdDevNormalization>(CreateMeanStdDevNormalization( op_def, gpu_info, subgraph_inputs[0]->tensor.shape, add_value, false)); consumed_nodes->insert(first_mean_node->id); consumed_nodes->insert(sub_node->id); consumed_nodes->insert(square_node->id); consumed_nodes->insert(second_mean_node->id); consumed_nodes->insert(add_node->id); consumed_nodes->insert(rsqrt_node->id); consumed_nodes->insert(mul_node->id); return absl::OkStatus(); } LayerNormalization::LayerNormalization( const OperationDef& definition, const GpuInfo& gpu_info, const BHWC& shape, float variance_bias, const Tensor<Linear, DataType::FLOAT32>& mul_linear, const Tensor<Linear, DataType::FLOAT32>& sub_linear, bool two_step) : GPUOperation(definition) { work_group_reduction_ = UseWorkGroupReduction(gpu_info, shape); if (work_group_reduction_) { work_group_size_ = GetRecommendedWorkGroupSize(gpu_info, shape); } else { work_group_size_ = int3(8, 8, 1); } args_.AddFloat("variance_bias", variance_bias); args_.AddFloat("inv_ch_count", 1.0f / shape.c); AddSrcTensor("src_tensor", definition_.src_tensors[0]); AddDstTensor("dst_tensor", definition_.dst_tensors[0]); TensorDescriptor mul_tensor_desc = CreateConstantLinearTensorDescriptor( gpu_info, definition.src_tensors[0].GetDataType(), mul_linear); args_.AddObject("mul_linear", std::make_unique<TensorDescriptor>( std::move(mul_tensor_desc))); TensorDescriptor sub_tensor_desc = CreateConstantLinearTensorDescriptor( gpu_info, definition.src_tensors[0].GetDataType(), sub_linear); args_.AddObject("sub_linear", std::make_unique<TensorDescriptor>( std::move(sub_tensor_desc))); code_ = GetNormalizationCode(gpu_info, shape.c % 4 == 0, two_step); } std::string LayerNormalization::GetNormalizationCode(const GpuInfo& gpu_info, bool channels_x4, bool two_step) { std::string c = GetVarianceCalculationCode( gpu_info, work_group_reduction_, work_group_size_, definition_.dst_tensors[0].HasAxis(Axis::BATCH), channels_x4, two_step); c += R"( float stddev_inv = rsqrt(variance + args.variance_bias); for (int S = local_id; S < args.src_tensor.Slices(); S += reduction_group_size) { float4 t = args.src_tensor.Read<float>(X, Y, S); float4 mul0_res = stddev_inv * args.mul_linear.Read<float>(S); float4 mul1_res = mul0_res * t; float4 mul2_res = mul0_res * mean; float4 sub_res = args.sub_linear.Read<float>(S) - mul2_res; FLT4 
result = TO_FLT4(mul1_res + sub_res); args.dst_tensor.Write(result, X, Y, S); } })"; return c; } int3 LayerNormalization::GetGridSize() const { const int grid_x = dst_[0]->Width() * dst_[0]->Batch(); const int grid_y = dst_[0]->Height(); const int grid_z = work_group_reduction_ ? work_group_size_.z : 1; return int3(grid_x, grid_y, grid_z); } LayerNormalization CreateLayerNormalization( const OperationDef& definition, const GpuInfo& gpu_info, const BHWC& shape, float variance_bias, const Tensor<Linear, DataType::FLOAT32>& mul_linear, const Tensor<Linear, DataType::FLOAT32>& sub_linear, bool two_step) { return LayerNormalization(definition, gpu_info, shape, variance_bias, mul_linear, sub_linear, two_step); } } }
#include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/tasks/mean_stddev_normalization_test_util.h" namespace tflite { namespace gpu { namespace cl { namespace { TEST_F(OpenCLOperationTest, MeanStddevNormSeparateBatches) { auto status = MeanStddevNormSeparateBatchesTest(0.0f, 0.0f, 0.0f, &exec_env_); ASSERT_TRUE(status.ok()) << status.message(); status = MeanStddevNormSeparateBatchesTest(0.0f, 0.01f, 2.63e-4f, &exec_env_); ASSERT_TRUE(status.ok()) << status.message(); status = MeanStddevNormSeparateBatchesTest(0.0f, 100.0f, 2.63e-4f, &exec_env_); ASSERT_TRUE(status.ok()) << status.message(); status = MeanStddevNormSeparateBatchesTest(0.01f, 0.0f, 0.0f, &exec_env_); ASSERT_TRUE(status.ok()) << status.message(); status = MeanStddevNormSeparateBatchesTest(0.01f, 0.01f, 3.57e-4f, &exec_env_); ASSERT_TRUE(status.ok()) << status.message(); status = MeanStddevNormSeparateBatchesTest(1.0f, 100.0f, 2.63e-4f, &exec_env_); ASSERT_TRUE(status.ok()) << status.message(); status = MeanStddevNormSeparateBatchesTest(100.0f, 0.0f, 0.0f, &exec_env_); ASSERT_TRUE(status.ok()) << status.message(); status = MeanStddevNormSeparateBatchesTest(100.0f, 1.0f, 2.63e-4f, &exec_env_); ASSERT_TRUE(status.ok()) << status.message(); status = MeanStddevNormSeparateBatchesTest(100.0f, 100.0f, 2.63e-4f, &exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } TEST_F(OpenCLOperationTest, MeanStddevNormalizationAllBatches) { auto status = MeanStddevNormalizationAllBatchesTest(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } TEST_F(OpenCLOperationTest, MeanStddevNormalizationLargeVector) { auto status = MeanStddevNormalizationLargeVectorTest(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } } } } }
1,013
cpp
tensorflow/tensorflow
conv_weights_converter
tensorflow/lite/delegates/gpu/common/tasks/conv_weights_converter.cc
tensorflow/lite/delegates/gpu/cl/kernels/conv_weights_converter_test.cc
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_CONV_WEIGHTS_CONVERTER_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_CONV_WEIGHTS_CONVERTER_H_ #include <string> #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/task/gpu_operation.h" #include "tensorflow/lite/delegates/gpu/common/task/weights_layout.h" #include "tensorflow/lite/delegates/gpu/common/types.h" namespace tflite { namespace gpu { class ConverterToConvWeights : public GPUOperation { public: ConverterToConvWeights(const OperationDef& definition, const WeightsDescription& weights_desc, Layout input_layout); absl::Status BindArguments(ArgumentsBinder* args) override; int3 GetGridSize() const override; ConverterToConvWeights(ConverterToConvWeights&& operation) = default; ConverterToConvWeights& operator=(ConverterToConvWeights&& operation) = default; ConverterToConvWeights(const ConverterToConvWeights&) = delete; ConverterToConvWeights& operator=(const ConverterToConvWeights&) = delete; private: std::string GetConverterToConvWeightsCode(); OHWI GetWeightsSize() const; WeightsDescription weights_desc_; Layout input_layout_; }; ConverterToConvWeights CreateConverterToConvWeights( const OperationDef& definition, const WeightsDescription& weights_desc, Layout input_layout); } } #endif #include "tensorflow/lite/delegates/gpu/common/tasks/conv_weights_converter.h" #include <cstring> #include <memory> #include <string> #include <utility> #include "tensorflow/lite/delegates/gpu/common/task/util.h" namespace tflite { namespace gpu { ConverterToConvWeights::ConverterToConvWeights( const OperationDef& definition, const WeightsDescription& weights_desc, Layout input_layout) : GPUOperation(definition), weights_desc_(weights_desc), input_layout_(input_layout) { code_ = GetConverterToConvWeightsCode(); } std::string ConverterToConvWeights::GetConverterToConvWeightsCode() { AddSrcTensor("src_tensor", definition_.src_tensors[0]); args_.AddFloat("mask_x"); args_.AddFloat("mask_y"); args_.AddFloat("mask_z"); args_.AddFloat("mask_w"); args_.AddInt("out_ch"); args_.AddInt("out_ch_x4_groups"); args_.AddInt("in_ch"); args_.AddInt("in_ch_x4_groups"); args_.AddInt("kernel_width"); args_.AddInt("kernel_height"); args_.AddInt("kernel_spatial_size"); if (weights_desc_.layout == WeightsLayout::kOICustomSpatialI4O4 || weights_desc_.layout == WeightsLayout::kOICustomSpatialO4I4) { std::vector<int32_t> remap(weights_desc_.spatial_remap.size()); for (int i = 0; i < remap.size(); ++i) { remap[i] = weights_desc_.spatial_remap[i]; } BufferDescriptor desc; desc.element_type = DataType::INT32; desc.element_size = 1; desc.memory_type = MemoryType::GLOBAL; desc.size = remap.size() * sizeof(int32_t); desc.data.resize(desc.size); std::memcpy(desc.data.data(), remap.data(), desc.size); args_.AddObject("spatial_remap", std::make_unique<BufferDescriptor>(std::move(desc))); } std::string c; c += "MAIN_FUNCTION($0) {\n"; c += " int O = GLOBAL_ID_0;\n"; c += " int I = GLOBAL_ID_1;\n"; c += " int spatial_linear = GLOBAL_ID_2;\n"; c += " if (O >= args.out_ch_x4_groups) return;\n"; c += " if (I >= args.in_ch_x4_groups) return;\n"; c += " if (spatial_linear >= args.kernel_spatial_size) return;\n"; if (weights_desc_.layout == WeightsLayout::kOICustomSpatialI4O4 || weights_desc_.layout == WeightsLayout::kOICustomSpatialO4I4) { c += " int linear_remap = args.spatial_remap.Read(spatial_linear);\n"; c += " int W = linear_remap % args.kernel_width;\n"; c += " int H = linear_remap / args.kernel_width;\n"; } else { c += " int W 
= spatial_linear % args.kernel_width;\n"; c += " int H = spatial_linear / args.kernel_width;\n"; } c += " FLT4 v0 = INIT_FLT4(0.0f);\n"; c += " FLT4 v1 = INIT_FLT4(0.0f);\n"; c += " FLT4 v2 = INIT_FLT4(0.0f);\n"; c += " FLT4 v3 = INIT_FLT4(0.0f);\n"; if (input_layout_ == Layout::OHWI) { c += " if (O * 4 < args.out_ch) {\n"; c += " v0 = args.src_tensor.Read(W, H, I, O * 4);\n"; c += " }\n"; c += " if (O * 4 + 1 < args.out_ch) {\n"; c += " v1 = args.src_tensor.Read(W, H, I, O * 4 + 1);\n"; c += " }\n"; c += " if (O * 4 + 2 < args.out_ch) {\n"; c += " v2 = args.src_tensor.Read(W, H, I, O * 4 + 2);\n"; c += " }\n"; c += " if (O * 4 + 3 < args.out_ch) {\n"; c += " v3 = args.src_tensor.Read(W, H, I, O * 4 + 3);\n"; c += " }\n"; c += " if (I == args.src_tensor.Slices() - 1) {\n"; c += " FLT4 mask = INIT_FLT4v4(args.mask_x, args.mask_y, args.mask_z, " "args.mask_w);\n"; c += " v0 *= mask;\n"; c += " v1 *= mask;\n"; c += " v2 *= mask;\n"; c += " v3 *= mask;\n"; c += " }\n"; } else if (input_layout_ == Layout::HWIO) { c += " if (I * 4 < args.in_ch && O < args.src_tensor.Slices()) {\n"; c += " v0 = args.src_tensor.Read(I * 4, W, O, H);\n"; c += " }\n"; c += " if (I * 4 + 1 < args.in_ch && O < args.src_tensor.Slices()) {\n"; c += " v1 = args.src_tensor.Read(I * 4 + 1, W, O, H);\n"; c += " }\n"; c += " if (I * 4 + 2 < args.in_ch && O < args.src_tensor.Slices()) {\n"; c += " v2 = args.src_tensor.Read(I * 4 + 2, W, O, H);\n"; c += " }\n"; c += " if (I * 4 + 3 < args.in_ch && O < args.src_tensor.Slices()) {\n"; c += " v3 = args.src_tensor.Read(I * 4 + 3, W, O, H);\n"; c += " }\n"; c += " if (O == args.src_tensor.Slices() - 1) {\n"; c += " FLT4 mask = INIT_FLT4v4(args.mask_x, args.mask_y, args.mask_z, " "args.mask_w);\n"; c += " v0 *= mask;\n"; c += " v1 *= mask;\n"; c += " v2 *= mask;\n"; c += " v3 *= mask;\n"; c += " }\n"; } const bool need_transpose = (input_layout_ == Layout::HWIO && weights_desc_.IsO4I4()) || (input_layout_ == Layout::OHWI && weights_desc_.IsI4O4()); if (need_transpose) { c += " FLT4 r0 = INIT_FLT4v4(v0.x, v1.x, v2.x, v3.x);\n"; c += " FLT4 r1 = INIT_FLT4v4(v0.y, v1.y, v2.y, v3.y);\n"; c += " FLT4 r2 = INIT_FLT4v4(v0.z, v1.z, v2.z, v3.z);\n"; c += " FLT4 r3 = INIT_FLT4v4(v0.w, v1.w, v2.w, v3.w);\n"; } else { c += " FLT4 r0 = v0;\n"; c += " FLT4 r1 = v1;\n"; c += " FLT4 r2 = v2;\n"; c += " FLT4 r3 = v3;\n"; } if (weights_desc_.layout == WeightsLayout::k2DX4I4YIsSpatialIAndXIsOOGroupO4 || weights_desc_.layout == WeightsLayout::k2DX4O4YIsSpatialIAndXIsOOGroupI4) { AddDstTensor("dst_tensor0", definition_.dst_tensors[0]); AddDstTensor("dst_tensor1", definition_.dst_tensors[1]); AddDstTensor("dst_tensor2", definition_.dst_tensors[2]); AddDstTensor("dst_tensor3", definition_.dst_tensors[3]); c += " int yc = spatial_linear * args.in_ch_x4_groups + I;\n"; c += " args.dst_tensor0.Write2D(r0, O, yc);\n"; c += " args.dst_tensor1.Write2D(r1, O, yc);\n"; c += " args.dst_tensor2.Write2D(r2, O, yc);\n"; c += " args.dst_tensor3.Write2D(r3, O, yc);\n"; c += "}\n"; } else { AddDstTensor("dst_tensor", definition_.dst_tensors[0]); c += " int OUTPUT_GROUP_SIZE = " + std::to_string(weights_desc_.GetOutputGroupSize()) + ";\n"; c += " int d_index = (O * 4) / (OUTPUT_GROUP_SIZE * 4);\n"; c += " int k_index = ((O * 4) % (OUTPUT_GROUP_SIZE * 4)) / 4;\n"; std::string index; if (weights_desc_.layout == WeightsLayout::kOICustomSpatialI4O4 || weights_desc_.layout == WeightsLayout::kOICustomSpatialO4I4) { index = "(d_index * args.in_ch_x4_groups + I) * args.kernel_spatial_size + " "spatial_linear"; } else if 
(weights_desc_.layout == WeightsLayout::kOSpatialIOGroupI4O4 || weights_desc_.layout == WeightsLayout::kOSpatialIOGroupO4I4) { index = "(d_index * args.kernel_spatial_size + spatial_linear) * " "args.in_ch_x4_groups + I"; } c += " int dst_offset = (" + index + ") * OUTPUT_GROUP_SIZE + k_index;\n"; c += " args.dst_tensor.WriteLinear(r0, dst_offset * 4 + 0);\n"; c += " args.dst_tensor.WriteLinear(r1, dst_offset * 4 + 1);\n"; c += " args.dst_tensor.WriteLinear(r2, dst_offset * 4 + 2);\n"; c += " args.dst_tensor.WriteLinear(r3, dst_offset * 4 + 3);\n"; c += "}\n"; } return c; } OHWI ConverterToConvWeights::GetWeightsSize() const { int output_channels = 0; int input_channels = 0; int kernel_width = 0; int kernel_height = 0; if (input_layout_ == Layout::HWIO) { output_channels = src_[0]->Channels(); input_channels = src_[0]->Width(); kernel_width = src_[0]->Height(); kernel_height = src_[0]->Batch(); } else if (input_layout_ == Layout::OHWI) { output_channels = src_[0]->Batch(); input_channels = src_[0]->Channels(); kernel_width = src_[0]->Width(); kernel_height = src_[0]->Height(); } return OHWI(output_channels, kernel_height, kernel_width, input_channels); } absl::Status ConverterToConvWeights::BindArguments(ArgumentsBinder* args) { const auto& weights_shape = GetWeightsSize(); const int output_channels_x4_groups = DivideRoundUp( AlignByN(weights_shape.o, 4 * weights_desc_.GetOutputGroupSize()), 4); RETURN_IF_ERROR(args->SetInt("out_ch", weights_shape.o)); RETURN_IF_ERROR(args->SetInt("out_ch_x4_groups", output_channels_x4_groups)); RETURN_IF_ERROR(args->SetInt("in_ch", weights_shape.i)); RETURN_IF_ERROR( args->SetInt("in_ch_x4_groups", DivideRoundUp(weights_shape.i, 4))); RETURN_IF_ERROR(args->SetInt("kernel_width", weights_shape.w)); RETURN_IF_ERROR(args->SetInt("kernel_height", weights_shape.h)); RETURN_IF_ERROR( args->SetInt("kernel_spatial_size", weights_shape.w * weights_shape.h)); float4 mask = GetMaskForLastPlane(src_[0]->Channels()); RETURN_IF_ERROR(args->SetFloat("mask_x", mask.x)); RETURN_IF_ERROR(args->SetFloat("mask_y", mask.y)); RETURN_IF_ERROR(args->SetFloat("mask_z", mask.z)); return args->SetFloat("mask_w", mask.w); } int3 ConverterToConvWeights::GetGridSize() const { const auto& weights_shape = GetWeightsSize(); const int out_group_size = weights_desc_.GetOutputGroupSize(); const int grid_x = DivideRoundUp(AlignByN(weights_shape.o, 4 * out_group_size), 4); const int grid_y = DivideRoundUp(weights_shape.i, 4); const int grid_z = weights_shape.w * weights_shape.h; return int3(grid_x, grid_y, grid_z); } ConverterToConvWeights CreateConverterToConvWeights( const OperationDef& definition, const WeightsDescription& weights_desc, Layout input_layout) { return ConverterToConvWeights(definition, weights_desc, input_layout); } } }
#include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/tasks/conv_weights_converter_test_util.h" namespace tflite { namespace gpu { namespace cl { TEST_F(OpenCLOperationTest, ConverterToConvWeights1x1OutX4) { const auto status = ConverterToConvWeights1x1OutX4Test(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } TEST_F(OpenCLOperationTest, ConverterToConvWeights1x1OutX4Unaligned) { const auto status = ConverterToConvWeights1x1OutX4UnalignedTest(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } TEST_F(OpenCLOperationTest, ConverterToConvWeights1x1OutX2) { const auto status = ConverterToConvWeights1x1OutX2Test(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } TEST_F(OpenCLOperationTest, ConverterToConvWeightsOutX2) { const auto status = ConverterToConvWeightsOutX2Test(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } TEST_F(OpenCLOperationTest, ConverterToConvTransposedWeights4x4) { const auto status = ConverterToConvTransposedWeights4x4Test(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } TEST_F(OpenCLOperationTest, ConverterToConvWeights4xTextures) { const auto status = ConverterToConvWeights4xTexturesTest(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } } } }
1,014
cpp
tensorflow/tensorflow
convolution_transposed_4x4
tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_4x4.cc
tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_4x4_test.cc
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_CONVOLUTION_TRANSPOSED_4X4_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_CONVOLUTION_TRANSPOSED_4X4_H_ #include <string> #include <vector> #include "tensorflow/lite/delegates/gpu/common/data_type.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/task/buffer_desc.h" #include "tensorflow/lite/delegates/gpu/common/task/gpu_operation.h" #include "tensorflow/lite/delegates/gpu/common/task/tensor_desc.h" #include "tensorflow/lite/delegates/gpu/common/task/weights_conversion.h" #include "tensorflow/lite/delegates/gpu/common/task/weights_layout.h" #include "tensorflow/lite/delegates/gpu/common/tensor.h" #include "tensorflow/lite/delegates/gpu/common/types.h" namespace tflite { namespace gpu { class ConvolutionTransposed4x4 : public GPUOperation { public: ConvolutionTransposed4x4() = default; void GetPossibleKernelWorkGroups( TuningType tuning_type, const GpuInfo& gpu_info, const KernelInfo& kernel_info, std::vector<int3>* work_groups) const override { work_groups->push_back(work_group_size_); } absl::Status BindArguments(ArgumentsBinder* args) override; int3 GetGridSize() const override; ConvolutionTransposed4x4(ConvolutionTransposed4x4&& operation) = default; ConvolutionTransposed4x4& operator=(ConvolutionTransposed4x4&& operation) = default; ConvolutionTransposed4x4(const ConvolutionTransposed4x4&) = delete; ConvolutionTransposed4x4& operator=(const ConvolutionTransposed4x4&) = delete; WeightsDescription GetWeightsDescription() const { WeightsDescription desc; desc.type = DeduceDataTypeFromPrecision(definition_.precision); desc.layout = weights_layout_; desc.spatial_remap = GetSpatialWeightsRemap(); return desc; } enum class WeightsUploadType { LOCAL_MEM_ASYNC, LOCAL_MEM_BY_THREADS, GLOBAL_MEM, CONSTANT_MEM, }; private: ConvolutionTransposed4x4(const OperationDef& definition, const GpuInfo& gpu_info); friend ConvolutionTransposed4x4 CreateConvolutionTransposed4x4( const GpuInfo& gpu_info, const OperationDef& definition, const ConvolutionTransposedAttributes& attr); friend ConvolutionTransposed4x4 CreateConvolutionTransposed4x4DynamicWeights( const GpuInfo& gpu_info, const OperationDef& definition, const ConvolutionTransposedAttributes& attr); void UploadWeights( const tflite::gpu::Tensor<OHWI, DataType::FLOAT32>& weights, WeightsUploadType weights_upload_type); std::vector<int> GetSpatialWeightsRemap() const; std::string GenerateConvolutionTransposedCode( const GpuInfo& gpu_info, const OperationDef& op_def, WeightsUploadType weights_upload_type); WeightsLayout weights_layout_; }; bool IsConvolutionTransposed4x4Supported( const OperationDef& definition, const ConvolutionTransposedAttributes& attr); ConvolutionTransposed4x4 CreateConvolutionTransposed4x4( const GpuInfo& gpu_info, const OperationDef& definition, const ConvolutionTransposedAttributes& attr); ConvolutionTransposed4x4 CreateConvolutionTransposed4x4DynamicWeights( const GpuInfo& gpu_info, const OperationDef& definition, const ConvolutionTransposedAttributes& attr); } } #endif #include "tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_4x4.h" #include <memory> #include <string> #include <utility> #include <vector> #include "tensorflow/lite/delegates/gpu/common/task/work_group_picking.h" namespace tflite { namespace gpu { namespace { ConvolutionTransposed4x4::WeightsUploadType 
GetBestWeightsUploadType( const GpuInfo& gpu_info) { ConvolutionTransposed4x4::WeightsUploadType weights_upload_type = ConvolutionTransposed4x4::WeightsUploadType::GLOBAL_MEM; if (gpu_info.IsApple()) { if (gpu_info.apple_info.IsBionic()) { weights_upload_type = ConvolutionTransposed4x4::WeightsUploadType::GLOBAL_MEM; } else { weights_upload_type = ConvolutionTransposed4x4::WeightsUploadType::LOCAL_MEM_BY_THREADS; } } else if (gpu_info.IsPowerVR()) { weights_upload_type = ConvolutionTransposed4x4::WeightsUploadType::LOCAL_MEM_ASYNC; } else if (gpu_info.IsNvidia() || gpu_info.IsIntel()) { weights_upload_type = ConvolutionTransposed4x4::WeightsUploadType::LOCAL_MEM_BY_THREADS; } else if (gpu_info.IsAMD()) { weights_upload_type = ConvolutionTransposed4x4::WeightsUploadType::CONSTANT_MEM; } else { weights_upload_type = ConvolutionTransposed4x4::WeightsUploadType::GLOBAL_MEM; } return weights_upload_type; } } ConvolutionTransposed4x4::ConvolutionTransposed4x4( const OperationDef& definition, const GpuInfo& gpu_info) : GPUOperation(definition) { work_group_size_ = int3(8, 4, 1); if (gpu_info.IsApple()) { work_group_launch_order_ = int3(2, 0, 1); } if (gpu_info.IsApple()) { weights_layout_ = WeightsLayout::kOICustomSpatialO4I4; } else { weights_layout_ = WeightsLayout::kOICustomSpatialI4O4; } code_ = GenerateConvolutionTransposedCode(gpu_info, definition_, GetBestWeightsUploadType(gpu_info)); if (definition_.precision == CalculationsPrecision::F16 && gpu_info.IsPowerVR()) { compiler_options_.push_back(CompilerOptions::kClFastRelaxedMath); } } std::string ConvolutionTransposed4x4::GenerateConvolutionTransposedCode( const GpuInfo& gpu_info, const OperationDef& op_def, WeightsUploadType weights_upload_type) { auto src_desc = op_def.src_tensors[0]; AddSrcTensor("src_tensor", src_desc); AddDstTensor("dst_tensor", op_def.dst_tensors[0]); if (op_def.src_tensors.size() == 2) { BufferDescriptor desc; desc.element_type = op_def.src_tensors[1].GetDataType(); desc.element_size = 4; desc.memory_type = weights_upload_type == ConvolutionTransposed4x4::WeightsUploadType::CONSTANT_MEM ? MemoryType::CONSTANT : MemoryType::GLOBAL; AddSrcBuffer("weights", desc); } args_.AddInt("filter_offset"); const bool need_local_mem = weights_upload_type == ConvolutionTransposed4x4::WeightsUploadType::LOCAL_MEM_BY_THREADS || weights_upload_type == ConvolutionTransposed4x4::WeightsUploadType::LOCAL_MEM_ASYNC; const int wg_total_size = work_group_size_.x * work_group_size_.y * work_group_size_.z; const std::string barrier = wg_total_size == 32 && gpu_info.IsWaveSizeEqualTo32() ? 
"SIMD_LOCAL_MEM_BARRIER" : "LOCAL_MEM_BARRIER"; std::string c; if (GetWeightsDescription().IsI4O4()) { switch (op_def.precision) { case CalculationsPrecision::F32: case CalculationsPrecision::F16: c += "#define CONV(R, SRC, F) \\\n"; c += " R += SRC.x * weights_cache[F]; \\\n"; c += " R += SRC.y * weights_cache[F + 1]; \\\n"; c += " R += SRC.z * weights_cache[F + 2]; \\\n"; c += " R += SRC.w * weights_cache[F + 3]; \n"; break; case CalculationsPrecision::F32_F16: c += "#define CONV(R, SRC, F) \\\n"; c += " R += TO_ACCUM_TYPE(SRC.x * weights_cache[F] + SRC.y * " "weights_cache[F + 1] + SRC.z * weights_cache[F + 2] + SRC.w * " "weights_cache[F + 3]);\n"; break; } } else { c += "#define CONV(R, SRC, F) \\\n"; c += " R.x += dot(SRC, weights_cache[F]); \\\n"; c += " R.y += dot(SRC, weights_cache[F + 1]); \\\n"; c += " R.z += dot(SRC, weights_cache[F + 2]); \\\n"; c += " R.w += dot(SRC, weights_cache[F + 3]); \n"; } const std::string weights_space = weights_upload_type == ConvolutionTransposed4x4::WeightsUploadType::CONSTANT_MEM ? "__constant" : "__global"; if (gpu_info.IsApiOpenCl()) { c += "__attribute__((reqd_work_group_size(8, 4, 1)))\n"; } c += "MAIN_FUNCTION($0) {\n"; std::string grid_coords[3]; int3 launch_remap; launch_remap[work_group_launch_order_.x] = 0; launch_remap[work_group_launch_order_.y] = 1; launch_remap[work_group_launch_order_.z] = 2; if (work_group_launch_order_[0] == 0) { grid_coords[0] = "GLOBAL_ID_0"; } else { grid_coords[0] = "(GROUP_ID_" + std::to_string(launch_remap[0]) + " * GROUP_SIZE_0 + LOCAL_ID_0);\n"; } if (work_group_launch_order_[1] == 1) { grid_coords[1] = "GLOBAL_ID_1"; } else { grid_coords[1] = "(GROUP_ID_" + std::to_string(launch_remap[1]) + " * GROUP_SIZE_1 + LOCAL_ID_1);\n"; } if (work_group_launch_order_[2] == 2) { grid_coords[2] = "GLOBAL_ID_2"; } else { grid_coords[2] = "(GROUP_ID_" + std::to_string(launch_remap[2]) + " * GROUP_SIZE_2 + LOCAL_ID_2);\n"; } if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) { c += " int linear_id = " + grid_coords[0] + ";\n"; c += " int X = linear_id / args.dst_tensor.Batch();\n"; c += " int B = linear_id % args.dst_tensor.Batch();\n"; c += " args.src_tensor.SetBatchRef(B);\n"; c += " args.dst_tensor.SetBatchRef(B);\n"; } else { c += " int X = " + grid_coords[0] + ";\n"; } c += " int Y = " + grid_coords[1] + ";\n"; c += " int Z = " + grid_coords[2] + ";\n"; if (!need_local_mem) { c += " if (X * 2 > args.dst_tensor.Width() || Y * 2 > " "args.dst_tensor.Height() || Z >= args.dst_tensor.Slices()) " "return;\n"; } c += " ACCUM_FLT4 r0 = INIT_ACCUM_FLT4(0.0f);\n"; c += " ACCUM_FLT4 r1 = INIT_ACCUM_FLT4(0.0f);\n"; c += " ACCUM_FLT4 r2 = INIT_ACCUM_FLT4(0.0f);\n"; c += " ACCUM_FLT4 r3 = INIT_ACCUM_FLT4(0.0f);\n"; c += " int f_offset = Z * args.filter_offset;\n"; if (need_local_mem) { c += " __local FLT4 weights_cache[64];\n"; } if (weights_upload_type == ConvolutionTransposed4x4::WeightsUploadType::LOCAL_MEM_BY_THREADS) { c += " int local_id = LOCAL_ID_1 * 8 + LOCAL_ID_0;\n"; } if (!src_desc.SupportsZeroClamp(Axis::WIDTH, gpu_info)) { c += " bool in_x0 = X - 1 >= 0 && X - 1 < args.src_tensor.Width();\n"; c += " bool in_x1 = X >= 0 && X < args.src_tensor.Width();\n"; } if (!src_desc.SupportsZeroClamp(Axis::HEIGHT, gpu_info)) { c += " bool in_y0 = Y - 1 >= 0 && Y - 1 < args.src_tensor.Height();\n"; c += " bool in_y1 = Y >= 0 && Y < args.src_tensor.Height();\n"; } auto generate_check = [&](int x, int y) { std::string check; const std::vector<Axis> axes{Axis::WIDTH, Axis::HEIGHT}; const std::vector<std::string> names{"in_x" + 
std::to_string(x), "in_y" + std::to_string(y)}; for (int i = 0; i < axes.size(); ++i) { const auto& axis = axes[i]; if (src_desc.HasAxis(axis) && !src_desc.SupportsZeroClamp(axis, gpu_info)) { if (!check.empty()) { check += " && "; } check += names[i]; } } return check; }; if (src_desc.IsLinear()) { if (src_desc.ReturnsZeroForNegOneRead(gpu_info)) { c += " int addr_0 = args.src_tensor.GetAddress(X - 1, Y - 1, 0);\n"; c += " int addr_1 = args.src_tensor.GetAddress(X, Y - 1, 0);\n"; c += " int addr_2 = args.src_tensor.GetAddress(X - 1, Y, 0);\n"; c += " int addr_3 = args.src_tensor.GetAddress(X, Y, 0);\n"; c += " addr_0 = select(-1, addr_0, (in_x0 && in_y0));\n"; c += " addr_1 = select(-1, addr_1, (in_x1 && in_y0));\n"; c += " addr_2 = select(-1, addr_2, (in_x0 && in_y1));\n"; c += " addr_3 = select(-1, addr_3, (in_x1 && in_y1));\n"; c += " int dz_0 = select(0, args.src_tensor.SliceStride(), (in_x0 && " "in_y0));\n"; c += " int dz_1 = select(0, args.src_tensor.SliceStride(), (in_x1 && " "in_y0));\n"; c += " int dz_2 = select(0, args.src_tensor.SliceStride(), (in_x0 && " "in_y1));\n"; c += " int dz_3 = select(0, args.src_tensor.SliceStride(), (in_x1 && " "in_y1));\n"; } else { c += " int xc0 = clamp(X - 1, 0, args.src_tensor.Width() - 1);\n"; c += " int xc1 = clamp(X, 0, args.src_tensor.Width() - 1);\n"; c += " int yc0 = clamp(Y - 1, 0, args.src_tensor.Height() - 1);\n"; c += " int yc1 = clamp(Y, 0, args.src_tensor.Height() - 1);\n"; c += " int addr_0 = args.src_tensor.GetAddress(xc0, yc0, 0);\n"; c += " int addr_1 = args.src_tensor.GetAddress(xc1, yc0, 0);\n"; c += " int addr_2 = args.src_tensor.GetAddress(xc0, yc1, 0);\n"; c += " int addr_3 = args.src_tensor.GetAddress(xc1, yc1, 0);\n"; c += " int dz = args.src_tensor.SliceStride();\n"; } } auto read_src = [&](int x, int y) { if (src_desc.IsLinear()) { const std::string id = std::to_string(y * 2 + x); const std::string addr = "addr_" + std::to_string(y * 2 + x); if (src_desc.ReturnsZeroForNegOneRead(gpu_info)) { return "args.src_tensor.Read(" + addr + "); " + addr + " += dz_" + id + ";"; } else { return "args.src_tensor.Read(" + addr + ") * INIT_FLT(in_x" + std::to_string(x) + " && in_y" + std::to_string(y) + "); " + addr + " += dz;"; } } else { std::string check = generate_check(x, y); if (!check.empty()) { check = " * INIT_FLT(" + check + ")"; } return "args.src_tensor.Read(X + " + std::to_string(x - 1) + ", Y + " + std::to_string(y - 1) + ", s)" + check + ";"; } }; c += " for (int s = 0; s < args.src_tensor.Slices(); ++s) {\n"; if (need_local_mem) { c += " " + barrier + ";\n"; } if (weights_upload_type == ConvolutionTransposed4x4::WeightsUploadType::LOCAL_MEM_ASYNC) { c += " async_work_group_copy(weights_cache, " "args.weights.GetPtr(f_offset), 64, " "0);\n"; } else if (weights_upload_type == ConvolutionTransposed4x4::WeightsUploadType:: LOCAL_MEM_BY_THREADS) { c += " weights_cache[local_id] = args.weights.Read(f_offset + " "local_id);\n"; c += " weights_cache[local_id + 32] = args.weights.Read(f_offset + " "local_id + " "32);\n"; } else { c += " " + weights_space + " FLT4* weights_cache = args.weights.GetPtr(f_offset);\n"; } c += " FLT4 src0 = " + read_src(0, 0) + ";\n"; c += " FLT4 src1 = " + read_src(1, 0) + ";\n"; c += " FLT4 src2 = " + read_src(0, 1) + ";\n"; c += " FLT4 src3 = " + read_src(1, 1) + ";\n"; c += " f_offset += 64;\n"; if (need_local_mem) { c += " " + barrier + ";\n"; } c += " CONV(r0, src0, 0);\n"; c += " CONV(r1, src0, 4);\n"; c += " CONV(r2, src0, 8);\n"; c += " CONV(r3, src0, 12);\n"; c += " CONV(r0, src1, 16);\n"; 
c += " CONV(r1, src1, 20);\n"; c += " CONV(r2, src1, 24);\n"; c += " CONV(r3, src1, 28);\n"; c += " CONV(r0, src2, 32);\n"; c += " CONV(r1, src2, 36);\n"; c += " CONV(r2, src2, 40);\n"; c += " CONV(r3, src2, 44);\n"; c += " CONV(r0, src3, 48);\n"; c += " CONV(r1, src3, 52);\n"; c += " CONV(r2, src3, 56);\n"; c += " CONV(r3, src3, 60);\n"; c += " }\n"; c += "\n"; if (need_local_mem) { c += " if (X * 2 > args.dst_tensor.Width() || Y * 2 > " "args.dst_tensor.Height() || Z >= args.dst_tensor.Slices()) " "return;\n"; } c += " X = X * 2 - 1;\n"; c += " Y = Y * 2 - 1;\n"; c += "\n"; c += " FLT4 bias_val = args.biases.Read(Z);\n"; c += " if (X >= 0 && Y >= 0) {\n"; c += " FLT4 result = TO_FLT4(r0) + bias_val;\n"; c += " args.dst_tensor.Write(result, X, Y, Z);\n"; c += " }\n"; c += " if (X + 1 < args.dst_tensor.Width() && Y >= 0) {\n"; c += " FLT4 result = TO_FLT4(r1) + bias_val;\n"; c += " args.dst_tensor.Write(result, X + 1, Y, Z);\n"; c += " }\n"; c += " if (X >= 0 && Y + 1 < args.dst_tensor.Height()) {\n"; c += " FLT4 result = TO_FLT4(r2) + bias_val;\n"; c += " args.dst_tensor.Write(result, X, Y + 1, Z);\n"; c += " }\n"; c += " if (X + 1 < args.dst_tensor.Width() && Y + 1 < " "args.dst_tensor.Height()) {\n"; c += " FLT4 result = TO_FLT4(r3) + bias_val;\n"; c += " args.dst_tensor.Write(result, X + 1, Y + 1, Z);\n"; c += " }\n"; c += "}\n"; return c; } absl::Status ConvolutionTransposed4x4::BindArguments(ArgumentsBinder* args) { return args->SetInt("filter_offset", 4 * 16 * src_[0]->Slices()); } int3 ConvolutionTransposed4x4::GetGridSize() const { const int grid_x = DivideRoundUp(dst_[0]->Width() + 2, 2) * dst_[0]->Batch(); const int grid_y = DivideRoundUp(dst_[0]->Height() + 2, 2); const int grid_z = dst_[0]->Slices(); return int3(grid_x, grid_y, grid_z); } std::vector<int> ConvolutionTransposed4x4::GetSpatialWeightsRemap() const { return std::vector<int>{10, 11, 14, 15, 8, 9, 12, 13, 2, 3, 6, 7, 0, 1, 4, 5}; } void ConvolutionTransposed4x4::UploadWeights( const tflite::gpu::Tensor<OHWI, DataType::FLOAT32>& weights, WeightsUploadType weights_upload_type) { const auto weights_desc = GetWeightsDescription(); const int flt_count = GetTotalElementsCountForLayout(weights_desc, weights.shape); BufferDescriptor desc; desc.element_type = weights_desc.type; desc.element_size = 4; desc.memory_type = weights_upload_type == ConvolutionTransposed4x4::WeightsUploadType::CONSTANT_MEM ? 
MemoryType::CONSTANT : MemoryType::GLOBAL; desc.size = flt_count * SizeOf(desc.element_type); desc.data.resize(desc.size); RearrangeWeights(weights, weights_desc, absl::MakeSpan(desc.data)); args_.AddObject("weights", std::make_unique<BufferDescriptor>(std::move(desc))); } bool IsConvolutionTransposed4x4Supported( const OperationDef& definition, const ConvolutionTransposedAttributes& attr) { return attr.weights.shape.w == 4 && attr.weights.shape.h == 4 && attr.stride.w == 2 && attr.stride.h == 2 && attr.padding.prepended.w == 1 && attr.padding.prepended.h == 1; } ConvolutionTransposed4x4 CreateConvolutionTransposed4x4( const GpuInfo& gpu_info, const OperationDef& definition, const ConvolutionTransposedAttributes& attr) { ConvolutionTransposed4x4 result(definition, gpu_info); result.UploadWeights(attr.weights, GetBestWeightsUploadType(gpu_info)); TensorDescriptor bias_tensor_desc = CreateConstantLinearTensorDescriptor( gpu_info, definition.src_tensors[0].GetDataType(), attr.bias); result.args_.AddObject("biases", std::make_unique<TensorDescriptor>( std::move(bias_tensor_desc))); return result; } ConvolutionTransposed4x4 CreateConvolutionTransposed4x4DynamicWeights( const GpuInfo& gpu_info, const OperationDef& definition, const ConvolutionTransposedAttributes& attr) { OperationDef new_def = definition; new_def.src_tensors = { definition.src_tensors[0]}; const DataType weights_type = definition.GetDataType(); new_def.src_tensors.push_back( {weights_type, TensorStorageType::BUFFER, Layout::HWC}); ConvolutionTransposed4x4 result(new_def, gpu_info); TensorDescriptor bias_tensor_desc = CreateConstantLinearTensorDescriptor( gpu_info, definition.src_tensors[0].GetDataType(), attr.bias); result.args_.AddObject("biases", std::make_unique<TensorDescriptor>( std::move(bias_tensor_desc))); return result; } } }
#include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_4x4_test_util.h" namespace tflite { namespace gpu { namespace cl { namespace { TEST_F(OpenCLOperationTest, ConvolutionTransposed4x4SimpleWeights) { auto status = ConvolutionTransposed4x4SimpleWeightsTest(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } } } } }
1,015
cpp
tensorflow/tensorflow
convolution_transposed_thin
tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_thin.cc
tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_thin_test.cc
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_CONVOLUTION_TRANSPOSED_THIN_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_CONVOLUTION_TRANSPOSED_THIN_H_ #include <memory> #include <string> #include <utility> #include <vector> #include "tensorflow/lite/delegates/gpu/common/data_type.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/task/buffer_desc.h" #include "tensorflow/lite/delegates/gpu/common/task/gpu_operation.h" #include "tensorflow/lite/delegates/gpu/common/task/tensor_desc.h" #include "tensorflow/lite/delegates/gpu/common/tensor.h" #include "tensorflow/lite/delegates/gpu/common/types.h" namespace tflite { namespace gpu { class ConvolutionTransposedThin : public GPUOperation { public: ConvolutionTransposedThin() = default; int3 GetGridSize() const override; ConvolutionTransposedThin(ConvolutionTransposedThin&& operation); ConvolutionTransposedThin& operator=(ConvolutionTransposedThin&& operation); ConvolutionTransposedThin(const ConvolutionTransposedThin&) = delete; ConvolutionTransposedThin& operator=(const ConvolutionTransposedThin&) = delete; private: friend ConvolutionTransposedThin CreateConvolutionTransposedThin( const GpuInfo& gpu_info, const OperationDef& definition, const ConvolutionTransposedAttributes& attr); ConvolutionTransposedThin(const OperationDef& definition, const ConvolutionTransposedAttributes& attr, const GpuInfo& gpu_info); template <DataType T> void UploadData(const tflite::gpu::Tensor<OHWI, T>& weights, const tflite::gpu::Tensor<Linear, T>& biases); template <DataType S, typename T> void RearrangeWeightsData(const tflite::gpu::Tensor<OHWI, S>& weights, absl::Span<T> dst); std::string GenerateConvolutionTransposedCode(const OperationDef& op_def, int src_depth, int dst_channels, const int2& kernel_size); }; template <DataType T> void ConvolutionTransposedThin::UploadData( const tflite::gpu::Tensor<OHWI, T>& weights, const tflite::gpu::Tensor<Linear, T>& biases) { const int src_depth = DivideRoundUp(weights.shape.i, 4); const int flt4_count = weights.shape.w * weights.shape.h * src_depth * weights.shape.o; const bool f32_weights = definition_.precision == CalculationsPrecision::F32; const int flt4_size = f32_weights ? sizeof(float4) : sizeof(half4); BufferDescriptor desc; desc.element_type = f32_weights ? 
DataType::FLOAT32 : DataType::FLOAT16; desc.element_size = 4; desc.memory_type = MemoryType::CONSTANT; desc.size = flt4_size * (flt4_count + 1); desc.data.resize(desc.size); if (f32_weights) { float4* gpu_data = reinterpret_cast<float4*>(desc.data.data()); RearrangeWeightsData(weights, absl::MakeSpan(gpu_data, flt4_count)); float4 bias_value(0.0f); for (int i = 0; i < weights.shape.o; ++i) { bias_value[i] = biases.data[i]; } gpu_data[flt4_count] = bias_value; } else { half4* gpu_data = reinterpret_cast<half4*>(desc.data.data()); RearrangeWeightsData(weights, absl::MakeSpan(gpu_data, flt4_count)); half4 bias_value(0.0f); for (int i = 0; i < weights.shape.o; ++i) { bias_value[i] = biases.data[i]; } gpu_data[flt4_count] = bias_value; } args_.AddObject("weights", std::make_unique<BufferDescriptor>(std::move(desc))); } template <DataType S, typename T> void ConvolutionTransposedThin::RearrangeWeightsData( const tflite::gpu::Tensor<OHWI, S>& weights, absl::Span<T> dst) { const int src_depth = DivideRoundUp(weights.shape.i, 4); const int kernel_x = weights.shape.w; const int kernel_y = weights.shape.h; int counter = 0; for (int s = 0; s < src_depth; ++s) { for (int y = 0; y < kernel_y; ++y) { for (int x = 0; x < kernel_x; ++x) { std::vector<T> filters(weights.shape.o); for (int j = 0; j < weights.shape.o; ++j) { for (int i = 0; i < 4; ++i) { const int s_ch = s * 4 + i; const int d_ch = j; if (s_ch < weights.shape.i && d_ch < weights.shape.o) { const int f_index = weights.shape.LinearIndex({d_ch, y, x, s_ch}); filters[j][i] = weights.data[f_index]; } else { filters[j][i] = 0.0f; } } } for (int j = 0; j < weights.shape.o; ++j) { dst[counter++] = filters[j]; } } } } } bool IsConvolutionTransposedThinSupported( const ConvolutionTransposedAttributes& attr); ConvolutionTransposedThin CreateConvolutionTransposedThin( const GpuInfo& gpu_info, const OperationDef& definition, const ConvolutionTransposedAttributes& attr); } } #endif #include "tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_thin.h" #include <string> #include <utility> #include <vector> #include "tensorflow/lite/delegates/gpu/common/task/work_group_picking.h" namespace tflite { namespace gpu { ConvolutionTransposedThin::ConvolutionTransposedThin( const OperationDef& definition, const ConvolutionTransposedAttributes& attr, const GpuInfo& gpu_info) : GPUOperation(definition) { code_ = GenerateConvolutionTransposedCode( definition_, DivideRoundUp(attr.weights.shape.i, 4), attr.weights.shape.o, int2(attr.weights.shape.w, attr.weights.shape.h)); if (definition_.precision == CalculationsPrecision::F16 && gpu_info.IsAdreno() && gpu_info.adreno_info.IsAdreno3xx()) { compiler_options_.push_back(CompilerOptions::kAdrenoFullSimd); } } ConvolutionTransposedThin::ConvolutionTransposedThin( ConvolutionTransposedThin&& operation) : GPUOperation(std::move(operation)) {} ConvolutionTransposedThin& ConvolutionTransposedThin::operator=( ConvolutionTransposedThin&& operation) { if (this != &operation) { GPUOperation::operator=(std::move(operation)); } return *this; } std::string ConvolutionTransposedThin::GenerateConvolutionTransposedCode( const OperationDef& op_def, int src_depth, int dst_channels, const int2& kernel_size) { AddSrcTensor("src_tensor", op_def.src_tensors[0]); AddDstTensor("dst_tensor", op_def.dst_tensors[0]); const std::string channel_x = dst_channels == 1 ? 
"" : ".x"; const std::vector<std::string> postfix = {channel_x, ".y", ".z", ".w"}; const std::vector<std::string> channel = {".x", ".y", ".z", ".w"}; const std::string type_postfix = dst_channels == 1 ? "" : std::to_string(dst_channels); std::string accum_type; switch (op_def.precision) { case CalculationsPrecision::F32: case CalculationsPrecision::F32_F16: accum_type = "float" + type_postfix; break; case CalculationsPrecision::F16: accum_type = "half" + type_postfix; break; } std::string c; c += "MAIN_FUNCTION($0) {\n"; if (op_def.IsBatchSupported()) { c += " int linear_id = GLOBAL_ID_0;\n"; c += " int X = linear_id / args.dst_tensor.Batch();\n"; c += " int B = linear_id % args.dst_tensor.Batch();\n"; c += " args.dst_tensor.SetBatchRef(B);\n"; c += " args.src_tensor.SetBatchRef(B);\n"; } else { c += " int X = GLOBAL_ID_0;\n"; } c += " int Y = GLOBAL_ID_1;\n"; c += " if (X >= args.src_tensor.Width() || Y >= args.src_tensor.Height()) " "return;\n"; c += " " + accum_type + " r[" + std::to_string(kernel_size.y) + "][" + std::to_string(kernel_size.x) + "];\n"; c += " {\n"; c += " FLT4 src = args.src_tensor.Read(X, Y, 0);\n"; int index = 0; for (int y = 0; y < kernel_size.y; ++y) { for (int x = 0; x < kernel_size.x; ++x) { std::string r_s = " r[" + std::to_string(y) + "][" + std::to_string(x) + "]"; for (int d = 0; d < dst_channels; ++d) { c += r_s + postfix[d] + " = dot(src, args.weights.Read(" + std::to_string(index) + "));\n"; index++; } } } c += " }\n"; for (int i = 1; i < src_depth; ++i) { c += " if (X > " + std::to_string(-i) + ") { c += " FLT4 src = args.src_tensor.Read(X, Y, " + std::to_string(i) + ");\n"; for (int y = 0; y < kernel_size.y; ++y) { for (int x = 0; x < kernel_size.x; ++x) { std::string r_s = " r[" + std::to_string(y) + "][" + std::to_string(x) + "]"; for (int d = 0; d < dst_channels; ++d) { c += r_s + postfix[d] + " += dot(src, args.weights.Read(" + std::to_string(index) + "));\n"; index++; } } } c += " }\n"; } c += " X *= " + std::to_string(kernel_size.x) + ";\n"; c += " Y *= " + std::to_string(kernel_size.y) + ";\n"; for (int y = 0; y < kernel_size.y; ++y) { for (int x = 0; x < kernel_size.x; ++x) { const std::string x_coord = "X + " + std::to_string(x); const std::string y_coord = "Y + " + std::to_string(y); c += " if (" + x_coord + " < args.dst_tensor.Width() && " + y_coord + " < args.dst_tensor.Height()) {\n"; c += " FLT4 result = args.weights.Read(" + std::to_string(index) + ");\n"; for (int d = 0; d < dst_channels; ++d) { c += " result" + channel[d] + " += r[" + std::to_string(y) + "][" + std::to_string(x) + "]" + postfix[d] + ";\n"; } c += " args.dst_tensor.Write(result, " + x_coord + ", " + y_coord + ", 0);\n"; c += " }\n"; } } c += "}\n"; return c; } int3 ConvolutionTransposedThin::GetGridSize() const { const int grid_x = src_[0]->Width() * dst_[0]->Batch(); const int grid_y = src_[0]->Height(); const int grid_z = 1; return int3(grid_x, grid_y, grid_z); } bool IsConvolutionTransposedThinSupported( const ConvolutionTransposedAttributes& attr) { return attr.weights.shape.o <= 4 && attr.weights.shape.w == attr.stride.w && attr.weights.shape.h == attr.stride.h && attr.padding.prepended.w == 0 && attr.padding.prepended.h == 0 && attr.padding.appended.w == 0 && attr.padding.appended.h == 0; } ConvolutionTransposedThin CreateConvolutionTransposedThin( const GpuInfo& gpu_info, const OperationDef& definition, const ConvolutionTransposedAttributes& attr) { ConvolutionTransposedThin result(definition, attr, gpu_info); result.UploadData(attr.weights, attr.bias); return 
result; } } }
#include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_thin_test_util.h" namespace tflite { namespace gpu { namespace cl { TEST_F(OpenCLOperationTest, ConvolutionTransposedThinSimpleWeights) { auto status = ConvolutionTransposedThinSimpleWeightsTest(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } TEST_F(OpenCLOperationTest, ConvolutionTransposedThin) { auto status = ConvolutionTransposedThinTest(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } } } }
1,016
cpp
tensorflow/tensorflow
conv_generic
tensorflow/lite/delegates/gpu/common/tasks/conv_generic.cc
tensorflow/lite/delegates/gpu/cl/kernels/conv_generic_test.cc
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_CONV_GENERIC_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_CONV_GENERIC_H_ #include <cstring> #include <memory> #include <string> #include <utility> #include <vector> #include "tensorflow/lite/delegates/gpu/common/data_type.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/task/buffer_desc.h" #include "tensorflow/lite/delegates/gpu/common/task/gpu_operation.h" #include "tensorflow/lite/delegates/gpu/common/task/tensor_desc.h" #include "tensorflow/lite/delegates/gpu/common/task/weights_conversion.h" #include "tensorflow/lite/delegates/gpu/common/task/weights_layout.h" #include "tensorflow/lite/delegates/gpu/common/tensor.h" #include "tensorflow/lite/delegates/gpu/common/types.h" #include "tensorflow/lite/delegates/gpu/common/winograd_util.h" namespace tflite { namespace gpu { class ConvGeneric : public GPUOperation { public: enum class WeightsUploadType { LOCAL_MEM_ASYNC_SUBGROUP, LOCAL_MEM_BY_THREADS, GLOBAL_MEM, CONSTANT_MEM, PRIVATE_MEM_SIMD_BROADCAST, TEXTURES_MEM_X4, }; struct ConvParams { DataType weights_data_type; int4 block_size; bool fixed_work_group_size; int3 work_group_size; int3 work_group_launch_order; bool linear_spatial; bool linear_all; bool different_weights_for_height; bool groups_support = false; int src_depth_loop_size; bool need_src_loop = true; bool need_dst_loop = true; WeightsUploadType weights_upload_type; bool x_kernel_is_1 = false; bool y_kernel_is_1 = false; bool z_kernel_is_1 = false; WeightsLayout weights_layout; int simd_size = 1; bool AreWeightsBuffer() const { return weights_upload_type != WeightsUploadType::TEXTURES_MEM_X4; } bool IsPrivateMemBroadcast() const { return weights_upload_type == WeightsUploadType::PRIVATE_MEM_SIMD_BROADCAST; } }; ConvGeneric() = default; void GetPossibleKernelWorkGroups( TuningType tuning_type, const GpuInfo& gpu_info, const KernelInfo& kernel_info, std::vector<int3>* work_groups) const override; absl::Status BindArguments(ArgumentsBinder* args) override; int3 GetGridSize() const override; WeightsDescription GetWeightsDescription() const { WeightsDescription desc; desc.type = conv_params_.weights_data_type; desc.layout = conv_params_.weights_layout; desc.output_group_size = conv_params_.block_size.w; return desc; } ConvGeneric(ConvGeneric&& operation); ConvGeneric& operator=(ConvGeneric&& operation); ConvGeneric(const ConvGeneric&) = delete; ConvGeneric& operator=(const ConvGeneric&) = delete; private: ConvGeneric(const OperationDef& definition, const Convolution2DAttributes& attr, const GpuInfo& gpu_info, const BHWC* dst_shape = nullptr); ConvGeneric(const OperationDef& definition, const Convolution2DAttributes& attr, const BHWC& weights_shape, const GpuInfo& gpu_info, const BHWC* dst_shape = nullptr); ConvGeneric(const OperationDef& definition, const FullyConnectedAttributes& attr, const GpuInfo& gpu_info, const BHWC* dst_shape = nullptr); explicit ConvGeneric(const OperationDef& definition); ConvGeneric(const OperationDef& definition, const Convolution3DAttributes& attr, const GpuInfo& gpu_info, const BHWDC* dst_shape = nullptr); void GenerateCode(const GpuInfo& gpu_info); template <DataType T> void UploadData(const tflite::gpu::Tensor<OHWI, T>& weights, const tflite::gpu::Tensor<Linear, T>& biases); template <DataType T> void UploadDataForWinograd4x4To6x6( const tflite::gpu::Tensor<OHWI, T>& 
weights); template <DataType T> void UploadWeights(const tflite::gpu::Tensor<OHWI, T>& weights); template <DataType T> void UploadWeights(const tflite::gpu::Tensor<OHWDI, T>& weights); template <DataType T> void UploadBias(const tflite::gpu::Tensor<Linear, T>& bias); friend ConvGeneric CreateConvGeneric(const GpuInfo& gpu_info, const OperationDef& definition, const Convolution2DAttributes& attr, const BHWC* dst_shape); friend ConvGeneric CreateConvGeneric(const GpuInfo& gpu_info, const OperationDef& definition, const FullyConnectedAttributes& attr, const BHWC* dst_shape); friend ConvGeneric CreateConvGenericBatchedMatMul( const GpuInfo& gpu_info, const OperationDef& definition, const OHWI& weights_shape, const BHWC* dst_shape); friend ConvGeneric CreateConvGenericDynamicWeights( const GpuInfo& gpu_info, const OperationDef& definition, const Convolution2DAttributes& attr, const BHWC& weights_shape, const BHWC* dst_shape); friend ConvGeneric CreateConvGenericWino4x4To6x6( const GpuInfo& gpu_info, const OperationDef& definition, const Convolution2DAttributes& attr, const BHWC* dst_shape); friend ConvGeneric CreateConvGeneric3D(const GpuInfo& gpu_info, const OperationDef& definition, const Convolution3DAttributes& attr, const BHWDC* dst_shape); ConvParams GuessBestParams(const GpuInfo& gpu_info, const OperationDef& definition, const Convolution2DAttributes& attr, const BHWC* dst_shape = nullptr); ConvParams GuessBestParams(const GpuInfo& gpu_info, const OperationDef& definition, const Convolution2DAttributes& attr, const BHWC& weights_shape, const BHWC* dst_shape = nullptr); ConvParams GuessBestParams(const GpuInfo& gpu_info, const OperationDef& definition, const FullyConnectedAttributes& attr, const BHWC* dst_shape = nullptr); ConvParams GuessBestParamsPointwise(const GpuInfo& gpu_info, const OperationDef& definition, const OHWI& weights_shape, const BHWC* dst_shape = nullptr); ConvParams GuessBestParams(const GpuInfo& gpu_info, const OperationDef& definition, const Convolution3DAttributes& attr, const BHWDC* dst_shape = nullptr); ConvParams GuessBestParams(const GpuInfo& gpu_info, const OperationDef& definition, int src_depth, int dst_depth, bool x_kernel_is_1, bool y_kernel_is_1, bool different_weights_for_height, const BHWC* dst_shape = nullptr); ConvParams GuessBestParamsApple(const GpuInfo& gpu_info, const OperationDef& definition, int src_depth, int dst_depth, bool x_kernel_is_1, bool y_kernel_is_1, bool different_weights_for_height, const BHWC& dst_shape); std::string GenerateConv(const GpuInfo& gpu_info, const OperationDef& op_def, const ConvParams& conv_params); int4 stride_; int4 padding_; int4 kernel_size_; int4 dilation_; ConvParams conv_params_; }; template <DataType T> void ConvGeneric::UploadData(const tflite::gpu::Tensor<OHWI, T>& weights, const tflite::gpu::Tensor<Linear, T>& biases) { UploadWeights(weights); UploadBias(biases); } template <DataType T> void ConvGeneric::UploadDataForWinograd4x4To6x6( const tflite::gpu::Tensor<OHWI, T>& weights) { tflite::gpu::Tensor<OHWI, T> wino_weights; RearrangeWeightsToWinograd4x4To6x6Weights(weights, &wino_weights); UploadWeights(wino_weights); tflite::gpu::Tensor<Linear, DataType::FLOAT32> biases; biases.shape = Linear(weights.shape.o); biases.data.resize(weights.shape.o, 0.0f); UploadBias(biases); } template <DataType T> void ConvGeneric::UploadBias(const tflite::gpu::Tensor<Linear, T>& bias) { BufferDescriptor desc; desc.element_type = conv_params_.weights_data_type; desc.element_size = 4; desc.memory_type = 
conv_params_.weights_upload_type == ConvGeneric::WeightsUploadType::CONSTANT_MEM ? MemoryType::CONSTANT : MemoryType::GLOBAL; const int float_size = conv_params_.weights_data_type == DataType::FLOAT32 ? sizeof(float) : sizeof(half); int aligned_channels = AlignByN(bias.shape.v, 4 * conv_params_.block_size.w); desc.size = float_size * aligned_channels; desc.data.resize(desc.size); if (conv_params_.weights_data_type == DataType::FLOAT32) { float* gpu_data = reinterpret_cast<float*>(desc.data.data()); for (int i = 0; i < aligned_channels; ++i) { gpu_data[i] = i < bias.shape.v ? bias.data[i] : 0.0f; } } else { half* gpu_data = reinterpret_cast<half*>(desc.data.data()); for (int i = 0; i < aligned_channels; ++i) { gpu_data[i] = i < bias.shape.v ? bias.data[i] : 0.0f; } } args_.AddObject("biases", std::make_unique<BufferDescriptor>(std::move(desc))); } template <DataType T> void ConvGeneric::UploadWeights(const tflite::gpu::Tensor<OHWI, T>& weights) { const auto weights_desc = GetWeightsDescription(); const int flt_count = GetTotalElementsCountForLayout(weights_desc, weights.shape); std::vector<uint8_t> weights_data(flt_count * SizeOf(weights_desc.type)); RearrangeWeights(weights, weights_desc, absl::MakeSpan(weights_data)); if (conv_params_.AreWeightsBuffer()) { BufferDescriptor desc; desc.element_type = weights_desc.type; desc.element_size = 4; desc.memory_type = conv_params_.weights_upload_type == ConvGeneric::WeightsUploadType::CONSTANT_MEM ? MemoryType::CONSTANT : MemoryType::GLOBAL; desc.size = weights_data.size(); desc.data = std::move(weights_data); args_.AddObject("weights", std::make_unique<BufferDescriptor>(std::move(desc))); } else { uint2 tex_size = Get2dResourceSize(weights_desc, weights.shape); int sub_size = SizeOf(weights_desc.type) * 4 * tex_size.x * tex_size.y; for (int i = 0; i < 4; ++i) { TensorDescriptor desc = CreateConstantHWVec4TensorDescriptor( weights_desc.type, TensorStorageType::TEXTURE_2D, tex_size.x, tex_size.y, weights_data.data() + sub_size * i); args_.AddObject("weights" + std::to_string(i), std::make_unique<TensorDescriptor>(std::move(desc))); } } } template <DataType T> void ConvGeneric::UploadWeights(const tflite::gpu::Tensor<OHWDI, T>& weights) { const auto weights_desc = GetWeightsDescription(); const int flt_count = GetTotalElementsCountForLayout(weights_desc, weights.shape); std::vector<uint8_t> weights_data(flt_count * SizeOf(weights_desc.type)); RearrangeWeights(weights, weights_desc, absl::MakeSpan(weights_data)); if (conv_params_.AreWeightsBuffer()) { BufferDescriptor desc; desc.element_type = weights_desc.type; desc.element_size = 4; desc.size = weights_data.size(); desc.data = std::move(weights_data); args_.AddObject("weights", std::make_unique<BufferDescriptor>(std::move(desc))); } else { uint2 tex_size = Get2dResourceSize(weights_desc, weights.shape); int sub_size = SizeOf(weights_desc.type) * 4 * tex_size.x * tex_size.y; for (int i = 0; i < 4; ++i) { TensorDescriptor desc = CreateConstantHWVec4TensorDescriptor( weights_desc.type, TensorStorageType::TEXTURE_2D, tex_size.x, tex_size.y, weights_data.data() + sub_size * i); args_.AddObject("weights" + std::to_string(i), std::make_unique<TensorDescriptor>(std::move(desc))); } } } ConvGeneric CreateConvGeneric(const GpuInfo& gpu_info, const OperationDef& definition, const Convolution2DAttributes& attr, const BHWC* dst_shape = nullptr); ConvGeneric CreateConvGeneric(const GpuInfo& gpu_info, const OperationDef& definition, const FullyConnectedAttributes& attr, const BHWC* dst_shape = nullptr); 
ConvGeneric CreateConvGenericDynamicWeights(const GpuInfo& gpu_info, const OperationDef& definition, const Convolution2DAttributes& attr, const BHWC& weights_shape, const BHWC* dst_shape = nullptr); ConvGeneric CreateConvGenericBatchedMatMul(const GpuInfo& gpu_info, const OperationDef& definition, const OHWI& weights_shape, const BHWC* dst_shape = nullptr); ConvGeneric CreateConvGenericWino4x4To6x6(const GpuInfo& gpu_info, const OperationDef& definition, const Convolution2DAttributes& attr, const BHWC* dst_shape = nullptr); ConvGeneric CreateConvGeneric3D(const GpuInfo& gpu_info, const OperationDef& definition, const Convolution3DAttributes& attr, const BHWDC* dst_shape = nullptr); } } #endif #include "tensorflow/lite/delegates/gpu/common/tasks/conv_generic.h" #include <algorithm> #include <string> #include <utility> #include <vector> #include "absl/strings/substitute.h" #include "tensorflow/lite/delegates/gpu/common/data_type.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/task/util.h" #include "tensorflow/lite/delegates/gpu/common/task/work_group_picking.h" namespace tflite { namespace gpu { namespace { std::string GenerateUploadByThreads( const std::string& local_ptr_name, const std::string& name, bool use_ptrs, const std::string& global_offset_name, const std::string type_conversion, const std::string& lid_name, int total_work_items, int elements_to_upload) { std::string c; std::string offset = global_offset_name.empty() ? "" : global_offset_name + " + "; const int groups = elements_to_upload / total_work_items; const int reminder = elements_to_upload % total_work_items; const std::string access_start = name + (use_ptrs ? "[" : ".Read("); const std::string access_end = use_ptrs ? "]" : ")"; for (int i = 0; i < groups; ++i) { const std::string value = access_start + offset + lid_name + " + " + std::to_string(total_work_items * i) + access_end; c += " " + local_ptr_name + "[" + lid_name + " + " + std::to_string(total_work_items * i) + "] = " + absl::Substitute(type_conversion, value) + ";\n"; } if (reminder != 0) { const std::string value = access_start + offset + lid_name + " + " + std::to_string(total_work_items * groups) + access_end; c += " if (" + lid_name + " < " + std::to_string(reminder) + ") {\n"; c += " " + local_ptr_name + "[" + lid_name + " + " + std::to_string(total_work_items * groups) + "] = " + absl::Substitute(type_conversion, value) + ";\n"; c += " }\n"; } return c; } std::string GenerateAsyncUpload(const std::string& local_ptr_name, const std::string& global_ptr_name, const std::string& global_offset_name, int elements_to_upload) { std::string c; std::string offset = global_offset_name.empty() ? 
"" : " + " + global_offset_name; c += " async_work_group_copy(" + local_ptr_name + ", " + global_ptr_name + offset + ", " + std::to_string(elements_to_upload) + ", 0);\n"; return c; } std::string GenerateBlockCoords(const int4& block_size, const int3& work_group_launch_order, bool linear_spatial, bool linear_all, bool need_depth, bool need_batch) { std::string c; int3 launch_remap; launch_remap[work_group_launch_order.x] = 0; launch_remap[work_group_launch_order.y] = 1; launch_remap[work_group_launch_order.z] = 2; if (linear_all) { c += " int linear_all = GLOBAL_ID_0;\n"; if (need_batch) { c += " int B = linear_all % args.task_size_b;\n"; c += " linear_all = linear_all / args.task_size_b;\n"; } c += " int DST_X = linear_all % args.task_size_x;\n"; c += " linear_all = linear_all / args.task_size_x;\n"; c += " int DST_Y = linear_all % args.task_size_y;\n"; c += " linear_all = linear_all / args.task_size_y;\n"; if (need_depth) { c += " int DST_Z = linear_all % args.task_size_z;\n"; c += " linear_all = linear_all / args.task_size_z;\n"; } c += " int DST_S = linear_all;\n"; } else if (linear_spatial) { if (work_group_launch_order[0] == 0) { c += " int linear_spatial = GLOBAL_ID_0;\n"; } else { c += " int linear_spatial = GROUP_ID_" + std::to_string(launch_remap[0]) + " * GROUP_SIZE_0 + LOCAL_ID_0;\n"; } if (need_batch) { c += " int B = linear_spatial % args.task_size_b;\n"; c += " linear_spatial = linear_spatial / args.task_size_b;\n"; } c += " int DST_X = linear_spatial % args.task_size_x;\n"; c += " linear_spatial = linear_spatial / args.task_size_x;\n"; c += " int DST_Y = linear_spatial % args.task_size_y;\n"; c += " linear_spatial = linear_spatial / args.task_size_y;\n"; if (need_depth) { c += " int DST_Z = linear_spatial;\n"; } if (work_group_launch_order[1] == 1) { c += " int DST_S = GLOBAL_ID_1;\n"; } else { c += " int DST_S = GROUP_ID_" + std::to_string(launch_remap[1]) + " * GROUP_SIZE_1 + LOCAL_ID_1;\n"; } } else { if (work_group_launch_order[0] == 0) { c += " int DST_X = GLOBAL_ID_0;\n"; } else { c += " int DST_X = GROUP_ID_" + std::to_string(launch_remap[0]) + " * GROUP_SIZE_0 + LOCAL_ID_0;\n"; } if (need_batch) { c += " int B = DST_X % args.task_size_b;\n"; c += " DST_X = DST_X / args.task_size_b;\n"; } std::string global_id_1; if (work_group_launch_order[1] == 1) { global_id_1 = "GLOBAL_ID_1"; } else { global_id_1 = "GROUP_ID_" + std::to_string(launch_remap[1]) + " * GROUP_SIZE_1 + LOCAL_ID_1"; } if (need_depth) { c += " int linear_id_1 = " + global_id_1 + ";\n"; c += " int DST_Y = linear_id_1 % args.task_size_y;\n"; c += " int DST_Z = linear_id_1 / args.task_size_y;\n"; } else { c += " int DST_Y = " + global_id_1 + ";\n"; } if (work_group_launch_order[2] == 2) { c += " int DST_S = GLOBAL_ID_2;\n"; } else { c += " int DST_S = GROUP_ID_" + std::to_string(launch_remap[2]) + " * GROUP_SIZE_2 + LOCAL_ID_2;\n"; } } if (block_size.x != 1) { c += " DST_X *= " + std::to_string(block_size.x) + ";\n"; } if (block_size.y != 1) { c += " DST_Y *= " + std::to_string(block_size.y) + ";\n"; } if (need_depth && block_size.z != 1) { c += " DST_Z *= " + std::to_string(block_size.z) + ";\n"; } if (block_size.w != 1) { c += " DST_S *= " + std::to_string(block_size.w) + ";\n"; } return c; } } ConvGeneric::ConvGeneric(const OperationDef& definition, const Convolution2DAttributes& attr, const GpuInfo& gpu_info, const BHWC* dst_shape) : GPUOperation(definition), stride_(attr.strides.w, attr.strides.h, 1, 1), padding_(-attr.padding.prepended.w, -attr.padding.prepended.h, 0, 0), 
kernel_size_(attr.weights.shape.w, attr.weights.shape.h, 1, 1), dilation_(attr.dilations.w, attr.dilations.h, 1, 1), conv_params_(GuessBestParams(gpu_info, definition, attr, dst_shape)) { const int src_slices = DivideRoundUp(attr.weights.shape.i, 4); const int dst_slices = DivideRoundUp(attr.weights.shape.o, 4); if (attr.groups != 1) { conv_params_.groups_support = true; const int dst_group_slices = dst_slices / attr.groups; if (dst_group_slices % conv_params_.block_size.w != 0) { if (conv_params_.block_size.w == 4 && dst_group_slices % 2 == 0) { conv_params_.block_size.w = 2; } else { conv_params_.block_size.w = 1; } } args_.AddInt("src_group_size", src_slices); args_.AddInt("dst_group_size", dst_slices / attr.groups); } } ConvGeneric::ConvGeneric(const OperationDef& definition, const Convolution2DAttributes& attr, const BHWC& weights_shape, const GpuInfo& gpu_info, const BHWC* dst_shape) : GPUOperation(definition), stride_(attr.strides.w, attr.strides.h, 1, 1), padding_(-attr.padding.prepended.w, -attr.padding.prepended.h, 0, 0), kernel_size_(weights_shape.w, weights_shape.h, 1, 1), dilation_(attr.dilations.w, attr.dilations.h, 1, 1), conv_params_(GuessBestParams(gpu_info, definition, attr, weights_shape, dst_shape)) {} ConvGeneric::ConvGeneric(const OperationDef& definition, const FullyConnectedAttributes& attr, const GpuInfo& gpu_info, const BHWC* dst_shape) : GPUOperation(definition), stride_(1, 1, 1, 1), padding_(0, 0, 0, 0), kernel_size_(1, 1, 1, 1), dilation_(1, 1, 1, 1), conv_params_(GuessBestParams(gpu_info, definition, attr, dst_shape)) {} ConvGeneric::ConvGeneric(const OperationDef& definition) : GPUOperation(definition), stride_(1, 1, 1, 1), padding_(0, 0, 0, 0), kernel_size_(1, 1, 1, 1), dilation_(1, 1, 1, 1) {} ConvGeneric::ConvGeneric(ConvGeneric&& operation) : GPUOperation(std::move(operation)), stride_(operation.stride_), padding_(operation.padding_), kernel_size_(operation.kernel_size_), dilation_(operation.dilation_), conv_params_(operation.conv_params_) {} ConvGeneric::ConvGeneric(const OperationDef& definition, const Convolution3DAttributes& attr, const GpuInfo& gpu_info, const BHWDC* dst_shape) : GPUOperation(definition), stride_(attr.strides.w, attr.strides.h, attr.strides.d, 1), padding_(-attr.padding.prepended.w, -attr.padding.prepended.h, -attr.padding.prepended.d, 0), kernel_size_(attr.weights.shape.w, attr.weights.shape.h, attr.weights.shape.d, 1), dilation_(attr.dilations.w, attr.dilations.h, attr.dilations.d, 1), conv_params_(GuessBestParams(gpu_info, definition, attr, dst_shape)) {} ConvGeneric& ConvGeneric::operator=(ConvGeneric&& operation) { if (this != &operation) { std::swap(stride_, operation.stride_); std::swap(padding_, operation.padding_); std::swap(kernel_size_, operation.kernel_size_); std::swap(dilation_, operation.dilation_); std::swap(conv_params_, operation.conv_params_); GPUOperation::operator=(std::move(operation)); } return *this; } void ConvGeneric::GenerateCode(const GpuInfo& gpu_info) { if (conv_params_.linear_all) { grid_dimension_ = 1; } else if (conv_params_.linear_spatial) { grid_dimension_ = 2; } AddSrcTensor("src_tensor", definition_.src_tensors[0]); AddDstTensor("dst_tensor", definition_.dst_tensors[0]); if (definition_.src_tensors.size() == 2) { const DataType weights_type = definition_.GetDataType(); if (conv_params_.weights_layout == WeightsLayout::kOSpatialIOGroupI4O4 || conv_params_.weights_layout == WeightsLayout::kOSpatialIOGroupO4I4) { definition_.src_tensors[1] = {weights_type, TensorStorageType::BUFFER, Layout::HWC}; 
BufferDescriptor desc; desc.element_type = weights_type; desc.element_size = 4; desc.memory_type = conv_params_.weights_upload_type == ConvGeneric::WeightsUploadType::CONSTANT_MEM ? MemoryType::CONSTANT : MemoryType::GLOBAL; AddSrcBuffer("weights", desc); } else { TensorDescriptor desc{weights_type, TensorStorageType::TEXTURE_2D, Layout::HW}; definition_.src_tensors[1] = desc; definition_.src_tensors.push_back(desc); definition_.src_tensors.push_back(desc); definition_.src_tensors.push_back(desc); for (int i = 0; i < 4; ++i) { const std::string name = "weights" + std::to_string(i); AddSrcTensor(name, definition_.src_tensors[1 + i]); } } } code_ = GenerateConv(gpu_info, definition_, conv_params_); if (definition_.precision == CalculationsPrecision::F16 && gpu_info.IsPowerVR()) { compiler_options_.push_back(CompilerOptions::kClFastRelaxedMath); } if (gpu_info.IsMali()) { compiler_options_.push_back(CompilerOptions::kClFastRelaxedMath); } if (conv_params_.IsPrivateMemBroadcast() && (gpu_info.IsCL20OrHigher() || gpu_info.opencl_info.IsCLVK())) { compiler_options_.push_back(CompilerOptions::kCl20); } bool kernel_is_trivial = conv_params_.x_kernel_is_1 && conv_params_.y_kernel_is_1; if (definition_.src_tensors[0].HasAxis(Axis::DEPTH)) { kernel_is_trivial = kernel_is_trivial & conv_params_.z_kernel_is_1; } if (gpu_info.IsAdreno() && gpu_info.adreno_info.IsAdreno3xx() && definition_.precision == CalculationsPrecision::F16 && kernel_is_trivial) { compiler_options_.push_back(CompilerOptions::kAdrenoFullSimd); } } absl::Status ConvGeneric::BindArguments(ArgumentsBinder* args) { const int task_size_b = dst_[0]->Batch(); const int task_size_x = DivideRoundUp(dst_[0]->Width(), conv_params_.block_size.x); const int task_size_y = DivideRoundUp(dst_[0]->Height(), conv_params_.block_size.y); const int task_size_z = DivideRoundUp(dst_[0]->Depth(), conv_params_.block_size.z); RETURN_IF_ERROR(args->SetInt("task_size_b", task_size_b)); RETURN_IF_ERROR(args->SetInt("task_size_x", task_size_x)); RETURN_IF_ERROR(args->SetInt("task_size_y", task_size_y)); RETURN_IF_ERROR(args->SetInt("task_size_z", task_size_z)); return absl::OkStatus(); } int3 ConvGeneric::GetGridSize() const { const int task_size_b = dst_[0]->Batch(); const int task_size_x = DivideRoundUp(dst_[0]->Width(), conv_params_.block_size.x); const int task_size_y = DivideRoundUp(dst_[0]->Height(), conv_params_.block_size.y); const int task_size_z = DivideRoundUp(dst_[0]->Depth(), conv_params_.block_size.z); const int task_size_s = DivideRoundUp(dst_[0]->Slices(), conv_params_.block_size.w); int3 wg; if (conv_params_.linear_all) { return int3( task_size_x * task_size_b * task_size_y * task_size_z * task_size_s, 1, 1); } else if (conv_params_.linear_spatial) { return int3(task_size_x * task_size_b * task_size_y * task_size_z, task_size_s, 1); } else { return int3(task_size_x * task_size_b, task_size_y * task_size_z, task_size_s); } } void ConvGeneric::GetPossibleKernelWorkGroups( TuningType tuning_type, const GpuInfo& gpu_info, const KernelInfo& kernel_info, std::vector<int3>* work_groups) const { if (conv_params_.weights_upload_type == WeightsUploadType::LOCAL_MEM_ASYNC_SUBGROUP || conv_params_.weights_upload_type == WeightsUploadType::LOCAL_MEM_BY_THREADS || conv_params_.fixed_work_group_size) { work_groups->push_back(work_group_size_); return; } GetPossibleWorkGroupsConv(tuning_type, gpu_info, kernel_info, grid_size_, work_groups); } std::string ConvGeneric::GenerateConv(const GpuInfo& gpu_info, const OperationDef& op_def, const ConvParams& conv_params) 
{ const auto& src_def = op_def.src_tensors[0]; auto generate_id = [&](const std::string& x, const std::string& y, const std::string& z) { std::string id; if (src_def.HasAxis(Axis::WIDTH)) { id += "_w" + x; } if (src_def.HasAxis(Axis::HEIGHT)) { id += "_h" + y; } if (src_def.HasAxis(Axis::DEPTH)) { id += "_d" + z; } return id; }; auto generate_id_full = [&](const std::string& x, const std::string& y, const std::string& z, const std::string& s) { return generate_id(x, y, z) + "_s" + s; }; auto generate_check = [&](const std::string& x, const std::string& y, const std::string& z) { std::string check; const std::vector<Axis> axes{Axis::WIDTH, Axis::HEIGHT, Axis::DEPTH}; const std::vector<std::string> names{"in_x", "in_y", "in_z"}; const std::vector<bool> is_1{conv_params_.x_kernel_is_1, conv_params_.y_kernel_is_1, conv_params_.z_kernel_is_1}; const std::vector<std::string> coords{x, y, z}; for (int i = 0; i < axes.size(); ++i) { const auto& axis = axes[i]; if (src_def.HasAxis(axis) && !src_def.SupportsZeroClamp(axis, gpu_info) && !is_1[i]) { if (!check.empty()) { check += " && "; } check += names[i] + coords[i]; } } return check; }; if (!conv_params_.x_kernel_is_1) { args_.AddInt("stride_x", stride_.x); args_.AddInt("padding_x",
#include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/tasks/conv_generic_test_util.h" namespace tflite { namespace gpu { namespace cl { TEST_F(OpenCLOperationTest, ConvGeneric1x1SimpleWeights) { const auto status = ConvGeneric1x1SimpleWeightsTest(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } TEST_F(OpenCLOperationTest, ConvGeneric1x1) { const auto status = ConvGeneric1x1Test(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } TEST_F(OpenCLOperationTest, ConvGenericSimpleWeights) { const auto status = ConvGenericSimpleWeightsTest(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } TEST_F(OpenCLOperationTest, ConvGeneric) { const auto status = ConvGenericTest(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } TEST_F(OpenCLOperationTest, ConvGenericGrouped) { const auto status = ConvGenericGroupedTest(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } } } }
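The GenerateUploadByThreads helper in the conv_generic sources above emits kernel code in which each work item copies every total_work_items-th element into local memory, with a final guarded pass for the leftover elements. Below is a minimal host-side sketch (not part of the TFLite sources; the constants are made up for illustration) that simulates which thread ends up owning which element under that scheme.

// Hedged sketch: simulates the element-to-thread mapping that the code emitted by
// GenerateUploadByThreads() produces. Each of `total_work_items` threads copies
// elements lid, lid + total_work_items, ...; a guarded pass handles the remainder.
#include <cstdio>
#include <vector>

int main() {
  const int total_work_items = 8;     // assumed local work-group size
  const int elements_to_upload = 19;  // assumed number of elements staged in local memory
  const int groups = elements_to_upload / total_work_items;
  const int remainder = elements_to_upload % total_work_items;
  std::vector<int> owner(elements_to_upload, -1);
  for (int lid = 0; lid < total_work_items; ++lid) {
    for (int g = 0; g < groups; ++g) owner[lid + total_work_items * g] = lid;
    if (lid < remainder) owner[lid + total_work_items * groups] = lid;  // leftover pass
  }
  for (int i = 0; i < elements_to_upload; ++i)
    std::printf("element %2d -> thread %d\n", i, owner[i]);
  return 0;
}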
1017
cpp
tensorflow/tensorflow
softmax1x1
tensorflow/lite/delegates/gpu/common/tasks/softmax1x1.cc
tensorflow/lite/delegates/gpu/cl/kernels/softmax1x1_test.cc
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_SOFTMAX1X1_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_SOFTMAX1X1_H_ #include <string> #include <vector> #include "tensorflow/lite/delegates/gpu/common/task/gpu_operation.h" #include "tensorflow/lite/delegates/gpu/common/task/tensor_desc.h" namespace tflite { namespace gpu { class Softmax1x1 : public GPUOperation { public: Softmax1x1() = default; Softmax1x1(const OperationDef& definition, const GpuInfo& gpu_info, const BHWC& shape); void GetPossibleKernelWorkGroups( TuningType tuning_type, const GpuInfo& gpu_info, const KernelInfo& kernel_info, std::vector<int3>* work_groups) const override { work_groups->push_back(work_group_size_); } absl::Status BindArguments(ArgumentsBinder* args) override; int3 GetGridSize() const override; Softmax1x1(Softmax1x1&& kernel); Softmax1x1& operator=(Softmax1x1&& kernel); Softmax1x1(const Softmax1x1&) = delete; Softmax1x1& operator=(const Softmax1x1&) = delete; friend Softmax1x1 CreateSoftmax1x1(); private: std::string GetSoftmaxKernelCode(const OperationDef& op_def); }; Softmax1x1 CreateSoftmax1x1(const OperationDef& definition, const GpuInfo& gpu_info, const BHWC& shape); } } #endif #include "tensorflow/lite/delegates/gpu/common/tasks/softmax1x1.h" #include <string> #include <utility> #include <vector> #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/task/util.h" namespace tflite { namespace gpu { namespace { std::string MakeAccOp(OperationType op_type, const std::string& a, const std::string& b) { if (op_type == OperationType::ADD) { return a + " = " + a + " + " + b; } else if (op_type == OperationType::MAXIMUM) { return a + " = max(" + a + ", " + b + ")"; } else { return a; } } std::string GetReduceCode(const std::string& value, OperationType op_type, int group_reduction_size) { std::vector<int> stages; if (group_reduction_size == 1024) { stages = {8, 8, 4, 4}; } else if (group_reduction_size == 512) { stages = {8, 8, 8}; } else if (group_reduction_size == 256) { stages = {8, 8, 4}; } else if (group_reduction_size == 128) { stages = {8, 4, 4}; } else if (group_reduction_size == 64) { stages = {8, 8}; } else if (group_reduction_size == 32) { stages = {8, 4}; } else if (group_reduction_size == 16) { stages = {4, 4}; } else if (group_reduction_size <= 8) { stages = {group_reduction_size}; } std::string c; c += " LOCAL_MEM_BARRIER;\n"; c += " loc_mem[tid] = " + value + ";\n"; int stride = 1; for (int i = 0; i < stages.size(); ++i) { const bool last_stage = i == stages.size() - 1; const std::string condition = last_stage ? "tid == 0" : "tid % " + std::to_string(stride * stages[i]) + " == 0"; const std::string location = last_stage ? 
"loc_mem[0]" : "loc_mem[tid]"; c += " LOCAL_MEM_BARRIER;\n"; c += " if (" + condition + ") {\n"; for (int j = 1; j < stages[i]; ++j) { c += " " + MakeAccOp(op_type, value, "loc_mem[tid + " + std::to_string(stride * j) + "]") + ";\n"; } c += " " + location + " = " + value + ";\n"; c += " }\n"; stride *= stages[i]; } c += " LOCAL_MEM_BARRIER;\n"; c += " " + value + " = loc_mem[0];\n"; return c; } } Softmax1x1::Softmax1x1(const OperationDef& definition, const GpuInfo& gpu_info, const BHWC& shape) : GPUOperation(definition) { if (gpu_info.IsAdreno() && gpu_info.adreno_info.IsAdreno7xx()) { work_group_size_ = int3(512, 1, 1); } else if (gpu_info.IsMali()) { work_group_size_ = int3(1024, 1, 1); } else { work_group_size_ = int3(128, 1, 1); } const int slices = DivideRoundUp(shape.c, 4); while (work_group_size_.x >= slices * 2) { work_group_size_.x /= 2; } while (work_group_size_.x >= gpu_info.GetMaxWorkGroupSizeForX()) { work_group_size_.x /= 2; } code_ = GetSoftmaxKernelCode(definition_); } Softmax1x1::Softmax1x1(Softmax1x1&& kernel) : GPUOperation(std::move(kernel)) {} Softmax1x1& Softmax1x1::operator=(Softmax1x1&& kernel) { if (this != &kernel) { GPUOperation::operator=(std::move(kernel)); } return *this; } std::string Softmax1x1::GetSoftmaxKernelCode(const OperationDef& op_def) { AddSrcTensor("src_tensor", op_def.src_tensors[0]); AddDstTensor("dst_tensor", op_def.dst_tensors[0]); args_.AddFloat("mask_x"); args_.AddFloat("mask_y"); args_.AddFloat("mask_z"); args_.AddFloat("mask_w"); std::string c; c += "MAIN_FUNCTION($0) {\n"; if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) { c += " int linear_id = GROUP_ID_1;\n"; c += " int X = linear_id / args.dst_tensor.Batch();\n"; c += " int B = linear_id % args.dst_tensor.Batch();\n"; c += " if (B >= args.dst_tensor.Batch()) return;\n"; c += " args.src_tensor.SetBatchRef(B);\n"; c += " args.dst_tensor.SetBatchRef(B);\n"; } else { c += " int X = GROUP_ID_1;\n"; } c += " int Y = GROUP_ID_2;\n"; c += " if (X >= args.dst_tensor.Width()) return;\n"; c += " if (Y >= args.dst_tensor.Height()) return;\n"; c += " float4 mask = INIT_FLOAT4v4(args.mask_x, args.mask_y, args.mask_z, " "args.mask_w);\n"; c += " float4 maxx4 = INIT_FLOAT4(args.src_tensor.Read<float>(X, Y, 0).x);\n"; c += " int tid = LOCAL_ID_0;\n"; const int group_reduction_size = work_group_size_.x; c += " for (int s = tid; s < args.src_tensor.Slices(); s += " + std::to_string(group_reduction_size) + ") {\n"; c += " float4 mask_a = s == args.src_tensor.Slices() - 1 ? mask : " "INIT_FLOAT4(1.0f);\n"; c += " float4 mask_b = INIT_FLOAT4(1.0f) - mask_a;\n"; c += " float4 src = args.src_tensor.Read<float>(X, Y, s);\n"; c += " src = src * mask_a + mask_b * src.x;\n"; c += " maxx4 = max(maxx4, src);\n"; c += " }\n"; c += " float maximum = max(maxx4.x, maxx4.y);\n"; c += " maximum = max(maximum, maxx4.z);\n"; c += " maximum = max(maximum, maxx4.w);\n"; c += " __local float loc_mem[" + std::to_string(group_reduction_size) + "];\n"; c += GetReduceCode("maximum", OperationType::MAXIMUM, group_reduction_size); c += " float sum = 0.0f;\n"; c += " for (int s = tid; s < args.src_tensor.Slices(); s += " + std::to_string(group_reduction_size) + ") {\n"; c += " float4 mask_temp = s == args.src_tensor.Slices() - 1 ? 
mask : " "INIT_FLOAT4(1.0f);\n"; c += " float4 src = args.src_tensor.Read<float>(X, Y, s) - " "INIT_FLOAT4(maximum);\n"; c += " sum += dot(mask_temp, exp(src));\n"; c += " }\n"; c += GetReduceCode("sum", OperationType::ADD, group_reduction_size); c += " sum = 1.0f / sum;\n"; c += " int dst_s = GLOBAL_ID_0;\n"; c += " if (dst_s < args.dst_tensor.Slices()) {\n"; c += " float4 src = args.src_tensor.Read<float>(X, Y, dst_s) - " "INIT_FLOAT4(maximum);\n"; c += " FLT4 res = TO_FLT4(exp(src) * sum);\n"; c += " args.dst_tensor.Write(res, X, Y, dst_s);\n"; c += " }\n"; c += "}\n"; return c; } absl::Status Softmax1x1::BindArguments(ArgumentsBinder* args) { float4 mask = GetMaskForLastPlane(src_[0]->Channels()); RETURN_IF_ERROR(args->SetFloat("mask_x", mask.x)); RETURN_IF_ERROR(args->SetFloat("mask_y", mask.y)); RETURN_IF_ERROR(args->SetFloat("mask_z", mask.z)); RETURN_IF_ERROR(args->SetFloat("mask_w", mask.w)); return absl::OkStatus(); } int3 Softmax1x1::GetGridSize() const { return int3(dst_[0]->Slices(), dst_[0]->Width() * dst_[0]->Batch(), dst_[0]->Height()); } Softmax1x1 CreateSoftmax1x1(const OperationDef& definition, const GpuInfo& gpu_info, const BHWC& shape) { return Softmax1x1(definition, gpu_info, shape); } } }
#include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/tasks/softmax_test_util.h" namespace tflite { namespace gpu { namespace cl { namespace { TEST_F(OpenCLOperationTest, Softmax1x1) { auto status = Softmax1x1Test(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } TEST_F(OpenCLOperationTest, Softmax1x1BigNumber) { auto status = Softmax1x1BigNumberTest(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } } } } }
1018
cpp
tensorflow/tensorflow
reshapex4
tensorflow/lite/delegates/gpu/common/tasks/reshapex4.cc
tensorflow/lite/delegates/gpu/cl/kernels/reshapex4_test.cc
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_RESHAPEX4_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_RESHAPEX4_H_ #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/task/gpu_operation.h" #include "tensorflow/lite/delegates/gpu/common/types.h" namespace tflite { namespace gpu { GPUOperation CreateReshapex4(const OperationDef& definition); } } #endif #include "tensorflow/lite/delegates/gpu/common/tasks/reshapex4.h" #include <string> #include "tensorflow/lite/delegates/gpu/common/task/work_group_picking.h" namespace tflite { namespace gpu { namespace { std::string GetReshapeCode(const OperationDef& op_def) { std::string c; c += "MAIN_FUNCTION($0) {\n"; if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) { c += " int linear_id = GLOBAL_ID_0;\n"; c += " int X = linear_id / args.dst_tensor.Batch();\n"; c += " int B = linear_id % args.dst_tensor.Batch();\n"; c += " args.dst_tensor.SetBatchRef(B);\n"; } else { c += " int X = GLOBAL_ID_0;\n"; } c += " int Y = GLOBAL_ID_1;\n"; c += " int Z = GLOBAL_ID_2;\n"; c += " if (X >= args.dst_tensor.Width() || Y >= args.dst_tensor.Height() || " "Z >= args.dst_tensor.Slices()) { \n"; c += " return; \n"; c += " } \n"; if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) { c += " int dst_bhwc4 = B;\n"; } else { c += " int dst_bhwc4 = 0;\n"; } c += " dst_bhwc4 = ((dst_bhwc4 * args.dst_tensor.Height() + Y) * " "args.dst_tensor.Width() + X) * args.dst_tensor.Slices() + Z;\n"; c += " int src_z = dst_bhwc4 % args.src_tensor.Slices();\n"; c += " dst_bhwc4 = dst_bhwc4 / args.src_tensor.Slices();\n"; c += " int src_x = dst_bhwc4 % args.src_tensor.Width();\n"; c += " dst_bhwc4 = dst_bhwc4 / args.src_tensor.Width();\n"; c += " int src_y = dst_bhwc4 % args.src_tensor.Height();\n"; if (op_def.src_tensors[0].HasAxis(Axis::BATCH)) { c += " int src_b = dst_bhwc4 / args.src_tensor.Height();\n"; c += " args.src_tensor.SetBatchRef(src_b);\n"; } c += " args.src_tensor::type result = args.src_tensor.Read(src_x, src_y, " "src_z);\n"; c += " args.dst_tensor.Write(result, X, Y, Z);\n"; c += "}\n"; return c; } } GPUOperation CreateReshapex4(const OperationDef& definition) { GPUOperation op(definition); op.AddSrcTensor("src_tensor", definition.src_tensors[0]); op.AddDstTensor("dst_tensor", definition.dst_tensors[0]); op.code_ = GetReshapeCode(definition); op.tensor_to_grid_ = TensorToGrid::kWBToX_HDToY_SToZ; return op; } } }
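The reshape kernel above works purely on flat indices: a destination (B, X, Y, Z) coordinate is linearized with the slice index varying fastest and then re-decomposed against the source shape. A small host-side sketch of that remapping follows; the shapes and coordinates are assumptions chosen only so that both tensors hold the same number of slice elements.

// Hedged sketch: the coordinate remapping that GetReshapeCode() builds into the kernel.
// Both tensors are treated as BHWC4 with the slice index varying fastest; a destination
// element's flat index is re-decomposed against the source shape.
#include <cstdio>

struct Shape { int b, h, w, slices; };

int main() {
  Shape src{1, 2, 6, 2};  // assumed source batch/height/width/channel-slices
  Shape dst{1, 3, 4, 2};  // assumed destination with the same element count
  int B = 0, X = 1, Y = 2, Z = 1;  // assumed destination coordinates
  int flat = ((B * dst.h + Y) * dst.w + X) * dst.slices + Z;
  int src_z = flat % src.slices;  flat /= src.slices;
  int src_x = flat % src.w;       flat /= src.w;
  int src_y = flat % src.h;
  int src_b = flat / src.h;
  std::printf("dst(b=%d,x=%d,y=%d,s=%d) <- src(b=%d,x=%d,y=%d,s=%d)\n",
              B, X, Y, Z, src_b, src_x, src_y, src_z);
  return 0;
}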
#include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/tasks/reshape_test_util.h" namespace tflite { namespace gpu { namespace cl { namespace { TEST_F(OpenCLOperationTest, Reshapex4) { auto status = Reshapex4Test(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } } } } }
1019
cpp
tensorflow/tensorflow
conv_constants
tensorflow/lite/delegates/gpu/common/tasks/conv_constants.cc
tensorflow/lite/delegates/gpu/cl/kernels/conv_constants_test.cc
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_CONV_CONSTANTS_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_CONV_CONSTANTS_H_ #include <memory> #include <utility> #include "tensorflow/lite/delegates/gpu/common/data_type.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/task/buffer_desc.h" #include "tensorflow/lite/delegates/gpu/common/task/gpu_operation.h" #include "tensorflow/lite/delegates/gpu/common/tensor.h" #include "tensorflow/lite/delegates/gpu/common/types.h" namespace tflite { namespace gpu { template <DataType S, typename T> void RearrangeWeightsForConvConstants( const tflite::gpu::Tensor<OHWI, S>& weights, absl::Span<T> dst) { const int dst_depth = DivideRoundUp(weights.shape.o, 4); const int src_depth = DivideRoundUp(weights.shape.i, 4); const int kernel_x = weights.shape.w; const int kernel_y = weights.shape.h; int counter = 0; for (int s = 0; s < src_depth; ++s) { for (int y = 0; y < kernel_y; ++y) { for (int x = 0; x < kernel_x; ++x) { for (int d = 0; d < dst_depth; ++d) { const int channels_count = std::min(4, weights.shape.i - s * 4); T filters[4]; for (int i = 0; i < 4; ++i) { for (int j = 0; j < channels_count; ++j) { const int s_ch = s * 4 + j; const int d_ch = d * 4 + i; if (s_ch < weights.shape.i && d_ch < weights.shape.o) { const int f_index = weights.shape.LinearIndex({d_ch, y, x, s_ch}); filters[j][i] = weights.data[f_index]; } else { filters[j][i] = 0.0f; } } } for (int i = 0; i < channels_count; ++i) { dst[counter++] = filters[i]; } } } } } } template <DataType S, typename T> void RearrangeWeightsForConvConstantsDot( const tflite::gpu::Tensor<OHWI, S>& weights, absl::Span<T> dst) { const int dst_depth = DivideRoundUp(weights.shape.o, 4); const int src_depth = DivideRoundUp(weights.shape.i, 4); const int kernel_x = weights.shape.w; const int kernel_y = weights.shape.h; int counter = 0; for (int s = 0; s < src_depth; ++s) { for (int y = 0; y < kernel_y; ++y) { for (int x = 0; x < kernel_x; ++x) { for (int d = 0; d < dst_depth; ++d) { const int channels_count = std::min(4, weights.shape.o - d * 4); T filters[4]; for (int j = 0; j < channels_count; ++j) { for (int i = 0; i < 4; ++i) { const int s_ch = s * 4 + i; const int d_ch = d * 4 + j; if (s_ch < weights.shape.i && d_ch < weights.shape.o) { const int f_index = weights.shape.LinearIndex({d_ch, y, x, s_ch}); filters[j][i] = weights.data[f_index]; } else { filters[j][i] = 0.0f; } } } for (int i = 0; i < channels_count; ++i) { dst[counter++] = filters[i]; } } } } } } template <DataType T> void UploadWeightsForConvConstants(const tflite::gpu::Tensor<OHWI, T>& weights, const GpuInfo& gpu_info, CalculationsPrecision precision, bool use_dot_conv, GPUOperation* op) { const int src_depth = DivideRoundUp(weights.shape.i, 4); const int dst_depth = DivideRoundUp(weights.shape.o, 4); const int kernel_x = weights.shape.w; const int kernel_y = weights.shape.h; const bool f32_weights = precision == CalculationsPrecision::F32; const int float_size = f32_weights ? 4 : 2; const int aligned_ch_count = use_dot_conv ? weights.shape.o * src_depth * 4 : weights.shape.i * dst_depth * 4; const int float_count = aligned_ch_count * kernel_x * kernel_y; BufferDescriptor desc; desc.element_type = f32_weights ? 
DataType::FLOAT32 : DataType::FLOAT16; desc.element_size = 4; if (gpu_info.IsApiOpenCl() || gpu_info.IsApiMetal()) { desc.memory_type = MemoryType::CONSTANT; } else { desc.memory_type = MemoryType::GLOBAL; } desc.size = float_size * float_count; desc.data.resize(desc.size); if (f32_weights) { float4* ptr = reinterpret_cast<float4*>(desc.data.data()); if (use_dot_conv) { RearrangeWeightsForConvConstantsDot(weights, absl::MakeSpan(ptr, float_count / 4)); } else { RearrangeWeightsForConvConstants(weights, absl::MakeSpan(ptr, float_count / 4)); } } else { half4* ptr = reinterpret_cast<half4*>(desc.data.data()); if (use_dot_conv) { RearrangeWeightsForConvConstantsDot(weights, absl::MakeSpan(ptr, float_count / 4)); } else { RearrangeWeightsForConvConstants(weights, absl::MakeSpan(ptr, float_count / 4)); } } op->args_.AddObject("weights", std::make_unique<BufferDescriptor>(std::move(desc))); } bool IsConvConstantsSupported(const GpuInfo& gpu_info, const OperationDef& definition, const Convolution2DAttributes& attr); GPUOperation CreateConvConstants(const GpuInfo& gpu_info, const OperationDef& definition, const Convolution2DAttributes& attr); } } #endif #include "tensorflow/lite/delegates/gpu/common/tasks/conv_constants.h" #include <algorithm> #include <memory> #include <string> #include <utility> #include "absl/strings/match.h" #include "absl/strings/str_cat.h" namespace tflite { namespace gpu { namespace { int GetAdrenoOptimalMaxConstantSize(const AdrenoInfo& adreno_info) { if (adreno_info.IsAdreno3xx() || adreno_info.IsAdreno4xx() || adreno_info.IsAdreno5xx()) { return 256 * 10; } else { return 256 * 14; } } int GetOptimalMaxConstantSize(const GpuInfo& gpu_info) { if (gpu_info.IsAdreno()) { return GetAdrenoOptimalMaxConstantSize(gpu_info.adreno_info); } else if (gpu_info.IsAMD()) { return 4096; } else { return 1024; } } void AppendConditionally(const std::string& value, const std::string& delimeter, std::string* result) { if (!result->empty()) { *result += delimeter; } *result += value; } std::string GenerateConv(int src_size, int dst_size, bool use_dot_conv, int const_mem_offset, CalculationsPrecision precision, const std::string& dst, const std::string& src) { std::string result; const std::string postfixes[] = {".x", ".y", ".z", ".w"}; if (use_dot_conv) { const std::string src_postfixes[] = {".x", ".xy", ".xyz", ""}; const std::string src_postfix = src_postfixes[src_size - 1]; for (int i = 0; i < dst_size; ++i) { result += " " + dst + postfixes[i] + " += dot(" + src + ", args.weights.Read(" + std::to_string(const_mem_offset + i) + ")" + src_postfix + ");\n"; } } else { const std::string dst_postfixes[] = {".x", ".xy", ".xyz", ""}; const std::string dst_postfix = dst_postfixes[dst_size - 1]; if (precision == CalculationsPrecision::F32_F16) { for (int i = 0; i < src_size; ++i) { if (i != 0) { result += " + "; } std::string src_name = src; if (src_size != 1) { src_name += postfixes[i]; } result += src_name + " * args.weights.Read(" + std::to_string(const_mem_offset + i) + ")" + dst_postfix; } std::string size = dst_size == 1 ? 
"" : std::to_string(dst_size); result = " " + dst + dst_postfix + " += TO_ACCUM_FLT" + size + "(" + result + ");\n"; } else { for (int i = 0; i < src_size; ++i) { std::string src_name = src; if (src_size != 1) { src_name += postfixes[i]; } result += " " + dst + dst_postfix + " += " + src_name + " * args.weights.Read(" + std::to_string(const_mem_offset + i) + ")" + dst_postfix + ";\n"; } } } return result; } std::string GenerateConvolutionConstantCode(const GpuInfo& gpu_info, const OperationDef& op_def, const OHWI& weights_shape, bool x_oob_reads, bool y_oob_reads, bool use_dot_conv, GPUOperation* op) { auto src_desc = op_def.src_tensors[0]; op->AddSrcTensor("src_tensor", src_desc); op->AddDstTensor("dst_tensor", op_def.dst_tensors[0]); const int out_z = DivideRoundUp(weights_shape.o, 4); const std::string kOutZ = std::to_string(out_z); const int src_depth = DivideRoundUp(weights_shape.i, 4); const std::string postfixes[] = {".x", ".xy", ".xyz", ""}; std::string c; c += "MAIN_FUNCTION($0) {\n"; if (src_desc.HasAxis(Axis::BATCH)) { c += " int linear_id = GLOBAL_ID_0;\n"; c += " int X = linear_id / args.dst_tensor.Batch();\n"; c += " int B = linear_id % args.dst_tensor.Batch();\n"; c += " args.src_tensor.SetBatchRef(B);\n"; c += " args.dst_tensor.SetBatchRef(B);\n"; } else { c += " int X = GLOBAL_ID_0;\n"; } c += " int Y = GLOBAL_ID_1;\n"; c += " if (X >= args.dst_tensor.Width() || Y >= args.dst_tensor.Height()) " "return;\n"; c += " int start_x = X * args.stride_x + args.padding_x;\n"; c += " int start_y = Y * args.stride_y + args.padding_y;\n"; for (int i = 0; i < out_z; ++i) { c += " ACCUM_FLT4 r" + std::to_string(i) + " = INIT_ACCUM_FLT4(0.0f);\n"; } std::string check; if (y_oob_reads && !src_desc.SupportsZeroClamp(Axis::HEIGHT, gpu_info)) { AppendConditionally("inside_y", " && ", &check); } if (x_oob_reads && !src_desc.SupportsZeroClamp(Axis::WIDTH, gpu_info)) { AppendConditionally("inside_x", " && ", &check); } int filters_counter = 0; for (int s = 0; s < src_depth; ++s) { const int src_ch_count = std::min(4, weights_shape.i - s * 4); const std::string s_count = src_ch_count == 1 ? "" : std::to_string(src_ch_count); const std::string s_type = absl::StrCat("FLT", s_count); const std::string s_postfix = postfixes[src_ch_count - 1]; for (int ky = 0; ky < weights_shape.h; ++ky) { std::string s_y = absl::StrCat("(start_y + ", ky, " * args.dilation_y)"); c += " {\n"; c += " int y_c = start_y + " + std::to_string(ky) + " * args.dilation_y;\n"; if (y_oob_reads && !src_desc.SupportsZeroClamp(Axis::HEIGHT, gpu_info)) { c += " bool inside_y = y_c >= 0 && y_c < args.src_tensor.Height();\n"; c += " y_c = clamp(y_c, 0, args.src_tensor.Height() - 1);\n"; } for (int kx = 0; kx < weights_shape.w; ++kx) { c += " {\n"; c += " int x_c = start_x + " + std::to_string(kx) + " * args.dilation_x;\n"; if (x_oob_reads && !src_desc.SupportsZeroClamp(Axis::WIDTH, gpu_info)) { c += " bool inside_x = x_c >= 0 && x_c < " "args.src_tensor.Width();\n"; c += " x_c = clamp(x_c, 0, args.src_tensor.Width() - 1);\n"; } c += " " + s_type + " src = args.src_tensor.Read(x_c, y_c, " + std::to_string(s) + ")" + s_postfix + ";\n"; if (!check.empty()) { c += " src *= INIT_FLT(" + check + ");\n"; } for (int d = 0; d < out_z; ++d) { const int dst_ch_count = std::min(4, weights_shape.o - d * 4); c += GenerateConv(src_ch_count, dst_ch_count, use_dot_conv, filters_counter, op_def.precision, "r" + std::to_string(d), "src"); filters_counter += use_dot_conv ? 
dst_ch_count : src_ch_count; } c += " }\n"; } c += " }\n"; } } for (int i = 0; i < out_z; ++i) { std::string s_i = std::to_string(i); c += " {\n"; c += " FLT4 res = TO_FLT4(r" + s_i + ") + args.biases.Read(" + s_i + ");\n"; c += " args.dst_tensor.Write(res, X, Y, " + s_i + ");\n"; c += " }\n"; } c += "}\n"; return c; } bool IsDotConvBetter(int src_channels, int dst_channels) { if (dst_channels % 4 == 0) { return false; } if (src_channels % 4 == 0) { return true; } const int src_depth = DivideRoundUp(src_channels, 4); const int dst_depth = DivideRoundUp(dst_channels, 4); return dst_channels * src_depth < src_channels * dst_depth; } } bool IsConvConstantsSupported(const GpuInfo& gpu_info, const OperationDef& definition, const Convolution2DAttributes& attr) { if (gpu_info.IsApiOpenCl() && gpu_info.IsAdreno()) { const std::string kBadDriver = "OpenCL 2.0 QUALCOMM build: commit #7ff4f54 changeid #I4460aa6217 " "Date: 12/30/18"; if (absl::StrContains(gpu_info.opencl_info.platform_version, kBadDriver)) { return false; } } if (attr.groups != 1) { return false; } const bool use_dot_conv = IsDotConvBetter(attr.weights.shape.i, attr.weights.shape.o); const auto& w_shape = attr.weights.shape; const int src_depth = DivideRoundUp(w_shape.i, 4); const int dst_depth = DivideRoundUp(w_shape.o, 4); const int aligned_ch_count = use_dot_conv ? w_shape.o * src_depth * 4 : w_shape.i * dst_depth * 4; const int filters_count = aligned_ch_count * w_shape.h * w_shape.w; const int float_size = definition.precision == CalculationsPrecision::F32 ? sizeof(float) : sizeof(half); const int filters_buffer_size = filters_count * float_size; const int kConstantMaxSize = GetOptimalMaxConstantSize(gpu_info); const int flt4_registers = DivideRoundUp(w_shape.o, 4); return filters_buffer_size <= kConstantMaxSize && flt4_registers <= 8; } GPUOperation CreateConvConstants(const GpuInfo& gpu_info, const OperationDef& definition, const Convolution2DAttributes& attr) { const bool use_dot_conv = IsDotConvBetter(attr.weights.shape.i, attr.weights.shape.o); GPUOperation op(definition); UploadWeightsForConvConstants(attr.weights, gpu_info, definition.precision, use_dot_conv, &op); op.args_.AddInt("stride_x", attr.strides.w); op.args_.AddInt("stride_y", attr.strides.h); op.args_.AddInt("padding_x", -attr.padding.prepended.w); op.args_.AddInt("padding_y", -attr.padding.prepended.h); op.args_.AddInt("dilation_x", attr.dilations.w); op.args_.AddInt("dilation_y", attr.dilations.h); op.tensor_to_grid_ = TensorToGrid::kWBToX_HDToY_ZIs1; bool x_oob_reads = attr.padding.appended.w != 0 || attr.padding.prepended.w != 0; bool y_oob_reads = attr.padding.appended.h != 0 || attr.padding.prepended.h != 0; op.code_ = GenerateConvolutionConstantCode(gpu_info, definition, attr.weights.shape, x_oob_reads, y_oob_reads, use_dot_conv, &op); if (definition.precision == CalculationsPrecision::F16 && gpu_info.IsAdreno() && gpu_info.adreno_info.IsAdreno3xx()) { op.compiler_options_.push_back(CompilerOptions::kAdrenoFullSimd); } if (definition.precision != CalculationsPrecision::F32 && gpu_info.IsPowerVR()) { op.compiler_options_.push_back(CompilerOptions::kClDisableOptimizations); } TensorDescriptor bias_tensor_desc = CreateConstantLinearTensorDescriptor( gpu_info, definition.src_tensors[0].GetDataType(), attr.bias); op.args_.AddObject("biases", std::make_unique<TensorDescriptor>( std::move(bias_tensor_desc))); return op; } } }
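IsConvConstantsSupported above accepts a convolution only if the rearranged weights fit the per-vendor constant-memory budget and the output needs at most eight FLT4 accumulators. The sketch below (standalone, with an assumed OHWI weights shape and F16 weights) reproduces that size estimate; it is an illustration under those assumptions, not a replacement for the real check.

// Hedged sketch: reproduces the buffer-size estimate used by IsConvConstantsSupported().
// The OHWI shape, precision, and constant-memory budget below are example values.
#include <cstdio>

int DivideRoundUp(int n, int d) { return (n + d - 1) / d; }

int main() {
  int o = 8, h = 3, w = 3, i = 6;  // assumed weights shape OHWI
  bool use_dot_conv = false;       // what IsDotConvBetter(i, o) returns for these channels
  const int src_depth = DivideRoundUp(i, 4);
  const int dst_depth = DivideRoundUp(o, 4);
  const int aligned_ch = use_dot_conv ? o * src_depth * 4 : i * dst_depth * 4;
  const int filters_count = aligned_ch * h * w;
  const int float_size = 2;        // F16 weights
  const int buffer_bytes = filters_count * float_size;
  const int budget = 256 * 14;     // budget used for newer Adreno GPUs in the code above
  std::printf("weights buffer: %d bytes, budget: %d bytes, FLT4 registers: %d\n",
              buffer_bytes, budget, DivideRoundUp(o, 4));
  return 0;
}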
#include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/tasks/conv_constants_test_util.h" namespace tflite { namespace gpu { namespace cl { TEST_F(OpenCLOperationTest, ConvConstantsSimpleWeights) { const auto status = ConvConstantsSimpleWeightsTest(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } TEST_F(OpenCLOperationTest, ConvConstants) { const auto status = ConvConstantsTest(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } } } }
1020
cpp
tensorflow/tensorflow
winograd
tensorflow/lite/delegates/gpu/common/tasks/winograd.cc
tensorflow/lite/delegates/gpu/cl/kernels/winograd_test.cc
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_WINOGRAD_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_WINOGRAD_H_ #include <string> #include "tensorflow/lite/delegates/gpu/common/data_type.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/task/gpu_operation.h" #include "tensorflow/lite/delegates/gpu/common/task/tensor_desc.h" namespace tflite { namespace gpu { class Winograd4x4To36 : public GPUOperation { public: Winograd4x4To36() = default; void GetPossibleKernelWorkGroups( TuningType tuning_type, const GpuInfo& gpu_info, const KernelInfo& kernel_info, std::vector<int3>* work_groups) const override { work_groups->push_back(work_group_size_); } int3 GetGridSize() const override; absl::Status BindArguments(ArgumentsBinder* args) override; Winograd4x4To36(Winograd4x4To36&& kernel) = default; Winograd4x4To36& operator=(Winograd4x4To36&& kernel) = default; Winograd4x4To36(const Winograd4x4To36&) = delete; Winograd4x4To36& operator=(const Winograd4x4To36&) = delete; private: Winograd4x4To36(const OperationDef& definition, const Padding2D& padding) : GPUOperation(definition), padding_(padding) {} friend Winograd4x4To36 CreateWinograd4x4To36(const OperationDef& definition, const Padding2D& padding, const GpuInfo& gpu_info); Padding2D padding_; }; Winograd4x4To36 CreateWinograd4x4To36(const OperationDef& definition, const Padding2D& padding, const GpuInfo& gpu_info); class Winograd4x4To36TileX6 : public GPUOperation { public: Winograd4x4To36TileX6() = default; Winograd4x4To36TileX6(const OperationDef& definition, const Padding2D& padding, const GpuInfo& gpu_info); absl::Status BindArguments(ArgumentsBinder* args) override; int3 GetGridSize() const override; void GetPossibleKernelWorkGroups( TuningType tuning_type, const GpuInfo& gpu_info, const KernelInfo& kernel_info, std::vector<int3>* work_groups) const override; Winograd4x4To36TileX6(Winograd4x4To36TileX6&& operation) = default; Winograd4x4To36TileX6& operator=(Winograd4x4To36TileX6&& operation) = default; Winograd4x4To36TileX6(const Winograd4x4To36TileX6&) = delete; Winograd4x4To36TileX6& operator=(const Winograd4x4To36TileX6&) = delete; private: friend Winograd4x4To36TileX6 CreateWinograd4x4To36TileX6( const GpuInfo& gpu_info, const OperationDef& definition, const Padding2D& padding); void UploadBt(); std::string GetWinograd4x4To36TileX6Code(const OperationDef& op_def, const GpuInfo& gpu_info); int3 SelectBestWorkGroup(const KernelInfo& kernel_info) const; Padding2D padding_; }; Winograd4x4To36TileX6 CreateWinograd4x4To36TileX6( const GpuInfo& gpu_info, const OperationDef& definition, const Padding2D& padding); class Winograd36To4x4 : public GPUOperation { public: Winograd36To4x4() = default; void GetPossibleKernelWorkGroups( TuningType tuning_type, const GpuInfo& gpu_info, const KernelInfo& kernel_info, std::vector<int3>* work_groups) const override { work_groups->push_back(work_group_size_); } int3 GetGridSize() const override; Winograd36To4x4(Winograd36To4x4&& kernel) = default; Winograd36To4x4& operator=(Winograd36To4x4&& kernel) = default; Winograd36To4x4(const Winograd36To4x4&) = delete; Winograd36To4x4& operator=(const Winograd36To4x4&) = delete; private: explicit Winograd36To4x4(const OperationDef& definition) : GPUOperation(definition) {} friend Winograd36To4x4 CreateWinograd36To4x4( const OperationDef& definition, const tflite::gpu::Tensor<Linear, DataType::FLOAT32>& biases); }; Winograd36To4x4 CreateWinograd36To4x4( const OperationDef& definition, const 
tflite::gpu::Tensor<Linear, DataType::FLOAT32>& biases); class Winograd36To4x4Tile4x1 : public GPUOperation { public: Winograd36To4x4Tile4x1() = default; Winograd36To4x4Tile4x1(const OperationDef& definition, const GpuInfo& gpu_info); absl::Status BindArguments(ArgumentsBinder* args) override; int3 GetGridSize() const override; void GetPossibleKernelWorkGroups( TuningType tuning_type, const GpuInfo& gpu_info, const KernelInfo& kernel_info, std::vector<int3>* work_groups) const override; Winograd36To4x4Tile4x1(Winograd36To4x4Tile4x1&& operation) = default; Winograd36To4x4Tile4x1& operator=(Winograd36To4x4Tile4x1&& operation) = default; Winograd36To4x4Tile4x1(const Winograd36To4x4Tile4x1&) = delete; Winograd36To4x4Tile4x1& operator=(const Winograd36To4x4Tile4x1&) = delete; private: friend Winograd36To4x4Tile4x1 CreateWinograd36To4x4Tile4x1( const GpuInfo& gpu_info, const OperationDef& definition, const tflite::gpu::Tensor<Linear, DataType::FLOAT32>& biases); void UploadAt(); std::string GetWinograd36To4x4Tile4x1Code(const OperationDef& op_def, const GpuInfo& gpu_info); int3 SelectBestWorkGroup(const KernelInfo& kernel_info) const; }; Winograd36To4x4Tile4x1 CreateWinograd36To4x4Tile4x1( const GpuInfo& gpu_info, const OperationDef& definition, const tflite::gpu::Tensor<Linear, DataType::FLOAT32>& biases); } } #endif #include "tensorflow/lite/delegates/gpu/common/tasks/winograd.h" #include <cstring> #include <memory> #include <string> #include <utility> #include <vector> #include "absl/strings/str_format.h" #include "tensorflow/lite/delegates/gpu/common/data_type.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/task/work_group_picking.h" #include "tensorflow/lite/delegates/gpu/common/winograd_util.h" namespace tflite { namespace gpu { namespace { void VectorToKernelBufferDesc(const std::vector<float>& data, DataType data_type, BufferDescriptor* buffer_desc) { buffer_desc->element_type = data_type; buffer_desc->element_size = 1; buffer_desc->memory_type = MemoryType::CONSTANT; buffer_desc->attributes.push_back("kernel_global_space"); buffer_desc->size = SizeOf(data_type) * data.size(); buffer_desc->data.resize(buffer_desc->size); if (data_type == DataType::FLOAT32) { memcpy(buffer_desc->data.data(), data.data(), buffer_desc->size); } else { half* hf_ptr = reinterpret_cast<half*>(buffer_desc->data.data()); for (int i = 0; i < data.size(); ++i) { hf_ptr[i] = data[i]; } } } std::string GetKernelWinograd4x4To36(const GpuInfo& gpu_info, const OperationDef& op_def) { std::string c; const auto src_desc = op_def.src_tensors[0]; c += "MAIN_FUNCTION($0) {\n"; if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) { c += " int linear_id = GLOBAL_ID_0;\n"; c += " int X = (linear_id / args.dst_tensor.Batch()) * 4;\n"; c += " int B = linear_id % args.dst_tensor.Batch();\n"; c += " args.src_tensor.SetBatchRef(B);\n"; c += " args.dst_tensor.SetBatchRef(B);\n"; } else { c += " int X = GLOBAL_ID_0 * 4;\n"; } c += R"( int Y = GLOBAL_ID_1 * 4; int S = GLOBAL_ID_2; if (X / 4 >= args.tiles_x || Y / 4 >= args.tiles_y) return; FLT4 I[6][6]; for (int y = 0; y < 6; ++y) { for (int x = 0; x < 6; ++x) { I[y][x] = INIT_FLT4(0.0f); } } )"; for (int y = 0; y < 6; ++y) { const std::string s_y = std::to_string(y); c += " {\n"; c += " int coord_y = Y + " + s_y + " + args.padding_y;\n"; if (!src_desc.SupportsZeroClamp(Axis::HEIGHT, gpu_info)) { c += " bool in_y = coord_y >= 0 && coord_y < " "args.src_tensor.Height();\n"; c 
+= " coord_y = clamp(coord_y, 0, args.src_tensor.Height() - 1);\n"; } for (int x = 0; x < 6; ++x) { const std::string s_x = std::to_string(x); c += " {\n"; c += " int coord_x = X + " + s_x + " + args.padding_x;\n"; if (!src_desc.SupportsZeroClamp(Axis::WIDTH, gpu_info)) { c += " bool in_x = coord_x >= 0 && coord_x < " "args.src_tensor.Width();\n"; c += " coord_x = clamp(coord_x, 0, args.src_tensor.Width()-1);\n"; } std::string multiplier; if (!src_desc.SupportsZeroClamp(Axis::WIDTH, gpu_info) && !src_desc.SupportsZeroClamp(Axis::HEIGHT, gpu_info)) { multiplier = " * INIT_FLT(in_y && in_x)"; } else if (!src_desc.SupportsZeroClamp(Axis::WIDTH, gpu_info)) { multiplier = " * INIT_FLT(in_x)"; } else if (!src_desc.SupportsZeroClamp(Axis::HEIGHT, gpu_info)) { multiplier = " * INIT_FLT(in_y)"; } c += " FLT4 src = args.src_tensor.Read(coord_x, coord_y, S)" + multiplier + ";\n"; c += " I[0][" + s_x + "] += args.Bt.Read(" + std::to_string(y) + ") * src;\n"; c += " I[1][" + s_x + "] += args.Bt.Read(" + std::to_string(y + 6) + ") * src;\n"; c += " I[2][" + s_x + "] += args.Bt.Read(" + std::to_string(y + 12) + ") * src;\n"; c += " I[3][" + s_x + "] += args.Bt.Read(" + std::to_string(y + 18) + ") * src;\n"; c += " I[4][" + s_x + "] += args.Bt.Read(" + std::to_string(y + 24) + ") * src;\n"; c += " I[5][" + s_x + "] += args.Bt.Read(" + std::to_string(y + 30) + ") * src;\n"; c += " }\n"; } c += " }\n"; } c += R"( int dst_x = Y / 4 * args.tiles_x + X / 4; for (int y = 0; y < 6; ++y) { FLT4 value = I[y][0] + args.Bt.Read(2) * I[y][2] + args.Bt.Read(4) * I[y][4]; args.dst_tensor.Write(value, dst_x, y * 6 + 0, S); value = args.Bt.Read(7) * I[y][1] + args.Bt.Read(8) * I[y][2] + args.Bt.Read(9) * I[y][3] + args.Bt.Read(10) * I[y][4]; args.dst_tensor.Write(value, dst_x, y * 6 + 1, S); value = args.Bt.Read(13) * I[y][1] + args.Bt.Read(14) * I[y][2] + args.Bt.Read(15) * I[y][3] + args.Bt.Read(16) * I[y][4]; args.dst_tensor.Write(value, dst_x, y * 6 + 2, S); value = args.Bt.Read(19) * I[y][1] + args.Bt.Read(20) * I[y][2] + args.Bt.Read(21) * I[y][3] + args.Bt.Read(22) * I[y][4]; args.dst_tensor.Write(value, dst_x, y * 6 + 3, S); value = args.Bt.Read(25) * I[y][1] + args.Bt.Read(26) * I[y][2] + args.Bt.Read(27) * I[y][3] + args.Bt.Read(28) * I[y][4]; args.dst_tensor.Write(value, dst_x, y * 6 + 4, S); value = args.Bt.Read(31) * I[y][1] + args.Bt.Read(33) * I[y][3] + I[y][5]; args.dst_tensor.Write(value, dst_x, y * 6 + 5, S); } } )"; return c; } std::string GetKernelWinograd36To4x4(const OperationDef& op_def) { std::string c; const auto src_desc = op_def.src_tensors[0]; c += "MAIN_FUNCTION($0) {\n"; if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) { c += " int linear_id = GLOBAL_ID_0;\n"; c += " int tile_id = linear_id / args.dst_tensor.Batch();\n"; c += " int B = linear_id % args.dst_tensor.Batch();\n"; c += " args.src_tensor.SetBatchRef(B);\n"; c += " args.dst_tensor.SetBatchRef(B);\n"; } else { c += " int tile_id = GLOBAL_ID_0;\n"; } c += R"( int Z = GLOBAL_ID_2; int tiles_count_x = (args.dst_tensor.Width() + 3) / 4; int tile_x = (tile_id % tiles_count_x) * 4; int tile_y = (tile_id / tiles_count_x) * 4; if (tile_x >= args.dst_tensor.Width() || tile_y >= args.dst_tensor.Height()) return; FLT4 I[4][6]; for (int y = 0; y < 4; ++y) { for (int x = 0; x < 6; ++x) { I[y][x] = INIT_FLT4(0.0f); } } for (int y = 0; y < 6; ++y) { for (int x = 0; x < 6; ++x) { FLT4 src = args.src_tensor.Read(tile_id, y * 6 + x, Z); I[0][x] += src * args.At.Read(y); I[1][x] += src * args.At.Read(y + 6); I[2][x] += src * args.At.Read(y + 12); 
I[3][x] += src * args.At.Read(y + 18); } } FLT4 bias_val = args.biases.Read(Z); for (int y = 0; y < 4; ++y) { FLT4 t0 = I[y][1] + I[y][2]; FLT4 t1 = I[y][3] + I[y][4]; if (tile_x < args.dst_tensor.Width() && tile_y + y < args.dst_tensor.Height()) { FLT4 value = I[y][0] + t0 + t1 + bias_val; args.dst_tensor.Write(value, tile_x, tile_y + y, Z); } FLT4 t2 = I[y][1] - I[y][2]; FLT4 t3 = I[y][3] - I[y][4]; if (tile_x + 1 < args.dst_tensor.Width() && tile_y + y < args.dst_tensor.Height()) { FLT4 value = t2 * args.At.Read(7) + t3 * args.At.Read(9) + bias_val; args.dst_tensor.Write(value, tile_x + 1, tile_y + y, Z); } if (tile_x + 2 < args.dst_tensor.Width() && tile_y + y < args.dst_tensor.Height()) { FLT4 value = t0 * args.At.Read(13) + t1 * args.At.Read(15) + bias_val; args.dst_tensor.Write(value, tile_x + 2, tile_y + y, Z); } if (tile_x + 3 < args.dst_tensor.Width() && tile_y + y < args.dst_tensor.Height()) { FLT4 value = t2 * args.At.Read(19) + t3 * args.At.Read(21) + I[y][5] + bias_val; args.dst_tensor.Write(value, tile_x + 3, tile_y + y, Z); } } } )"; return c; } } int3 Winograd4x4To36::GetGridSize() const { int new_width = src_[0]->Width() + padding_.prepended.w + padding_.appended.w - 2; int new_height = src_[0]->Height() + padding_.prepended.h + padding_.appended.h - 2; int tiles_x = DivideRoundUp(new_width, 4); int tiles_y = DivideRoundUp(new_height, 4); return int3(tiles_x * dst_[0]->Batch(), tiles_y, src_[0]->Slices()); } absl::Status Winograd4x4To36::BindArguments(ArgumentsBinder* args) { int new_width = src_[0]->Width() + padding_.prepended.w + padding_.appended.w - 2; int new_height = src_[0]->Height() + padding_.prepended.h + padding_.appended.h - 2; int tiles_x = DivideRoundUp(new_width, 4); int tiles_y = DivideRoundUp(new_height, 4); RETURN_IF_ERROR(args->SetInt("tiles_x", tiles_x)); RETURN_IF_ERROR(args->SetInt("tiles_y", tiles_y)); return absl::OkStatus(); } Winograd4x4To36 CreateWinograd4x4To36(const OperationDef& definition, const Padding2D& padding, const GpuInfo& gpu_info) { Winograd4x4To36 desc(definition, padding); desc.code_ = GetKernelWinograd4x4To36(gpu_info, definition); desc.AddSrcTensor("src_tensor", definition.src_tensors[0]); desc.AddDstTensor("dst_tensor", definition.dst_tensors[0]); desc.args_.AddInt("padding_x", -padding.prepended.w); desc.args_.AddInt("padding_y", -padding.prepended.h); desc.args_.AddInt("tiles_x"); desc.args_.AddInt("tiles_y"); BufferDescriptor buffer_desc; VectorToKernelBufferDesc(BtMatrixForWinograd4x4To6x6(), definition.GetDataType(), &buffer_desc); desc.args_.AddObject( "Bt", std::make_unique<BufferDescriptor>(std::move(buffer_desc))); desc.work_group_size_ = int3(8, 4, 1); return desc; } Winograd4x4To36TileX6::Winograd4x4To36TileX6(const OperationDef& definition, const Padding2D& padding, const GpuInfo& gpu_info) : GPUOperation(definition), padding_(padding) { work_group_size_ = int3(32, 1, 1); code_ = GetWinograd4x4To36TileX6Code(definition_, gpu_info); if (gpu_info.IsAdreno()) { compiler_options_.push_back(CompilerOptions::kAdrenoMoreWaves); } if (definition_.precision == CalculationsPrecision::F16 && gpu_info.IsPowerVR()) { compiler_options_.push_back(CompilerOptions::kClFastRelaxedMath); } } std::string Winograd4x4To36TileX6::GetWinograd4x4To36TileX6Code( const OperationDef& op_def, const GpuInfo& gpu_info) { std::string c; const auto& src_desc = op_def.src_tensors[0]; AddSrcTensor("src_tensor", op_def.src_tensors[0]); AddDstTensor("dst_tensor", op_def.dst_tensors[0]); args_.AddInt("padding_x"); args_.AddInt("padding_y"); 
args_.AddInt("tiles_total"); args_.AddInt("tiles_x"); c += "MAIN_FUNCTION($0) {\n"; if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) { c += " int linear_id = GLOBAL_ID_0;\n"; c += " int DST_X = linear_id / args.dst_tensor.Batch();\n"; c += " int B = linear_id % args.dst_tensor.Batch();\n"; c += " args.src_tensor.SetBatchRef(B);\n"; c += " args.dst_tensor.SetBatchRef(B);\n"; } else { c += " int DST_X = GLOBAL_ID_0;\n"; } c += " int DST_Y = GLOBAL_ID_1;\n"; c += " int DST_Z = GLOBAL_ID_2;\n"; c += " if (DST_X >= args.tiles_total || DST_Y >= 6 || DST_Z >= " "args.dst_tensor.Slices()) {\n"; c += " return; \n"; c += " }\n"; c += " int tile_x = (DST_X % args.tiles_x) * 4;\n"; c += " int tile_y = (DST_X / args.tiles_x) * 4;\n"; c += " FLT4 I0, I1, I2, I3, I4, I5;\n"; c += " FLT bt_ar[6];\n"; c += " FLT4 t0 = args.bt_non_uniform.Read(DST_Y * 2 + 0);\n"; c += " FLT4 t1 = args.bt_non_uniform.Read(DST_Y * 2 + 1);\n"; c += " DST_Y *= 6;\n"; c += " bt_ar[0] = t0.x;\n"; c += " bt_ar[1] = t0.y;\n"; c += " bt_ar[2] = t0.z;\n"; c += " bt_ar[3] = t0.w;\n"; c += " bt_ar[4] = t1.x;\n"; c += " bt_ar[5] = t1.y;\n"; auto read_src = [&](const std::string& src, const std::string& xs) { std::string read_statement; read_statement = "args.src_tensor.Read(xc" + xs + ", yc, DST_Z)"; std::string multiplier; if (!src_desc.SupportsZeroClamp(Axis::WIDTH, gpu_info)) { multiplier += " * m" + xs + "_x"; } if (!src_desc.SupportsZeroClamp(Axis::HEIGHT, gpu_info)) { multiplier += " * INIT_FLT(iny)"; } c += " FLT4 " + src + " = " + read_statement + multiplier + ";\n"; }; for (int x = 0; x < 6; ++x) { const std::string xs = std::to_string(x); c += " int xc" + xs + " = tile_x + args.padding_x + " + xs + ";\n"; if (!src_desc.SupportsZeroClamp(Axis::WIDTH, gpu_info)) { c += " bool inx" + xs + " = (xc" + xs + " >= 0 && xc" + xs + " < args.src_tensor.Width());\n"; c += " FLT m" + xs + "_x = INIT_FLT(inx" + xs + ");\n"; c += " xc" + xs + " = clamp(xc" + xs + ", 0, args.src_tensor.Width() - 1);\n"; } } const bool manual_unroll = !(op_def.precision == CalculationsPrecision::F32 && gpu_info.IsMali()); if (manual_unroll) { c += " {\n"; c += " int yc = tile_y + args.padding_y;\n"; if (!src_desc.SupportsZeroClamp(Axis::HEIGHT, gpu_info)) { c += " bool iny = (yc >= 0 && yc < args.src_tensor.Height());\n"; c += " yc = clamp(yc, 0, args.src_tensor.Height() - 1);\n"; c += " FLT bt = bt_ar[0] * INIT_FLT(iny);\n"; } else { c += " FLT bt = bt_ar[0];\n"; } for (int x = 0; x < 6; ++x) { const std::string xs = std::to_string(x); const std::string src = "src" + xs; read_src(src, xs); c += " I" + xs + " = bt * " + src + ";\n"; } c += " }\n"; for (int y = 1; y < 6; ++y) { const std::string ys = std::to_string(y); c += " {\n"; c += " int yc = tile_y + args.padding_y + (" + ys + ");\n"; if (!src_desc.SupportsZeroClamp(Axis::HEIGHT, gpu_info)) { c += " bool iny = (yc >= 0 && yc < args.src_tensor.Height());\n"; c += " yc = clamp(yc, 0, args.src_tensor.Height() - 1);\n"; c += " FLT bt = bt_ar[" + ys + "] * INIT_FLT(iny);\n"; } else { c += " FLT bt = bt_ar[" + ys + "];\n"; } for (int x = 0; x < 6; ++x) { const std::string xs = std::to_string(x); const std::string src = "src" + xs; read_src(src, xs); c += " I" + xs + " += bt * " + src + ";\n"; } c += " }\n"; } } else { c += " I0 = INIT_FLT4(0.0f);\n"; c += " I1 = INIT_FLT4(0.0f);\n"; c += " I2 = INIT_FLT4(0.0f);\n"; c += " I3 = INIT_FLT4(0.0f);\n"; c += " I4 = INIT_FLT4(0.0f);\n"; c += " I5 = INIT_FLT4(0.0f);\n"; c += " for (int y = 0; y < 6; ++y) {\n"; c += " int yc = tile_y + args.padding_y + y;\n"; if 
(!src_desc.SupportsZeroClamp(Axis::HEIGHT, gpu_info)) { c += " bool iny = (yc >= 0 && yc < args.src_tensor.Height());\n"; c += " yc = clamp(yc, 0, args.src_tensor.Height() - 1);\n"; c += " FLT bt = bt_ar[y] * INIT_FLT(iny);\n"; } else { c += " FLT bt = bt_ar[y];\n"; } for (int x = 0; x < 6; ++x) { const std::string xs = std::to_string(x); const std::string src = "src" + xs; read_src(src, xs); c += " I" + xs + " += bt * " + src + ";\n"; } c += " }\n"; } c += " {\n"; c += " FLT4 r0 = I0 + args.Bt.Read(2) * I2 + args.Bt.Read(4) * I4;\n"; c += " args.dst_tensor.Write(r0, DST_X, DST_Y, DST_Z);\n"; c += " DST_Y++;\n"; c += " }\n"; c += " {\n"; c += " FLT4 r0 = args.Bt.Read(7) * I1 + args.Bt.Read(8) * I2 + " "args.Bt.Read(9) * I3 + args.Bt.Read(10) * I4;\n"; c += " args.dst_tensor.Write(r0, DST_X, DST_Y, DST_Z);\n"; c += " DST_Y++;\n"; c += " }\n"; c += " {\n"; c += " FLT4 r0 = args.Bt.Read(13) * I1 + args.Bt.Read(14) * I2 + " "args.Bt.Read(15) * I3 + args.Bt.Read(16) * I4;\n"; c += " args.dst_tensor.Write(r0, DST_X, DST_Y, DST_Z);\n"; c += " DST_Y++;\n"; c += " }\n"; c += " {\n"; c += " FLT4 r0 = args.Bt.Read(19) * I1 + args.Bt.Read(20) * I2 + " "args.Bt.Read(21) * I3 + args.Bt.Read(22) * I4;\n"; c += " args.dst_tensor.Write(r0, DST_X, DST_Y, DST_Z);\n"; c += " DST_Y++;\n"; c += " }\n"; c += " {\n"; c += " FLT4 r0 = args.Bt.Read(25) * I1 + args.Bt.Read(26) * I2 + " "args.Bt.Read(27) * I3 + args.Bt.Read(28) * I4;\n"; c += " args.dst_tensor.Write(r0, DST_X, DST_Y, DST_Z);\n"; c += " DST_Y++;\n"; c += " }\n"; c += " {\n"; c += " FLT4 r0 = args.Bt.Read(31) * I1 + args.Bt.Read(33) * I3 + I5;\n"; c += " args.dst_tensor.Write(r0, DST_X, DST_Y, DST_Z);\n"; c += " DST_Y++;\n"; c += " }\n"; c += "}\n"; return c; } void Winograd4x4To36TileX6::UploadBt() { tflite::gpu::Tensor<Linear, DataType::FLOAT32> bt_aligned; bt_aligned.shape = Linear(6 * 8); bt_aligned.data.resize(6 * 8); auto bt_mat = BtMatrixForWinograd4x4To6x6(); for (int y = 0; y < 6; ++y) { for (int x = 0; x < 6; ++x) { bt_aligned.data[y * 8 + x] = bt_mat[y * 6 + x]; } bt_aligned.data[y * 8 + 6] = 0.0f; bt_aligned.data[y * 8 + 7] = 0.0f; } TensorDescriptor bt_tensor_desc = CreateConstantLinearTensorDescriptor( definition_.src_tensors[0].GetDataType(), definition_.src_tensors[0].GetStorageType(), bt_aligned); args_.AddObject("bt_non_uniform", std::make_unique<TensorDescriptor>( std::move(bt_tensor_desc))); BufferDescriptor buffer_desc; VectorToKernelBufferDesc(bt_mat, definition_.GetDataType(), &buffer_desc); args_.AddObject("Bt", std::make_unique<BufferDescriptor>(std::move(buffer_desc))); } int3 Winograd4x4To36TileX6::SelectBestWorkGroup( const KernelInfo& kernel_info) const { const std::vector<int3> wgs = {{8, 6, 4}, {8, 6, 2}, {4, 6, 2}, {4, 6, 2}, {2, 6, 2}, {2, 6, 1}, {1, 6, 1}, {1, 3, 1}, {1, 1, 1}}; return GetFirstSuitableWorkGroup(wgs, kernel_info.max_work_group_size); } absl::Status Winograd4x4To36TileX6::BindArguments(ArgumentsBinder* args) { const int tiles_x = DivideRoundUp( src_[0]->Width() + padding_.prepended.w + padding_.appended.w - 2, 4); const int tiles_y = DivideRoundUp( src_[0]->Height() + padding_.prepended.h + padding_.appended.h - 2, 4); const int tiles_total = tiles_x * tiles_y; RETURN_IF_ERROR(args->SetInt("padding_x", -padding_.prepended.w)); RETURN_IF_ERROR(args->SetInt("padding_y", -padding_.prepended.h)); RETURN_IF_ERROR(args->SetInt("tiles_total", tiles_total)); RETURN_IF_ERROR(args->SetInt("tiles_x", tiles_x)); return absl::OkStatus(); } int3 Winograd4x4To36TileX6::GetGridSize() const { const int grid_x = 
dst_[0]->Width() * dst_[0]->Batch(); const int grid_y = 6; const int grid_z = dst_[0]->Slices(); return int3(grid_x, grid_y, grid_z); } void Winograd4x4To36TileX6::GetPossibleKernelWorkGroups( TuningType tuning_type, const GpuInfo& gpu_info, const KernelInfo& kernel_info, std::vector<int3>* work_groups) const { if (gpu_info.IsIntel()) { work_groups->push_back(int3(4, 6, 1)); return; } switch (tuning_type) { case TuningType::kExhaustive: GetPossibleWorkGroups(tuning_type, gpu_info, kernel_info, grid_size_, work_groups); return; case TuningType::kFast: default: work_groups->push_back(SelectBestWorkGroup(kernel_info)); return; } } Winograd4x4To36TileX6 CreateWinograd4x4To36TileX6( const GpuInfo& gpu_info, const OperationDef& definition, const Padding2D& padding) { Winograd4x4To36TileX6 result(definition, padding, gpu_info); result.UploadBt(); return result; } int3 Winograd36To4x4::GetGridSize() const { return int3(src_[0]->Width() * dst_[0]->Batch(), 1, src_[0]->Slices()); } Winograd36To4x4 CreateWinograd36To4x4( const OperationDef& definition, const tflite::gpu::Tensor<Linear, DataType::FLOAT32>& biases) { Winograd36To4x4 desc(definition); desc.code_ = GetKernelWinograd36To4x4(definition); desc.AddSrcTensor("src_tensor", definition.src_tensors[0]); desc.AddDstTensor("dst_tensor", definition.dst_tensors[0]); TensorDescriptor bias_tensor_desc = CreateConstantLinearTensorDescriptor( definition.src_tensors[0].GetDataType(), definition.src_tensors[0].GetStorageType(), biases); desc.args_.AddObject("biases", std::make_unique<TensorDescriptor>( std::move(bias_tensor_desc))); BufferDescriptor buffer_desc; VectorToKernelBufferDesc(AtMatrixForWinograd4x4To6x6(), definition.GetDataType(), &buffer_desc); desc.args_.AddObject( "At", std::make_unique<BufferDescriptor>(std::move(buffer_desc))); desc.work_group_size_ = int3(32, 1, 1); return desc; } Winograd36To4x4Tile4x1::Winograd36To4x4Tile4x1(const OperationDef& definition, const GpuInfo& gpu_info) : GPUOperation(definition) { work_group_size_ = int3(32, 1, 1); if (definition_.precision == CalculationsPrecision::F16 && gpu_info.IsPowerVR()) { compiler_options_.push_back(CompilerOptions::kClFastRelaxedMath); } code_ = GetWinograd36To4x4Tile4x1Code(definition_, gpu_info); } std::string Winograd36To4x4Tile4x1::GetWinograd36To4x4Tile4x1Code( const OperationDef& op_def, const GpuInfo& gpu_info) { std::string c; AddSrcTensor("src_tensor", op_def.src_tensors[0]); AddDstTensor("dst_tensor", op_def.dst_tensors[0]); args_.AddInt("tiles_x"); c += "MAIN_FUNCTION($0) {\n"; if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) { c += " int linear_id = GLOBAL_ID_0;\n"; c += " int tile_id = linear_id / args.dst_tensor.Batch();\n"; c += " int B = linear_id % args.dst_tensor.Batch();\n"; c += " args.src_tensor.SetBatchRef(B);\n"; c += " args.dst_tensor.SetBatchRef(B);\n"; } else { c += " int tile_id = GLOBAL_ID_0;\n"; } c += " int DST_Y = GLOBAL_ID_1;\n"; c += " int DST_Z = GLOBAL_ID_2;\n"; c += " int tile_x = (tile_id % args.tiles_x) * 4;\n"; c += " int tile_y = (tile_id / args.tiles_x) * 4 + DST_Y;\n"; c += " if (tile_x >= args.dst_tensor.Width() || tile_y >= " "args.dst_tensor.Height() || DST_Z >= args.dst_tensor.Slices()) {\n"; c += " return; \n"; c += " }\n"; c += " FLT4 I0, I1, I2, I3, I4, I5;\n"; c += " FLT at_ar[6];\n"; c += " FLT4 t00 = args.at_non_uniform.Read(DST_Y * 2 + 0);\n"; c += " FLT4 t01 = args.at_non_uniform.Read(DST_Y * 2 + 1);\n"; c += " at_ar[0] = t00.x;\n"; c += " at_ar[1] = t00.y;\n"; c += " at_ar[2] = t00.z;\n"; c += " at_ar[3] = t00.w;\n"; c += " 
at_ar[4] = t01.x;\n"; c += " at_ar[5] = t01.y;\n"; const bool manual_unroll = !(op_def.precision == CalculationsPrecision::F32 && gpu_info.IsMali()); if (manual_unroll) { c += " {\n"; c += " FLT at = at_ar[0];\n"; for (int x = 0; x < 6; ++x) { const std::string yc = std::to_string(x); const std::string src = "src" + std::to_string(x); c += " FLT4 " + src + " = args.src_tensor.Read(tile_id, " + yc + ", DST_Z);\n"; c += " I" + std::to_string(x) + " = at * " + src + ";\n"; } c += " }\n"; for (int y = 1; y < 6; ++y) { c += " {\n"; c += " FLT at = at_ar[" + std::to_string(y) + "];\n"; for (int x = 0; x < 6; ++x) { const std::string yc = std::to_string(y * 6 + x); const std::string src = "src" + std::to_string(x); c += " FLT4 " + src + " = args.src_tensor.Read(tile_id, " + yc + ", DST_Z);\n"; c += " I" + std::to_string(x) + " += at * " + src + ";\n"; } c += " }\n"; } } else { c += " I0 = INIT_FLT4(0.0f);\n"; c += " I1 = INIT_FLT4(0.0f);\n"; c += " I2 = INIT_FLT4(0.0f);\n"; c += " I3 = INIT_FLT4(0.0f);\n"; c += " I4 = INIT_FLT4(0.0f);\n"; c += " I5 = INIT_FLT4(0.0f);\n"; c += " for (int y = 0; y < 6; ++y) {\n"; c += " FLT at = at_ar[y];\n"; for (int x = 0; x < 6; ++x) { const std::string src = "src" + std::to_string(x); c += " FLT4 " + src + " = args.src_tensor.Read(tile_id, y * 6 + " + std::to_string(x) + ", DST_Z);\n"; c += " I" + std::to_string(x) + " += at * " + src + ";\n"; } c += " }\n"; } c += " FLT4 t0 = I1 + I2;\n"; c += " FLT4 t1 = I3 + I4;\n"; c += " FLT4 bias_val = args.biases.Read(DST_Z);\n"; c += " {\n"; c += " FLT4 r0 = I0 + t0 + t1 + bias_val;\n"; c += " args.dst_tensor.Write(r0, tile_x, tile_y, DST_Z);\n"; c += " tile_x++;\n"; c += " }\n"; c += " FLT4 t2 = I1 - I2;\n"; c += " FLT4 t3 = I3 - I4;\n"; c += " if (tile_x < args.dst_tensor.Width()) {\n"; c += " FLT4 r0 = t2 * args.At.Read(7) + t3 * args.At.Read(9) + bias_val;\n"; c += " args.dst_tensor.Write(r0, tile_x, tile_y, DST_Z);\n"; c += " tile_x++;\n"; c += " }\n"; c += " if (tile_x < args.dst_tensor.Width()) {\n"; c += " FLT4 r0 = t0 * args.At.Read(13) + t1 * args.At.Read(15) + " "bias_val;\n"; c += " args.dst_tensor.Write(r0, tile_x, tile_y, DST_Z);\n"; c += " tile_x++;\n"; c += " }\n"; c += " if (tile_x < args.dst_tensor.Width()) {\n"; c += " FLT4 r0 = t2 * args.At.Read(19) + t3 * args.At.Read(21) + I5 + " "bias_val;\n"; c += " args.dst_tensor.Write(r0, tile_x, tile_y, DST_Z);\n"; c += " tile_x++;\n"; c += " }\n"; c += "}\n"; return c; } void Winograd36To4x4Tile4x1::UploadAt() { tflite::gpu::Tensor<Linear, DataType::FLOAT32> at_aligned; at_alig
#include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/tasks/winograd_test_util.h" namespace tflite { namespace gpu { namespace cl { TEST_F(OpenCLOperationTest, Winograd4x4To36TileX6) { auto status = Winograd4x4To36TileX6Test(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } TEST_F(OpenCLOperationTest, Winograd36To4x4Tile4x1) { auto status = Winograd36To4x4Tile4x1Test(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } TEST_F(OpenCLOperationTest, Winograd4x4To36) { auto status = Winograd4x4To36Test(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } TEST_F(OpenCLOperationTest, Winograd4x4To36Batch) { auto status = Winograd4x4To36BatchTest(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } TEST_F(OpenCLOperationTest, Winograd36To4x4) { auto status = Winograd36To4x4Test(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } } } }
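The grid sizes in the Winograd kernels above follow directly from the F(4x4, 3x3) tiling: the transform consumes 6x6 input patches that each produce a 4x4 output tile, which is where the "- 2" and the division by 4 come from. Below is a minimal host-side sketch of that tile-count computation; DivideRoundUp mirrors the helper used in the sources above, and the concrete input size and padding values are hypothetical.

#include <cstdio>

// Mirrors DivideRoundUp from the kernel sources above.
int DivideRoundUp(int n, int d) { return (n + d - 1) / d; }

int main() {
  // Hypothetical 224x224 input with 1 pixel of prepended and appended
  // padding per spatial axis, as a 3x3 convolution with SAME padding uses.
  const int width = 224, height = 224;
  const int pad_prepended = 1, pad_appended = 1;
  // 6x6 input patches overlap by 2 pixels and yield 4x4 output tiles.
  const int new_width = width + pad_prepended + pad_appended - 2;
  const int new_height = height + pad_prepended + pad_appended - 2;
  const int tiles_x = DivideRoundUp(new_width, 4);
  const int tiles_y = DivideRoundUp(new_height, 4);
  std::printf("tiles: %d x %d\n", tiles_x, tiles_y);  // 56 x 56
  return 0;
}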
1,021
cpp
tensorflow/tensorflow
select_v2
tensorflow/lite/delegates/gpu/common/tasks/select_v2.cc
tensorflow/lite/delegates/gpu/cl/kernels/select_v2_test.cc
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_SELECT_V2_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_SELECT_V2_H_ #include <string> #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/task/gpu_operation.h" namespace tflite { namespace gpu { GPUOperation CreateSelectV2(const OperationDef& definition, const SelectV2Attributes& attr = {}); } } #endif #include "tensorflow/lite/delegates/gpu/common/tasks/select_v2.h" #include <string> #include <utility> namespace tflite { namespace gpu { std::string GetSelectV2Code(const OperationDef& op_def, const SelectV2Attributes& attr, GPUOperation* op) { op->AddSrcTensor("cond_tensor", op_def.src_tensors[0]); op->AddSrcTensor("true_tensor", op_def.src_tensors[1]); op->AddSrcTensor("else_tensor", op_def.src_tensors[2]); op->AddDstTensor("dst_tensor", op_def.dst_tensors[0]); std::string c; c += "MAIN_FUNCTION($0) {\n"; if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) { c += " int linear_id = GLOBAL_ID_0;\n"; c += " int X = linear_id / args.dst_tensor.Batch();\n"; c += " int B = linear_id % args.dst_tensor.Batch();\n"; c += " args.cond_tensor.SetBatchRef(B);\n"; c += " args.dst_tensor.SetBatchRef(B);\n"; c += attr.broadcast_true ? "" : " args.true_tensor.SetBatchRef(B);\n"; c += attr.broadcast_false ? "" : " args.else_tensor.SetBatchRef(B);\n"; } else { c += " int X = GLOBAL_ID_0;\n"; } c += " int Y = GLOBAL_ID_1;\n"; c += " int Z = GLOBAL_ID_2;\n"; c += " if (X >= args.dst_tensor.Width() || Y >= args.dst_tensor.Height() || " "Z >= args.dst_tensor.Slices()) { \n"; c += " return; \n"; c += " } \n"; c += " FLT4 true_val, else_val;\n"; if (!attr.broadcast_true) { c += " true_val = args.true_tensor.Read(X, Y, Z);\n"; } else { c += " true_val = INIT_FLT4(args.true_tensor.Read(0, 0, 0, 0).x);\n"; } if (!attr.broadcast_false) { c += " else_val = args.else_tensor.Read(X, Y, Z);\n"; } else { c += " else_val = INIT_FLT4(args.else_tensor.Read(0, 0, 0, 0).x);\n"; } c += " bool should_gather_rows = \n"; if (attr.broadcast_true && attr.broadcast_false) { c += " true;\n"; } else { c += " args.dst_tensor.Slices() != args.cond_tensor.Slices();\n"; } c += " FLT4 res;\n"; if (attr.scalar_cond) { c += " bool cond = args.cond_tensor.Read<bool>(0, 0, 0).x;\n"; c += " res = cond ? true_val : else_val;\n"; } else { c += " if (should_gather_rows) {\n"; c += " bool cond = args.cond_tensor.Read<bool>(X, 0, 0).x;\n"; c += " res = cond ? true_val : else_val;\n"; c += " } else {\n"; c += " bool4 cond = args.cond_tensor.Read<bool>(0, Y, Z);\n"; c += " res = true_val;\n"; c += " res.x = cond.x ? true_val.x : else_val.x;\n"; c += " res.y = cond.y ? true_val.y : else_val.y;\n"; c += " res.z = cond.z ? true_val.z : else_val.z;\n"; c += " res.w = cond.w ? true_val.w : else_val.w;\n"; c += " }\n;"; } c += " args.dst_tensor.Write(res, X, Y, Z);\n"; c += "}\n"; return c; } GPUOperation CreateSelectV2(const OperationDef& definition, const SelectV2Attributes& attr) { GPUOperation op(definition); op.code_ = GetSelectV2Code(definition, attr, &op); op.tensor_to_grid_ = TensorToGrid::kWBToX_HDToY_SToZ; op.args_.AddInt("broadcast_true", attr.broadcast_true); op.args_.AddInt("broadcast_else", attr.broadcast_false); return op; } } }
#include <gtest/gtest.h> #include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h" #include "tensorflow/lite/delegates/gpu/common/tasks/select_v2_test_util.h" namespace tflite { namespace gpu { namespace cl { namespace { TEST_F(OpenCLOperationTest, SelectV2) { auto status = SelectV2Test(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } TEST_F(OpenCLOperationTest, SelectV2Batch) { auto status = SelectV2BatchTest(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } TEST_F(OpenCLOperationTest, SelectV2Channels) { auto status = SelectV2ChannelsTest(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } TEST_F(OpenCLOperationTest, SelectV2ChannelsBatch) { auto status = SelectV2ChannelsBatchTest(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } TEST_F(OpenCLOperationTest, SelectV2BroadcastTrue) { auto status = SelectV2BroadcastTrueTest(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } TEST_F(OpenCLOperationTest, SelectV2BroadcastFalse) { auto status = SelectV2BroadcastFalseTest(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } TEST_F(OpenCLOperationTest, SelectV2BroadcastBoth) { auto status = SelectV2BroadcastBothTest(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } TEST_F(OpenCLOperationTest, SelectV2ChannelsBroadcastFalse) { auto status = SelectV2ChannelsBroadcastFalseTest(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } } } } }
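For reference, the SelectV2 kernel above computes an element-wise cond ? true_val : else_val, where either branch may be broadcast from a single scalar. The host-side sketch below restates that semantics; the function name SelectV2Reference and the sample values are illustrative, not part of the delegate API.

#include <cstdio>
#include <vector>

// Host-side restatement of the SelectV2 kernel above: element-wise
// cond ? true_val : else_val, with optional scalar broadcast of a branch.
// broadcast_true / broadcast_false mirror the attribute names in the kernel.
std::vector<float> SelectV2Reference(const std::vector<bool>& cond,
                                     const std::vector<float>& true_vals,
                                     const std::vector<float>& else_vals,
                                     bool broadcast_true,
                                     bool broadcast_false) {
  std::vector<float> out(cond.size());
  for (size_t i = 0; i < cond.size(); ++i) {
    const float t = broadcast_true ? true_vals[0] : true_vals[i];
    const float e = broadcast_false ? else_vals[0] : else_vals[i];
    out[i] = cond[i] ? t : e;
  }
  return out;
}

int main() {
  const auto out = SelectV2Reference({true, false, true}, {1.f, 2.f, 3.f},
                                     {-1.f, -2.f, -3.f}, false, false);
  for (float v : out) std::printf("%g ", v);  // 1 -2 3
  std::printf("\n");
  return 0;
}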
1,022
cpp
tensorflow/tensorflow
padding
third_party/xla/xla/client/padding.cc
third_party/xla/xla/client/padding_test.cc
#ifndef XLA_CLIENT_PADDING_H_ #define XLA_CLIENT_PADDING_H_ #include <utility> #include <vector> #include "absl/status/statusor.h" #include "absl/types/span.h" #include "xla/types.h" namespace xla { enum class Padding { kSame, kValid, }; absl::Status ValidatePaddingValues(absl::Span<const int64_t> input_dimensions, absl::Span<const int64_t> window_dimensions, absl::Span<const int64_t> window_strides); std::vector<std::pair<int64_t, int64_t>> MakePadding( absl::Span<const int64_t> input_dimensions, absl::Span<const int64_t> window_dimensions, absl::Span<const int64_t> window_strides, Padding padding); } #endif #include "xla/client/padding.h" #include <algorithm> #include <utility> #include <vector> #include "absl/status/status.h" #include "xla/util.h" #include "tsl/lib/math/math_util.h" #include "tsl/platform/logging.h" namespace xla { absl::Status ValidatePaddingValues(absl::Span<const int64_t> input_dimensions, absl::Span<const int64_t> window_dimensions, absl::Span<const int64_t> window_strides) { bool ok = input_dimensions.size() == window_dimensions.size() && input_dimensions.size() == window_strides.size(); if (!ok) { return InvalidArgument( "Want input dimensions size %u = window dimensions size %u = window " "strides size %u", input_dimensions.size(), window_dimensions.size(), window_strides.size()); } for (size_t i = 0; i < input_dimensions.size(); ++i) { if (window_dimensions[i] <= 0) { return InvalidArgument("Window dimension %u has non-positive size %d", i, window_dimensions[i]); } if (window_strides[i] <= 0) { return InvalidArgument("Window dimension %u has non-positive stride %d", i, window_strides[i]); } } return absl::OkStatus(); } std::vector<std::pair<int64_t, int64_t>> MakePadding( absl::Span<const int64_t> input_dimensions, absl::Span<const int64_t> window_dimensions, absl::Span<const int64_t> window_strides, Padding padding) { TF_CHECK_OK(ValidatePaddingValues(input_dimensions, window_dimensions, window_strides)); std::vector<std::pair<int64_t, int64_t>> low_high_padding; switch (padding) { case Padding::kValid: low_high_padding.resize(window_dimensions.size(), {0, 0}); return low_high_padding; case Padding::kSame: for (size_t i = 0; i < input_dimensions.size(); ++i) { int64_t input_dimension = input_dimensions[i]; int64_t window_dimension = window_dimensions[i]; int64_t window_stride = window_strides[i]; int64_t output_dimension = tsl::MathUtil::CeilOfRatio(input_dimension, window_stride); int64_t padding_size = std::max<int64_t>((output_dimension - 1) * window_stride + window_dimension - input_dimension, 0); low_high_padding.emplace_back( tsl::MathUtil::FloorOfRatio(padding_size, int64_t{2}), tsl::MathUtil::CeilOfRatio(padding_size, int64_t{2})); } break; } return low_high_padding; } }
#include "xla/client/padding.h" #include <utility> #include "tsl/platform/test.h" namespace xla { namespace { class PaddingTest : public ::testing::Test { protected: std::pair<int64_t, int64_t> ComputePadding(int64_t input_dimension, int64_t window_dimension, int64_t window_stride, Padding padding) { return MakePadding({input_dimension}, {window_dimension}, {window_stride}, padding)[0]; } }; TEST_F(PaddingTest, ValidPaddingWithStrideOne) { const auto padding = ComputePadding(10, 5, 1, Padding::kValid); EXPECT_EQ(padding.first, 0); EXPECT_EQ(padding.second, 0); } TEST_F(PaddingTest, ValidPaddingWithStrideThree) { const auto padding = ComputePadding(10, 5, 3, Padding::kValid); EXPECT_EQ(padding.first, 0); EXPECT_EQ(padding.second, 0); } TEST_F(PaddingTest, SamePaddingWithOddWindow) { const auto padding = ComputePadding(10, 7, 1, Padding::kSame); EXPECT_EQ(padding.first, 3); EXPECT_EQ(padding.second, 3); } TEST_F(PaddingTest, SamePaddingWithEvenWindow) { const auto padding = ComputePadding(10, 6, 1, Padding::kSame); EXPECT_EQ(padding.first, 2); EXPECT_EQ(padding.second, 3); } TEST_F(PaddingTest, SamePaddingWithOddWindowWithStride) { const auto padding = ComputePadding(10, 7, 3, Padding::kSame); EXPECT_EQ(padding.first, 3); EXPECT_EQ(padding.second, 3); } TEST_F(PaddingTest, SamePaddingWithEvenWindowWithStride) { const auto padding = ComputePadding(10, 6, 4, Padding::kSame); EXPECT_EQ(padding.first, 2); EXPECT_EQ(padding.second, 2); } TEST_F(PaddingTest, SamePaddingForWindowSizeOne) { const auto padding = ComputePadding(10, 1, 1, Padding::kSame); EXPECT_EQ(padding.first, 0); EXPECT_EQ(padding.second, 0); } TEST_F(PaddingTest, SamePaddingForWindowLargerThanInput) { const auto padding = ComputePadding(10, 20, 1, Padding::kSame); EXPECT_EQ(padding.first, 9); EXPECT_EQ(padding.second, 10); } TEST_F(PaddingTest, NonNegativePadding) { const auto padding = ComputePadding(4, 1, 2, Padding::kSame); EXPECT_EQ(padding.first, 0); EXPECT_EQ(padding.second, 0); } } }
1,023
cpp
tensorflow/tensorflow
softmax
tensorflow/lite/delegates/gpu/gl/kernels/softmax.cc
tensorflow/lite/kernels/softmax_test.cc
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_SOFTMAX_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_SOFTMAX_H_ #include <memory> #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/gl/node_shader.h" namespace tflite { namespace gpu { namespace gl { std::unique_ptr<NodeShader> NewSoftmaxNodeShader(); } } } #endif #if GOOGLE_CUDA && GOOGLE_TENSORRT #include "tensorflow/compiler/tf2tensorrt/convert/convert_nodes.h" #include "tensorflow/compiler/tf2tensorrt/convert/op_converter_registry.h" #include "tensorflow/compiler/tf2tensorrt/convert/ops/layer_utils.h" namespace tensorflow { namespace tensorrt { namespace convert { class ConvertSoftmax : public OpConverterBase<ConvertSoftmax> { public: explicit ConvertSoftmax(const OpConverterParams *params) : OpConverterBase<ConvertSoftmax>(params) {} static constexpr std::array<DataType, 3> AllowedDataTypes() { return {DataType::DT_FLOAT, DataType::DT_HALF}; } static constexpr std::array<InputArgSpec, 1> InputSpec() { return std::array<InputArgSpec, 1>{ InputArgSpec::Create("logits", TrtInputArg::kTensor)}; } Status Validate() { const auto &params = *this->params_; const auto &inputs = params.inputs; ITensorProxyPtr logits_tensor = inputs.at(0).tensor(); const int num_trt_dims = logits_tensor->getDimensions().nbDims; if (!num_trt_dims && params.use_implicit_batch) { return errors::InvalidArgument( "TensorRT Softmax cannot apply on the batch dimension"); } return OkStatus(); } Status Convert() { const auto &params = *this->params_; const auto &inputs = params.inputs; const auto &node_def = params.node_def; ITensorProxyPtr logits_tensor = inputs.at(0).tensor(); const int num_trt_dims = logits_tensor->getDimensions().nbDims; nvinfer1::ISoftMaxLayer *layer = params.converter->network()->addSoftMax(*logits_tensor->trt_tensor()); TFTRT_RETURN_ERROR_IF_NULLPTR(layer, node_def.name()); params.converter->SetLayerName(layer, node_def); layer->setAxes(1 << (num_trt_dims - 1)); ITensorProxyPtr output_tensor = layer->getOutput(0); params.outputs->push_back(TRT_TensorOrWeights(output_tensor)); return OkStatus(); } }; REGISTER_DEFAULT_TRT_OP_CONVERTER(MakeConverterFunction<ConvertSoftmax>(), "Softmax"); } } } #endif
#include "tensorflow/lite/delegates/gpu/gl/kernels/softmax.h" #include <cmath> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/gl/kernels/test_util.h" using ::testing::FloatNear; using ::testing::Pointwise; namespace tflite { namespace gpu { namespace gl { namespace { TEST(SoftmaxTest, Softmax) { TensorRef<BHWC> input; input.type = DataType::FLOAT32; input.ref = 0; input.shape = BHWC(1, 2, 2, 1); TensorRef<BHWC> output; output.type = DataType::FLOAT32; output.ref = 1; output.shape = BHWC(1, 2, 2, 1); SoftmaxAttributes attr; attr.axis = Axis::CHANNELS; SingleOpModel model({ToString(OperationType::SOFTMAX), attr}, {input}, {output}); ASSERT_TRUE(model.PopulateTensor(0, {0.1f, 0.2f, 0.3f, 0.4f})); ASSERT_OK(model.Invoke(*NewSoftmaxNodeShader())); EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6f), {1.0f, 1.0f, 1.0f, 1.0f})); } TEST(SoftmaxTest, DoesNotWorkForHeightAxis) { TensorRef<BHWC> input; input.type = DataType::FLOAT32; input.ref = 0; input.shape = BHWC(1, 2, 2, 1); TensorRef<BHWC> output; output.type = DataType::FLOAT32; output.ref = 1; output.shape = BHWC(1, 2, 2, 1); SoftmaxAttributes attr; attr.axis = Axis::HEIGHT; SingleOpModel model({ToString(OperationType::SOFTMAX), attr}, {input}, {output}); ASSERT_TRUE(model.PopulateTensor(0, {0.1f, 0.2f, 0.3f, 0.4f})); EXPECT_FALSE(model.Invoke(*NewSoftmaxNodeShader()).ok()); } TEST(SoftmaxTest, DoesNotWorkForWidthAxis) { TensorRef<BHWC> input; input.type = DataType::FLOAT32; input.ref = 0; input.shape = BHWC(1, 2, 2, 1); TensorRef<BHWC> output; output.type = DataType::FLOAT32; output.ref = 1; output.shape = BHWC(1, 2, 2, 1); SoftmaxAttributes attr; attr.axis = Axis::WIDTH; SingleOpModel model({ToString(OperationType::SOFTMAX), attr}, {input}, {output}); ASSERT_TRUE(model.PopulateTensor(0, {0.1f, 0.2f, 0.3f, 0.4f})); EXPECT_FALSE(model.Invoke(*NewSoftmaxNodeShader()).ok()); } TEST(SoftmaxTest, Softmax1x1) { TensorRef<BHWC> input; input.type = DataType::FLOAT32; input.ref = 0; input.shape = BHWC(1, 1, 1, 4); TensorRef<BHWC> output; output.type = DataType::FLOAT32; output.ref = 1; output.shape = BHWC(1, 1, 1, 4); SoftmaxAttributes attr; attr.axis = Axis::CHANNELS; const float sum = std::exp(0.1f) + std::exp(0.2f) + std::exp(0.3f) + std::exp(0.4f); SingleOpModel model({ToString(OperationType::SOFTMAX), attr}, {input}, {output}); ASSERT_TRUE(model.PopulateTensor(0, {0.1f, 0.2f, 0.3f, 0.4f})); ASSERT_OK(model.Invoke(*NewSoftmaxNodeShader())); EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6f), {std::exp(0.1f) / sum, std::exp(0.2f) / sum, std::exp(0.3f) / sum, std::exp(0.4f) / sum})); } TEST(SoftmaxTest, SoftmaxBigNumber) { TensorRef<BHWC> input; input.type = DataType::FLOAT32; input.ref = 0; input.shape = BHWC(1, 2, 1, 2); TensorRef<BHWC> output; output.type = DataType::FLOAT32; output.ref = 1; output.shape = BHWC(1, 2, 1, 2); SoftmaxAttributes attr; attr.axis = Axis::CHANNELS; double doubles[4] = {1.0, 2.0, 3.0, 100.0}; ASSERT_TRUE(std::isinf(std::exp(static_cast<float>(doubles[3])))); ASSERT_FALSE(std::isinf(std::exp(doubles[3]))); double s0 = std::exp(doubles[0]) + std::exp(doubles[1]); double s1 = std::exp(doubles[2]) + std::exp(doubles[3]); SingleOpModel model({ToString(OperationType::SOFTMAX), attr}, {input}, {output}); ASSERT_TRUE(model.PopulateTensor( 0, {static_cast<float>(doubles[0]), static_cast<float>(doubles[1]), static_cast<float>(doubles[2]), 
static_cast<float>(doubles[3])})); ASSERT_OK(model.Invoke(*NewSoftmaxNodeShader())); EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6f), {static_cast<float>(std::exp(doubles[0]) / s0), static_cast<float>(std::exp(doubles[1]) / s0), static_cast<float>(std::exp(doubles[2]) / s1), static_cast<float>(std::exp(doubles[3]) / s1)})); } TEST(SoftmaxTest, Softmax1x1BigNumber) { TensorRef<BHWC> input; input.type = DataType::FLOAT32; input.ref = 0; input.shape = BHWC(1, 1, 1, 4); TensorRef<BHWC> output; output.type = DataType::FLOAT32; output.ref = 1; output.shape = BHWC(1, 1, 1, 4); SoftmaxAttributes attr; attr.axis = Axis::CHANNELS; double doubles[4] = {1.0, 2.0, 3.0, 100.0}; ASSERT_TRUE(std::isinf(std::exp(static_cast<float>(doubles[3])))); ASSERT_FALSE(std::isinf(std::exp(doubles[3]))); double s0 = std::exp(doubles[0]) + std::exp(doubles[1]) + std::exp(doubles[2]) + std::exp(doubles[3]); SingleOpModel model({ToString(OperationType::SOFTMAX), attr}, {input}, {output}); ASSERT_TRUE(model.PopulateTensor( 0, {static_cast<float>(doubles[0]), static_cast<float>(doubles[1]), static_cast<float>(doubles[2]), static_cast<float>(doubles[3])})); ASSERT_OK(model.Invoke(*NewSoftmaxNodeShader())); EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6f), {static_cast<float>(std::exp(doubles[0]) / s0), static_cast<float>(std::exp(doubles[1]) / s0), static_cast<float>(std::exp(doubles[2]) / s0), static_cast<float>(std::exp(doubles[3]) / s0)})); } } } } }
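The SoftmaxBigNumber tests above rely on the shader subtracting the channel maximum before exponentiation, since exp(100.0f) overflows in single precision. A minimal host-side restatement of that numerically stable softmax follows; StableSoftmax is an illustrative helper name, not part of the delegate.

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// Numerically stable softmax: subtract the maximum before exponentiating so
// large logits (e.g. 100.0f) do not overflow float.
std::vector<float> StableSoftmax(const std::vector<float>& x) {
  const float max_val = *std::max_element(x.begin(), x.end());
  std::vector<float> out(x.size());
  float sum = 0.0f;
  for (size_t i = 0; i < x.size(); ++i) {
    out[i] = std::exp(x[i] - max_val);
    sum += out[i];
  }
  for (float& v : out) v /= sum;
  return out;
}

int main() {
  for (float v : StableSoftmax({1.0f, 2.0f, 3.0f, 100.0f})) {
    std::printf("%g ", v);  // ~0 ~0 ~0 ~1
  }
  std::printf("\n");
  return 0;
}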
1,024
cpp
tensorflow/tensorflow
resampler
tensorflow/lite/delegates/gpu/gl/kernels/resampler.cc
tensorflow/lite/delegates/gpu/cl/kernels/resampler_test.cc
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_RESAMPLER_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_RESAMPLER_H_ #include <memory> #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/gl/node_shader.h" namespace tflite { namespace gpu { namespace gl { std::unique_ptr<NodeShader> NewResamplerNodeShader(); } } } #endif #include "tensorflow/lite/delegates/gpu/gl/kernels/resampler.h" #include <algorithm> #include <cstdint> #include <cstring> #include <memory> #include <string> #include <utility> #include <vector> #include "absl/memory/memory.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/types.h" namespace tflite { namespace gpu { namespace gl { namespace { class Resampler : public NodeShader { public: absl::Status GenerateCode(const GenerationContext& ctx, GeneratedCode* generated_code) const final { std::vector<Variable> parameters = { {"src_height", static_cast<int>(ctx.input_shapes[0][1])}, {"src_width", static_cast<int>(ctx.input_shapes[0][2])}, }; std::string source = R"( highp int X = int(gid.x); highp int Y = int(gid.y); highp int S = int(gid.z); highp vec2 f_coords = ($input_data_1[X, Y, 0]$).xy; highp vec2 f_coords_floor = floor(f_coords); highp ivec4 st; st.xy = ivec2(f_coords_floor.x, f_coords_floor.y); st.zw = st.xy + ivec2(1, 1); highp vec2 t = f_coords - f_coords_floor; bool stx_in = st.x >= 0 && st.x < $src_width$; bool stz_in = st.z >= 0 && st.z < $src_width$; bool sty_in = st.y >= 0 && st.y < $src_height$; bool stw_in = st.w >= 0 && st.w < $src_height$; vec4 src0 = (stx_in && sty_in) ? $input_data_0[st.x, st.y, S]$ : vec4(0.0); vec4 src1 = (stz_in && sty_in) ? $input_data_0[st.z, st.y, S]$ : vec4(0.0); vec4 src2 = (stx_in && stw_in) ? $input_data_0[st.x, st.w, S]$ : vec4(0.0); vec4 src3 = (stz_in && stw_in) ? $input_data_0[st.z, st.w, S]$ : vec4(0.0); value_0 = mix(mix(src0, src1, t.x), mix(src2, src3, t.x), t.y); )"; *generated_code = { std::move(parameters), {}, {}, uint3(), uint3(), std::move(source), IOStructure::ONLY_DEFINITIONS, IOStructure::AUTO, }; return absl::OkStatus(); } }; } std::unique_ptr<NodeShader> NewResamplerNodeShader() { return std::make_unique<Resampler>(); } } } }
#include "tensorflow/lite/delegates/gpu/gl/kernels/resampler.h" #include <utility> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/gl/kernels/test_util.h" using ::testing::FloatNear; using ::testing::Pointwise; namespace tflite { namespace gpu { namespace gl { namespace { absl::Status ResamplerIdentityTest(const BHWC& shape) { TensorRef<BHWC> src_tensor; src_tensor.type = DataType::FLOAT32; src_tensor.ref = 0; src_tensor.shape = shape; TensorRef<BHWC> warp_tensor; warp_tensor.type = DataType::FLOAT32; warp_tensor.ref = 1; warp_tensor.shape = BHWC(1, shape.h, shape.w, 2); TensorRef<BHWC> dst_tensor; dst_tensor.type = DataType::FLOAT32; dst_tensor.ref = 2; dst_tensor.shape = shape; SingleOpModel model({ToString(OperationType::RESAMPLER)}, {src_tensor, warp_tensor}, {dst_tensor}); std::vector<float> src_data(src_tensor.shape.DimensionsProduct()); std::vector<float> warp_data(warp_tensor.shape.DimensionsProduct()); std::vector<float> dst_data(dst_tensor.shape.DimensionsProduct()); for (int i = 0; i < src_data.size(); ++i) { src_data[i] = std::sin(i); dst_data[i] = src_data[i]; } for (int y = 0; y < shape.h; ++y) { for (int x = 0; x < shape.w; ++x) { warp_data[(y * shape.w + x) * 2 + 0] = x; warp_data[(y * shape.w + x) * 2 + 1] = y; } } if (!model.PopulateTensor(0, std::move(src_data))) { return absl::InternalError("failed loading data"); } if (!model.PopulateTensor(1, std::move(warp_data))) { return absl::InternalError("failed loading data"); } RETURN_IF_ERROR(model.Invoke(*NewResamplerNodeShader())); EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), dst_data)); return absl::OkStatus(); } TEST(ResamplerTest, Identity_2_2_1) { auto status = ResamplerIdentityTest(BHWC(1, 2, 2, 1)); ASSERT_TRUE(status.ok()) << status.message(); } TEST(ResamplerTest, Identity_3_5_3) { auto status = ResamplerIdentityTest(BHWC(1, 3, 5, 3)); ASSERT_TRUE(status.ok()) << status.message(); } TEST(ResamplerTest, Identity_6_1_7) { auto status = ResamplerIdentityTest(BHWC(1, 6, 1, 7)); ASSERT_TRUE(status.ok()) << status.message(); } } } } }
1,025
cpp
tensorflow/tensorflow
convolution_transposed_3x3_thin
tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_3x3_thin.cc
tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_3x3_thin_test.cc
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_CONVOLUTION_TRANSPOSED_3X3_THIN_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_CONVOLUTION_TRANSPOSED_3X3_THIN_H_ #include <string> #include <vector> #include "tensorflow/lite/delegates/gpu/common/data_type.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/task/gpu_operation.h" #include "tensorflow/lite/delegates/gpu/common/task/weights_layout.h" #include "tensorflow/lite/delegates/gpu/common/tensor.h" #include "tensorflow/lite/delegates/gpu/common/types.h" namespace tflite { namespace gpu { class ConvolutionTransposed3x3Thin : public GPUOperation { public: ConvolutionTransposed3x3Thin() = default; int3 GetGridSize() const override; ConvolutionTransposed3x3Thin(ConvolutionTransposed3x3Thin&& operation) = default; ConvolutionTransposed3x3Thin& operator=( ConvolutionTransposed3x3Thin&& operation) = default; ConvolutionTransposed3x3Thin(const ConvolutionTransposed3x3Thin&) = delete; ConvolutionTransposed3x3Thin& operator=(const ConvolutionTransposed3x3Thin&) = delete; WeightsDescription GetWeightsDescription() const { WeightsDescription desc; desc.type = DeduceDataTypeFromPrecision(definition_.precision); desc.layout = weights_layout_; desc.spatial_remap = GetSpatialWeightsRemap(); return desc; } private: ConvolutionTransposed3x3Thin(const GpuInfo& gpu_info, const OperationDef& definition, const ConvolutionTransposedAttributes& attr); friend ConvolutionTransposed3x3Thin CreateConvolutionTransposed3x3Thin( const GpuInfo& gpu_info, const OperationDef& definition, const ConvolutionTransposedAttributes& attr); friend ConvolutionTransposed3x3Thin CreateConvolutionTransposed3x3ThinDynamicWeights( const GpuInfo& gpu_info, const OperationDef& definition, const ConvolutionTransposedAttributes& attr); void UploadWeights( const tflite::gpu::Tensor<OHWI, DataType::FLOAT32>& weights); std::vector<int> GetSpatialWeightsRemap() const; std::string GenerateConvolutionTransposedCode(const OperationDef& op_def, const GpuInfo& gpu_info, int src_depth, int dst_depth); WeightsLayout weights_layout_; }; bool IsConvolutionTransposed3x3ThinSupported( const ConvolutionTransposedAttributes& attr); ConvolutionTransposed3x3Thin CreateConvolutionTransposed3x3Thin( const GpuInfo& gpu_info, const OperationDef& definition, const ConvolutionTransposedAttributes& attr); ConvolutionTransposed3x3Thin CreateConvolutionTransposed3x3ThinDynamicWeights( const GpuInfo& gpu_info, const OperationDef& definition, const ConvolutionTransposedAttributes& attr); } } #endif #include "tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_3x3_thin.h" #include <memory> #include <string> #include <utility> #include <vector> #include "absl/strings/substitute.h" #include "tensorflow/lite/delegates/gpu/common/precision.h" #include "tensorflow/lite/delegates/gpu/common/task/buffer_desc.h" #include "tensorflow/lite/delegates/gpu/common/task/weights_conversion.h" namespace tflite { namespace gpu { namespace { std::string ConvInstr(CalculationsPrecision precision, bool is_i4_o4, const std::string& dst_name, const std::string& src_name, int weights_offset) { std::string c; if (is_i4_o4) { switch (precision) { case CalculationsPrecision::F32: case CalculationsPrecision::F16: c += " $0 += $1.x * args.weights.Read($2); \n"; c += " $0 += $1.y * args.weights.Read($3); \n"; c += " $0 += $1.z * args.weights.Read($4); 
\n"; c += " $0 += $1.w * args.weights.Read($5); \n"; break; case CalculationsPrecision::F32_F16: c += " $0 += TO_ACCUM_TYPE($1.x * args.weights.Read($2) + $1.y * " "args.weights.Read($3) + $1.z * args.weights.Read($4) + $1.w * " "args.weights.Read($5)); \n"; break; } } else { c += " $0.x += dot($1, args.weights.Read($2)); \n"; c += " $0.y += dot($1, args.weights.Read($3)); \n"; c += " $0.z += dot($1, args.weights.Read($4)); \n"; c += " $0.w += dot($1, args.weights.Read($5)); \n"; } return absl::Substitute(c, dst_name, src_name, weights_offset, weights_offset + 1, weights_offset + 2, weights_offset + 3); } } ConvolutionTransposed3x3Thin::ConvolutionTransposed3x3Thin( const GpuInfo& gpu_info, const OperationDef& definition, const ConvolutionTransposedAttributes& attr) : GPUOperation(definition) { if (gpu_info.IsApple()) { weights_layout_ = WeightsLayout::kOICustomSpatialO4I4; } else { weights_layout_ = WeightsLayout::kOICustomSpatialI4O4; } code_ = GenerateConvolutionTransposedCode( definition_, gpu_info, DivideRoundUp(attr.weights.shape.i, 4), DivideRoundUp(attr.weights.shape.o, 4)); } std::string ConvolutionTransposed3x3Thin::GenerateConvolutionTransposedCode( const OperationDef& op_def, const GpuInfo& gpu_info, int src_depth, int dst_depth) { AddSrcTensor("src_tensor", op_def.src_tensors[0]); AddDstTensor("dst_tensor", op_def.dst_tensors[0]); if (op_def.src_tensors.size() == 2) { BufferDescriptor desc; desc.element_type = op_def.src_tensors[1].GetDataType(); desc.element_size = 4; desc.memory_type = MemoryType::CONSTANT; AddSrcBuffer("weights", desc); } std::string c; c += "MAIN_FUNCTION($0) {\n"; if (op_def.IsBatchSupported()) { c += " int linear_id = GLOBAL_ID_0;\n"; c += " int X = linear_id / args.dst_tensor.Batch();\n"; c += " int B = linear_id % args.dst_tensor.Batch();\n"; c += " args.dst_tensor.SetBatchRef(B);\n"; c += " args.src_tensor.SetBatchRef(B);\n"; } else { c += " int X = GLOBAL_ID_0;\n"; } c += " int Y = GLOBAL_ID_1;\n"; c += " if (X >= args.src_tensor.Width() || Y >= args.src_tensor.Height()) " "return;\n"; for (int d = 0; d < dst_depth; ++d) { const std::string layer = std::to_string(d); c += " ACCUM_FLT4 r" + layer + "[2][2];\n"; c += " r" + layer + "[0][0] = INIT_ACCUM_FLT4(0.0f);\n"; c += " r" + layer + "[0][1] = INIT_ACCUM_FLT4(0.0f);\n"; c += " r" + layer + "[1][0] = INIT_ACCUM_FLT4(0.0f);\n"; c += " r" + layer + "[1][1] = INIT_ACCUM_FLT4(0.0f);\n"; } for (int s = 0; s < src_depth; ++s) { const std::string z = std::to_string(s); c += " {\n"; if (op_def.src_tensors[0].SupportsZeroClamp(Axis::WIDTH, gpu_info) && op_def.src_tensors[0].SupportsZeroClamp(Axis::HEIGHT, gpu_info)) { c += " FLT4 src0 = args.src_tensor.Read(X, Y, " + z + ");\n"; c += " FLT4 src1 = args.src_tensor.Read(X + 1, Y, " + z + ");\n"; c += " FLT4 src2 = args.src_tensor.Read(X, Y + 1, " + z + ");\n"; c += " FLT4 src3 = args.src_tensor.Read(X + 1, Y + 1, " + z + ");\n"; } else if (op_def.src_tensors[0].IsLinear() && op_def.src_tensors[0].ReturnsZeroForNegOneRead(gpu_info)) { c += " int c0 = args.src_tensor.GetAddress(X, Y, " + z + ");\n"; c += " int c1 = args.src_tensor.GetAddress(X + 1, Y, " + z + ");\n"; c += " int c2 = args.src_tensor.GetAddress(X, Y + 1, " + z + ");\n"; c += " int c3 = args.src_tensor.GetAddress(X + 1, Y + 1, " + z + ");\n"; c += " bool x_in = X + 1 < args.src_tensor.Width();\n"; c += " bool y_in = Y + 1 < args.src_tensor.Height();\n"; c += " c1 = select(-1, c1, x_in);\n"; c += " c2 = select(-1, c2, y_in);\n"; c += " c3 = select(-1, c3, x_in && y_in);\n"; c += " FLT4 src0 = 
args.src_tensor.Read(c0);\n"; c += " FLT4 src1 = args.src_tensor.Read(c1);\n"; c += " FLT4 src2 = args.src_tensor.Read(c2);\n"; c += " FLT4 src3 = args.src_tensor.Read(c3);\n"; } else { c += " bool x_in = X + 1 < args.src_tensor.Width();\n"; c += " bool y_in = Y + 1 < args.src_tensor.Height();\n"; c += " FLT4 src0 = args.src_tensor.Read(X, Y, " + z + ");\n"; c += " FLT4 src1 = INIT_FLT4(0.0);\n"; c += " FLT4 src2 = INIT_FLT4(0.0);\n"; c += " FLT4 src3 = INIT_FLT4(0.0);\n"; c += " if (x_in) {\n"; c += " src1 = args.src_tensor.Read(X + 1, Y, " + z + ");\n"; c += " }\n"; c += " if (y_in) {\n"; c += " src2 = args.src_tensor.Read(X, Y + 1, " + z + ");\n"; c += " }\n"; c += " if (x_in && y_in) {\n"; c += " src3 = args.src_tensor.Read(X + 1, Y + 1, " + z + ");\n"; c += " }\n"; } for (int d = 0; d < dst_depth; ++d) { const std::string layer = std::to_string(d); const int filters_index = (s * dst_depth + d) * 36; const bool is_i4_o4 = GetWeightsDescription().IsI4O4(); c += ConvInstr(op_def.precision, is_i4_o4, "r" + layer + "[0][0]", "src0", filters_index); c += ConvInstr(op_def.precision, is_i4_o4, "r" + layer + "[0][1]", "src0", filters_index + 4); c += ConvInstr(op_def.precision, is_i4_o4, "r" + layer + "[0][1]", "src1", filters_index + 8); c += ConvInstr(op_def.precision, is_i4_o4, "r" + layer + "[1][0]", "src0", filters_index + 12); c += ConvInstr(op_def.precision, is_i4_o4, "r" + layer + "[1][0]", "src2", filters_index + 16); c += ConvInstr(op_def.precision, is_i4_o4, "r" + layer + "[1][1]", "src0", filters_index + 20); c += ConvInstr(op_def.precision, is_i4_o4, "r" + layer + "[1][1]", "src1", filters_index + 24); c += ConvInstr(op_def.precision, is_i4_o4, "r" + layer + "[1][1]", "src2", filters_index + 28); c += ConvInstr(op_def.precision, is_i4_o4, "r" + layer + "[1][1]", "src3", filters_index + 32); } c += " }\n"; } c += " X *= 2;\n"; c += " Y *= 2;\n"; for (int d = 0; d < dst_depth; ++d) { const std::string layer = std::to_string(d); c += " {\n"; c += " FLT4 bias_val = args.biases.Read(" + layer + ");\n"; for (int y = 0; y < 2; ++y) { for (int x = 0; x < 2; ++x) { const std::string x_coord = "X + " + std::to_string(x); const std::string y_coord = "Y + " + std::to_string(y); c += " {\n"; c += " FLT4 result = TO_FLT4(r" + layer + "[" + std::to_string(y) + "][" + std::to_string(x) + "]) + bias_val;\n"; c += " args.dst_tensor.Write(result, " + x_coord + ", " + y_coord + ", " + layer + ");\n"; c += " }\n"; } } c += " }\n"; } c += "}\n"; return c; } int3 ConvolutionTransposed3x3Thin::GetGridSize() const { const int grid_x = src_[0]->Width() * dst_[0]->Batch(); const int grid_y = src_[0]->Height(); const int grid_z = 1; return int3(grid_x, grid_y, grid_z); } std::vector<int> ConvolutionTransposed3x3Thin::GetSpatialWeightsRemap() const { return std::vector<int>{4, 5, 3, 7, 1, 8, 6, 2, 0}; } void ConvolutionTransposed3x3Thin::UploadWeights( const tflite::gpu::Tensor<OHWI, DataType::FLOAT32>& weights) { const auto weights_desc = GetWeightsDescription(); const int flt_count = GetTotalElementsCountForLayout(weights_desc, weights.shape); BufferDescriptor desc; desc.element_type = weights_desc.type; desc.element_size = 4; desc.memory_type = MemoryType::CONSTANT; desc.size = flt_count * SizeOf(desc.element_type); desc.data.resize(desc.size); RearrangeWeights(weights, weights_desc, absl::MakeSpan(desc.data)); args_.AddObject("weights", std::make_unique<BufferDescriptor>(std::move(desc))); } bool IsConvolutionTransposed3x3ThinSupported( const ConvolutionTransposedAttributes& attr) { return 
attr.weights.shape.o <= 8 && attr.weights.shape.w == 3 && attr.weights.shape.h == 3 && attr.stride.w == 2 && attr.stride.h == 2 && attr.padding.prepended.w == 1 && attr.padding.prepended.h == 1 && attr.padding.appended.w == 1 && attr.padding.appended.h == 1; } ConvolutionTransposed3x3Thin CreateConvolutionTransposed3x3Thin( const GpuInfo& gpu_info, const OperationDef& definition, const ConvolutionTransposedAttributes& attr) { ConvolutionTransposed3x3Thin result(gpu_info, definition, attr); result.UploadWeights(attr.weights); TensorDescriptor bias_tensor_desc = CreateConstantLinearTensorDescriptor( gpu_info, definition.src_tensors[0].GetDataType(), attr.bias); result.args_.AddObject("biases", std::make_unique<TensorDescriptor>( std::move(bias_tensor_desc))); return result; } ConvolutionTransposed3x3Thin CreateConvolutionTransposed3x3ThinDynamicWeights( const GpuInfo& gpu_info, const OperationDef& definition, const ConvolutionTransposedAttributes& attr) { OperationDef new_def = definition; new_def.src_tensors = { definition.src_tensors[0]}; const DataType weights_type = definition.GetDataType(); new_def.src_tensors.push_back( {weights_type, TensorStorageType::BUFFER, Layout::HWC}); ConvolutionTransposed3x3Thin result(gpu_info, new_def, attr); TensorDescriptor bias_tensor_desc = CreateConstantLinearTensorDescriptor( gpu_info, definition.src_tensors[0].GetDataType(), attr.bias); result.args_.AddObject("biases", std::make_unique<TensorDescriptor>( std::move(bias_tensor_desc))); return result; } } }
#include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_3x3_thin_test_util.h" namespace tflite { namespace gpu { namespace cl { TEST_F(OpenCLOperationTest, ConvolutionTransposed3x3ThinSimpleWeights) { auto status = ConvolutionTransposed3x3ThinSimpleWeightsTest(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } TEST_F(OpenCLOperationTest, ConvolutionTransposed3x3Thin) { auto status = ConvolutionTransposed3x3ThinTest(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } } } }
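The thin transposed-convolution kernel above dispatches one thread per source pixel (GetGridSize returns src width x src height) and each thread writes a 2x2 destination patch at (2*X, 2*Y), so the destination extent is twice the source extent. A trivial sketch with hypothetical sizes:

#include <cstdio>

int main() {
  // Hypothetical source extent; one thread is dispatched per source pixel.
  const int src_w = 16, src_h = 9;
  // Each thread writes a 2x2 patch at (2*X, 2*Y), doubling both axes.
  const int dst_w = src_w * 2;
  const int dst_h = src_h * 2;
  std::printf("grid=%dx%d, dst=%dx%d\n", src_w, src_h, dst_w, dst_h);
  return 0;
}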
1,026
cpp
tensorflow/tensorflow
relu
tensorflow/lite/delegates/gpu/gl/kernels/relu.cc
tensorflow/lite/delegates/gpu/cl/kernels/relu_test.cc
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_RELU_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_RELU_H_ #include <memory> #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/gl/node_shader.h" namespace tflite { namespace gpu { namespace gl { std::unique_ptr<NodeShader> NewReLUNodeShader(); } } } #endif #include "tensorflow/lite/delegates/gpu/gl/kernels/relu.h" #include <algorithm> #include <any> #include <cstdint> #include <cstring> #include <memory> #include <string> #include <utility> #include <vector> #include "absl/memory/memory.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/types.h" #include "tensorflow/lite/delegates/gpu/gl/variable.h" namespace tflite { namespace gpu { namespace gl { namespace { class ReLU : public NodeShader { public: absl::Status GenerateCode(const GenerationContext& ctx, GeneratedCode* generated_code) const final { const auto& attr = std::any_cast<const ReLUAttributes&>(ctx.op_attr); std::vector<Variable> params; std::string min; if (attr.alpha == 0) { min = "vec4($activation_min$)"; params.push_back({"activation_min", attr.activation_min}); } else { min = "min($alpha$ * value_0, 0.0)"; params.push_back({"alpha", attr.alpha}); } std::string code; if (attr.activation_max == 0) { code = "value_0 = max(value_0, " + min + ");"; } else { code = "value_0 = clamp(value_0, " + min + ", vec4($activation_max$));"; params.push_back({"activation_max", attr.activation_max}); } *generated_code = { std::move(params), {}, {}, uint3(), uint3(), std::move(code), IOStructure::AUTO, IOStructure::AUTO, }; return absl::OkStatus(); } }; } std::unique_ptr<NodeShader> NewReLUNodeShader() { return std::make_unique<ReLU>(); } } } }
#include "tensorflow/lite/delegates/gpu/gl/kernels/relu.h" #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/gl/kernels/test_util.h" using ::testing::FloatNear; using ::testing::Pointwise; namespace tflite { namespace gpu { namespace gl { namespace { class ReluTest : public ::testing::Test { public: ReluTest() = default; ~ReluTest() override = default; TensorRef<BHWC> GetTensorRef(int ref) { TensorRef<BHWC> tensor_ref; tensor_ref.type = DataType::FLOAT32; tensor_ref.ref = ref; tensor_ref.shape = BHWC(1, 2, 2, 1); return tensor_ref; } }; TEST_F(ReluTest, Smoke) { OperationType op_type = OperationType::RELU; ReLUAttributes attr; attr.activation_max = 0; attr.alpha = 0; SingleOpModel model({ToString(op_type), attr}, {GetTensorRef(0)}, {GetTensorRef(1)}); ASSERT_TRUE(model.PopulateTensor(0, {-6.0, 0.0, 2.0, 8.0})); ASSERT_OK(model.Invoke(*NewReLUNodeShader())); EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {0.0, 0.0, 2.0, 8.0})); } TEST_F(ReluTest, ClipOnly) { OperationType op_type = OperationType::RELU; ReLUAttributes attr; attr.activation_max = 6; attr.alpha = 0; SingleOpModel model({ToString(op_type), attr}, {GetTensorRef(0)}, {GetTensorRef(1)}); ASSERT_TRUE(model.PopulateTensor(0, {-6.0, 0.0, 2.0, 8.0})); ASSERT_OK(model.Invoke(*NewReLUNodeShader())); EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {0.0, 0.0, 2.0, 6.0})); } TEST_F(ReluTest, AlphaOnly) { OperationType op_type = OperationType::RELU; ReLUAttributes attr; attr.activation_max = 0; attr.alpha = 0.5; SingleOpModel model({ToString(op_type), attr}, {GetTensorRef(0)}, {GetTensorRef(1)}); ASSERT_TRUE(model.PopulateTensor(0, {-6.0, 0.0, 2.0, 8.0})); ASSERT_OK(model.Invoke(*NewReLUNodeShader())); EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {-3.0, 0.0, 2.0, 8.0})); } TEST_F(ReluTest, ClipAndAlpha) { OperationType op_type = OperationType::RELU; ReLUAttributes attr; attr.activation_max = 6; attr.alpha = 0.5; SingleOpModel model({ToString(op_type), attr}, {GetTensorRef(0)}, {GetTensorRef(1)}); ASSERT_TRUE(model.PopulateTensor(0, {-6.0, 0.0, 2.0, 8.0})); ASSERT_OK(model.Invoke(*NewReLUNodeShader())); EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {-3.0, 0.0, 2.0, 6.0})); } TEST_F(ReluTest, ReLUN1Smoke) { OperationType op_type = OperationType::RELU; ReLUAttributes attr; attr.activation_min = -1; attr.activation_max = 0; attr.alpha = 0; SingleOpModel model({ToString(op_type), attr}, {GetTensorRef(0)}, {GetTensorRef(1)}); ASSERT_TRUE(model.PopulateTensor(0, {-12.0f, -0.5f, 0.8f, 3.2f})); ASSERT_OK(model.Invoke(*NewReLUNodeShader())); EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {-1.0f, -0.5f, 0.8f, 3.2f})); } TEST_F(ReluTest, ReLUN1ClipOnly) { OperationType op_type = OperationType::RELU; ReLUAttributes attr; attr.activation_min = -1; attr.activation_max = 1; attr.alpha = 0; SingleOpModel model({ToString(op_type), attr}, {GetTensorRef(0)}, {GetTensorRef(1)}); ASSERT_TRUE(model.PopulateTensor(0, {-12.0f, -0.5f, 0.8f, 3.2f})); ASSERT_OK(model.Invoke(*NewReLUNodeShader())); EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {-1.0f, -0.5f, 0.8f, 1.0f})); } TEST_F(ReluTest, ReLUN1AlphaOnly) { OperationType op_type = OperationType::RELU; ReLUAttributes attr; attr.activation_min = -1; attr.activation_max = 0; attr.alpha = 0.5; SingleOpModel model({ToString(op_type), attr}, {GetTensorRef(0)}, {GetTensorRef(1)}); ASSERT_TRUE(model.PopulateTensor(0, {-6.0, 0.0, 2.0, 8.0})); 
ASSERT_OK(model.Invoke(*NewReLUNodeShader())); EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {-3.0, 0.0, 2.0, 8.0})); } TEST_F(ReluTest, ReLUN1ClipAndAlpha) { OperationType op_type = OperationType::RELU; ReLUAttributes attr; attr.activation_min = -1; attr.activation_max = 6; attr.alpha = 0.5; SingleOpModel model({ToString(op_type), attr}, {GetTensorRef(0)}, {GetTensorRef(1)}); ASSERT_TRUE(model.PopulateTensor(0, {-6.0, 0.0, 2.0, 8.0})); ASSERT_OK(model.Invoke(*NewReLUNodeShader())); EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {-3.0, 0.0, 2.0, 6.0})); } } } } }
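The ReLU shader above picks its lower bound from either the constant activation_min (when alpha == 0) or the leaky branch min(alpha * x, 0), and treats activation_max == 0 as "no upper clip". Below is a scalar restatement on the host, checked against the ClipAndAlpha test values; ReluReference is an illustrative name.

#include <algorithm>
#include <cstdio>

// Scalar restatement of the ReLU shader above: the lower bound is either the
// constant activation_min (alpha == 0) or the leaky branch min(alpha * x, 0);
// activation_max == 0 means "no upper clip".
float ReluReference(float x, float alpha, float activation_min,
                    float activation_max) {
  const float lower =
      (alpha == 0.0f) ? activation_min : std::min(alpha * x, 0.0f);
  const float y = std::max(x, lower);
  return (activation_max == 0.0f) ? y : std::min(y, activation_max);
}

int main() {
  // Matches the ClipAndAlpha test above: alpha = 0.5, activation_max = 6.
  const float inputs[] = {-6.0f, 0.0f, 2.0f, 8.0f};
  for (float v : inputs) {
    std::printf("%g ", ReluReference(v, 0.5f, 0.0f, 6.0f));  // -3 0 2 6
  }
  std::printf("\n");
  return 0;
}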
1,027
cpp
tensorflow/tensorflow
max_unpooling
tensorflow/lite/delegates/gpu/gl/kernels/max_unpooling.cc
tensorflow/lite/delegates/gpu/cl/kernels/max_unpooling_test.cc
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_MAX_UNPOOLING_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_MAX_UNPOOLING_H_ #include <memory> #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/gl/node_shader.h" namespace tflite { namespace gpu { namespace gl { std::unique_ptr<NodeShader> NewMaxUnpoolingNodeShader(); } } } #endif #include "tensorflow/lite/delegates/gpu/gl/kernels/max_unpooling.h" #include <algorithm> #include <any> #include <cstdint> #include <cstring> #include <memory> #include <string> #include <utility> #include <vector> #include "absl/memory/memory.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/types.h" #include "tensorflow/lite/delegates/gpu/gl/variable.h" namespace tflite { namespace gpu { namespace gl { namespace { class MaxUnpooling : public NodeShader { public: absl::Status GenerateCode(const GenerationContext& ctx, GeneratedCode* generated_code) const final { const auto& attr = std::any_cast<const MaxUnpooling2DAttributes&>(ctx.op_attr); std::vector<Variable> parameters = { {"stride", int2(attr.strides.w, attr.strides.h)}, {"offset", int2(attr.padding.prepended.w, attr.padding.prepended.h)}, {"window_h", attr.kernel.h}, {"window_w", attr.kernel.w}, }; std::string source = R"( ivec2 coord = (gid.xy + $offset$) / $stride$; ivec4 indices = $input_data_1[coord.x, coord.y, gid.z]$; vec4 input_ = $input_data_0[coord.x, coord.y, gid.z]$; coord = coord * $stride$ - $offset$; for (int i = 0; i < 4; ++i) { ivec2 t = coord + ivec2(indices[i] % $window_w$, indices[i] / $window_w$); if (t.x == gid.x && t.y == gid.y) { value_0[i] = input_[i]; } } )"; *generated_code = { std::move(parameters), {}, {}, uint3(), uint3(), std::move(source), IOStructure::ONLY_DEFINITIONS, IOStructure::AUTO, }; return absl::OkStatus(); } }; } std::unique_ptr<NodeShader> NewMaxUnpoolingNodeShader() { return std::make_unique<MaxUnpooling>(); } } } }
#include "tensorflow/lite/delegates/gpu/gl/kernels/max_unpooling.h" #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/gl/kernels/test_util.h" using ::testing::FloatNear; using ::testing::Pointwise; namespace tflite { namespace gpu { namespace gl { namespace { TEST(MaxUnpoolingTest, Kernel2x2Stride2x2) { TensorRef<BHWC> input; input.type = DataType::FLOAT32; input.ref = 0; input.shape = BHWC(1, 2, 2, 1); TensorRef<BHWC> indices; indices.type = DataType::INT32; indices.ref = 1; indices.shape = BHWC(1, 2, 2, 1); TensorRef<BHWC> output; output.type = DataType::FLOAT32; output.ref = 2; output.shape = BHWC(1, 4, 4, 1); MaxUnpooling2DAttributes attr; attr.kernel = HW(2, 2); attr.padding.prepended = HW(0, 0); attr.padding.appended = HW(0, 0); attr.strides = HW(2, 2); SingleOpModel model({ToString(OperationType::MAX_UNPOOLING_2D), attr}, {input, indices}, {output}); ASSERT_TRUE(model.PopulateTensor(0, {1, 2, 3, 4})); ASSERT_TRUE(model.PopulateTensor(1, {0, 0, 0, 0})); ASSERT_OK(model.Invoke(*NewMaxUnpoolingNodeShader())); EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {1, 0, 2, 0, 0, 0, 0, 0, 3, 0, 4, 0, 0, 0, 0, 0})); } } } } }
1,028
cpp
tensorflow/tensorflow
depthwise_conv_3x3
tensorflow/lite/delegates/gpu/common/tasks/depthwise_conv_3x3.cc
tensorflow/lite/delegates/gpu/cl/kernels/depthwise_conv_3x3_test.cc
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_DEPTHWISE_CONV_3X3_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_DEPTHWISE_CONV_3X3_H_ #include <memory> #include <string> #include <utility> #include <vector> #include "tensorflow/lite/delegates/gpu/common/data_type.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/task/buffer_desc.h" #include "tensorflow/lite/delegates/gpu/common/task/gpu_operation.h" #include "tensorflow/lite/delegates/gpu/common/task/tensor_desc.h" #include "tensorflow/lite/delegates/gpu/common/tensor.h" #include "tensorflow/lite/delegates/gpu/common/types.h" namespace tflite { namespace gpu { class DepthwiseConv3x3 : public GPUOperation { public: DepthwiseConv3x3() = default; void GetPossibleKernelWorkGroups( TuningType tuning_type, const GpuInfo& gpu_info, const KernelInfo& kernel_info, std::vector<int3>* work_groups) const override; int3 GetGridSize() const override; DepthwiseConv3x3(DepthwiseConv3x3&& operation); DepthwiseConv3x3& operator=(DepthwiseConv3x3&& operation); DepthwiseConv3x3(const DepthwiseConv3x3&) = delete; DepthwiseConv3x3& operator=(const DepthwiseConv3x3&) = delete; private: explicit DepthwiseConv3x3(const OperationDef& definition, bool weights_are_buffer, bool local_mem_uploads, const GpuInfo& gpu_info); template <DataType T> void UploadWeightsAndBiases(const tflite::gpu::Tensor<OHWI, T>& weights, const tflite::gpu::Tensor<Linear, T>& biases, bool weights_are_buffer); friend DepthwiseConv3x3 CreateDepthwiseConv3x3( const GpuInfo& gpu_info, const OperationDef& definition, const DepthwiseConvolution2DAttributes& attr); template <DataType S, typename T> void RearrangeWeightsAndBiasesData( const tflite::gpu::Tensor<OHWI, S>& weights, const tflite::gpu::Tensor<Linear, S>& biases, absl::Span<T> dst); std::string GenerateDepthwiseConvCode(const GpuInfo& gpu_info, const OperationDef& op_def, bool weights_are_buffer, bool local_mem_uploads); bool local_mem_uploads_; }; template <DataType T> void DepthwiseConv3x3::UploadWeightsAndBiases( const tflite::gpu::Tensor<OHWI, T>& weights, const tflite::gpu::Tensor<Linear, T>& biases, bool weights_are_buffer) { const int src_depth = DivideRoundUp(weights.shape.i, 4); int texture_width = 10; int texture_height = src_depth; const int elements_count = texture_width * texture_height; const bool fp32_weights = definition_.precision == CalculationsPrecision::F32; const int float4_size = fp32_weights ? 16 : 8; std::vector<uint8_t> data(float4_size * elements_count); if (fp32_weights) { float4* ptr = reinterpret_cast<float4*>(data.data()); RearrangeWeightsAndBiasesData(weights, biases, absl::MakeSpan(ptr, elements_count)); } else { half4* ptr = reinterpret_cast<half4*>(data.data()); RearrangeWeightsAndBiasesData(weights, biases, absl::MakeSpan(ptr, elements_count)); } if (weights_are_buffer) { BufferDescriptor desc; desc.element_type = fp32_weights ? DataType::FLOAT32 : DataType::FLOAT16; desc.element_size = 4; desc.size = float4_size * elements_count; desc.data = std::move(data); args_.AddObject("weights", std::make_unique<BufferDescriptor>(std::move(desc))); } else { TensorDescriptor desc = CreateConstantHWVec4TensorDescriptor( fp32_weights ? 
DataType::FLOAT32 : DataType::FLOAT16, TensorStorageType::TEXTURE_2D, texture_width, texture_height, data.data()); args_.AddObject("weights", std::make_unique<TensorDescriptor>(desc)); } } template <DataType S, typename T> void DepthwiseConv3x3::RearrangeWeightsAndBiasesData( const tflite::gpu::Tensor<OHWI, S>& weights, const tflite::gpu::Tensor<Linear, S>& biases, absl::Span<T> dst) { const int src_depth = DivideRoundUp(weights.shape.i, 4); int counter = 0; for (int s = 0; s < src_depth; ++s) { for (int y = 0; y < 3; ++y) { for (int x = 0; x < 3; ++x) { T filter_val; for (int i = 0; i < 4; ++i) { const int s_ch = s * 4 + i; if (s_ch < weights.shape.i) { const int f_index = weights.shape.LinearIndex({0, y, x, s_ch}); filter_val[i] = weights.data[f_index]; } else { filter_val[i] = 0.0f; } } dst[counter++] = filter_val; } } T bias_val; for (int i = 0; i < 4; ++i) { const int dst_ch = s * 4 + i; bias_val[i] = dst_ch >= biases.shape.v ? 0.0f : biases.data[dst_ch]; } dst[counter++] = bias_val; } } bool IsDepthwiseConv3x3Supported(const GpuInfo& gpu_info, const DepthwiseConvolution2DAttributes& attr); DepthwiseConv3x3 CreateDepthwiseConv3x3( const GpuInfo& gpu_info, const OperationDef& definition, const DepthwiseConvolution2DAttributes& attr); } } #endif #include "tensorflow/lite/delegates/gpu/common/tasks/depthwise_conv_3x3.h" #include <string> #include <utility> #include "absl/strings/match.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/task/work_group_picking.h" namespace tflite { namespace gpu { DepthwiseConv3x3::DepthwiseConv3x3(const OperationDef& definition, bool weights_are_buffer, bool local_mem_uploads, const GpuInfo& gpu_info) : GPUOperation(definition), local_mem_uploads_(local_mem_uploads) { work_group_size_ = int3(8, 4, 1); code_ = GenerateDepthwiseConvCode(gpu_info, definition_, weights_are_buffer, local_mem_uploads_); if (definition_.precision == CalculationsPrecision::F16 && gpu_info.IsPowerVR()) { compiler_options_.push_back(CompilerOptions::kClFastRelaxedMath); } } DepthwiseConv3x3::DepthwiseConv3x3(DepthwiseConv3x3&& operation) : GPUOperation(std::move(operation)), local_mem_uploads_(operation.local_mem_uploads_) {} DepthwiseConv3x3& DepthwiseConv3x3::operator=(DepthwiseConv3x3&& operation) { if (this != &operation) { std::swap(local_mem_uploads_, operation.local_mem_uploads_); GPUOperation::operator=(std::move(operation)); } return *this; } std::string DepthwiseConv3x3::GenerateDepthwiseConvCode( const GpuInfo& gpu_info, const OperationDef& op_def, bool weights_are_buffer, bool local_mem_uploads) { auto src_desc = op_def.src_tensors[0]; AddSrcTensor("src_tensor", src_desc); AddDstTensor("dst_tensor", op_def.dst_tensors[0]); std::string c; if (local_mem_uploads && gpu_info.IsApiOpenCl()) { c += "__attribute__((reqd_work_group_size(8, 4, 1)))\n"; } c += "MAIN_FUNCTION($0) {\n"; if (op_def.dst_tensors[0].HasAxis(Axis::BATCH)) { c += " int linear_id = GLOBAL_ID_0;\n"; c += " int X = (linear_id / args.dst_tensor.Batch()) * 2;\n"; c += " int B = linear_id % args.dst_tensor.Batch();\n"; c += " args.dst_tensor.SetBatchRef(B);\n"; c += " args.src_tensor.SetBatchRef(B);\n"; } else { c += " int X = GLOBAL_ID_0 * 2;\n"; } c += " int Y = GLOBAL_ID_1 * 2;\n"; c += " int S = GLOBAL_ID_2;\n"; c += " ACCUM_FLT4 r0 = INIT_ACCUM_FLT4(0.0f);\n"; c += " ACCUM_FLT4 r1 = INIT_ACCUM_FLT4(0.0f);\n"; c += " ACCUM_FLT4 r2 = INIT_ACCUM_FLT4(0.0f);\n"; c += " ACCUM_FLT4 r3 = INIT_ACCUM_FLT4(0.0f);\n"; if (!local_mem_uploads) { c += " if (X >= 
args.dst_tensor.Width() || Y >= args.dst_tensor.Height() " "|| S >= args.dst_tensor.Slices()) { \n"; c += " return; \n"; c += " } \n"; } if (local_mem_uploads) { c += " __local FLT4 f[10];\n"; if (gpu_info.IsApiOpenCl() && gpu_info.IsPowerVR()) { c += " event_t e = async_work_group_copy(f, args.weights.GetPtr() + S * " "10, 10, 0);\n"; c += " wait_group_events(1, &e);\n"; } else { c += " int local_id = LOCAL_ID_1 * 8 + LOCAL_ID_0;\n"; c += " if (local_id < 10) {\n"; c += " f[local_id] = args.weights.Read(S * 10 + local_id);\n"; c += " }\n"; c += " LOCAL_MEM_BARRIER;\n"; } } else if (weights_are_buffer && gpu_info.SupportsPointersInKernels()) { c += " __global FLT4* f = args.weights.GetPtr() + S * 10;\n"; } c += " FLT4 s0;\n"; c += " FLT4 s1;\n"; c += " FLT4 s2;\n"; c += " FLT4 s3;\n"; std::string W[9] = {"f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8"}; std::string bias = "bias"; std::string xc[4] = {"X - 1", "X", "X + 1", "X + 2"}; std::string yc[4] = {"Y - 1", "Y", "Y + 1", "Y + 2"}; if (!weights_are_buffer) { c += " FLT4 f0 = args.weights.Read(0, S);\n"; c += " FLT4 f1 = args.weights.Read(1, S);\n"; c += " FLT4 f2 = args.weights.Read(2, S);\n"; c += " FLT4 f3 = args.weights.Read(3, S);\n"; c += " FLT4 f4 = args.weights.Read(4, S);\n"; c += " FLT4 f5 = args.weights.Read(5, S);\n"; c += " FLT4 f6 = args.weights.Read(6, S);\n"; c += " FLT4 f7 = args.weights.Read(7, S);\n"; c += " FLT4 f8 = args.weights.Read(8, S);\n"; } if (!op_def.src_tensors[0].SupportsZeroClamp(Axis::WIDTH, gpu_info)) { c += " int x0 = X - 1;\n"; c += " int x1 = X;\n"; c += " int x2 = X + 1;\n"; c += " int x3 = X + 2;\n"; c += " bool x0_in = x0 >= 0 && x0 < args.dst_tensor.Width();\n"; c += " bool x1_in = x1 >= 0 && x1 < args.dst_tensor.Width();\n"; c += " bool x2_in = x2 >= 0 && x2 < args.dst_tensor.Width();\n"; c += " bool x3_in = x3 >= 0 && x3 < args.dst_tensor.Width();\n"; c += " x0 = clamp(x0, 0, args.dst_tensor.Width() - 1);\n"; c += " x1 = clamp(x1, 0, args.dst_tensor.Width() - 1);\n"; c += " x2 = clamp(x2, 0, args.dst_tensor.Width() - 1);\n"; c += " x3 = clamp(x3, 0, args.dst_tensor.Width() - 1);\n"; xc[0] = "x0"; xc[1] = "x1"; xc[2] = "x2"; xc[3] = "x3"; } if (!op_def.src_tensors[0].SupportsZeroClamp(Axis::HEIGHT, gpu_info)) { c += " int y0 = Y - 1;\n"; c += " int y1 = Y;\n"; c += " int y2 = Y + 1;\n"; c += " int y3 = Y + 2;\n"; c += " bool y0_in = y0 >= 0 && y0 < args.dst_tensor.Height();\n"; c += " bool y1_in = y1 >= 0 && y1 < args.dst_tensor.Height();\n"; c += " bool y2_in = y2 >= 0 && y2 < args.dst_tensor.Height();\n"; c += " bool y3_in = y3 >= 0 && y3 < args.dst_tensor.Height();\n"; c += " y0 = clamp(y0, 0, args.dst_tensor.Height() - 1);\n"; c += " y1 = clamp(y1, 0, args.dst_tensor.Height() - 1);\n"; c += " y2 = clamp(y2, 0, args.dst_tensor.Height() - 1);\n"; c += " y3 = clamp(y3, 0, args.dst_tensor.Height() - 1);\n"; yc[0] = "y0"; yc[1] = "y1"; yc[2] = "y2"; yc[3] = "y3"; } if (local_mem_uploads || weights_are_buffer) { const bool use_direct_buffer = !local_mem_uploads && !gpu_info.SupportsPointersInKernels(); const std::string fetch_start = use_direct_buffer ? "args.weights.Read(S * 10 + " : "f["; const std::string fetch_end = use_direct_buffer ? 
")" : "]"; W[0] = fetch_start + "0" + fetch_end; W[1] = fetch_start + "1" + fetch_end; W[2] = fetch_start + "2" + fetch_end; W[3] = fetch_start + "3" + fetch_end; W[4] = fetch_start + "4" + fetch_end; W[5] = fetch_start + "5" + fetch_end; W[6] = fetch_start + "6" + fetch_end; W[7] = fetch_start + "7" + fetch_end; W[8] = fetch_start + "8" + fetch_end; bias = fetch_start + "9" + fetch_end; } auto read_4x_line = [&](int y) { std::string s0_check, s1_check, s2_check, s3_check; if (!op_def.src_tensors[0].SupportsZeroClamp(Axis::WIDTH, gpu_info)) { s0_check += "x0_in"; s1_check += "x1_in"; s2_check += "x2_in"; s3_check += "x3_in"; } if (!op_def.src_tensors[0].SupportsZeroClamp(Axis::HEIGHT, gpu_info)) { const std::string y_in = "y" + std::to_string(y) + "_in"; s0_check += s0_check.empty() ? y_in : (" && " + y_in); s1_check += s1_check.empty() ? y_in : (" && " + y_in); s2_check += s2_check.empty() ? y_in : (" && " + y_in); s3_check += s3_check.empty() ? y_in : (" && " + y_in); } if (!s0_check.empty()) { s0_check = " * INIT_FLT(" + s0_check + ")"; } if (!s1_check.empty()) { s1_check = " * INIT_FLT(" + s1_check + ")"; } if (!s2_check.empty()) { s2_check = " * INIT_FLT(" + s2_check + ")"; } if (!s3_check.empty()) { s3_check = " * INIT_FLT(" + s3_check + ")"; } c += " s0 = args.src_tensor.Read(" + xc[0] + ", " + yc[y] + ", S)" + s0_check + ";\n"; c += " s1 = args.src_tensor.Read(" + xc[1] + ", " + yc[y] + ", S)" + s1_check + ";\n"; c += " s2 = args.src_tensor.Read(" + xc[2] + ", " + yc[y] + ", S)" + s2_check + ";\n"; c += " s3 = args.src_tensor.Read(" + xc[3] + ", " + yc[y] + ", S)" + s3_check + ";\n"; }; c += " {\n"; read_4x_line(0); c += " r0 += TO_ACCUM_TYPE(" + W[0] + " * s0);\n"; c += " r0 += TO_ACCUM_TYPE(" + W[1] + " * s1);\n"; c += " r1 += TO_ACCUM_TYPE(" + W[0] + " * s1);\n"; c += " r0 += TO_ACCUM_TYPE(" + W[2] + " * s2);\n"; c += " r1 += TO_ACCUM_TYPE(" + W[1] + " * s2);\n"; c += " r1 += TO_ACCUM_TYPE(" + W[2] + " * s3);\n"; c += " }\n"; c += " {\n"; read_4x_line(1); c += " r0 += TO_ACCUM_TYPE(" + W[3] + " * s0);\n"; c += " r2 += TO_ACCUM_TYPE(" + W[0] + " * s0);\n"; c += " r0 += TO_ACCUM_TYPE(" + W[4] + " * s1);\n"; c += " r1 += TO_ACCUM_TYPE(" + W[3] + " * s1);\n"; c += " r2 += TO_ACCUM_TYPE(" + W[1] + " * s1);\n"; c += " r3 += TO_ACCUM_TYPE(" + W[0] + " * s1);\n"; c += " r0 += TO_ACCUM_TYPE(" + W[5] + " * s2);\n"; c += " r1 += TO_ACCUM_TYPE(" + W[4] + " * s2);\n"; c += " r2 += TO_ACCUM_TYPE(" + W[2] + " * s2);\n"; c += " r3 += TO_ACCUM_TYPE(" + W[1] + " * s2);\n"; c += " r1 += TO_ACCUM_TYPE(" + W[5] + " * s3);\n"; c += " r3 += TO_ACCUM_TYPE(" + W[2] + " * s3);\n"; c += " }\n"; c += " {\n"; read_4x_line(2); c += " r0 += TO_ACCUM_TYPE(" + W[6] + " * s0);\n"; c += " r2 += TO_ACCUM_TYPE(" + W[3] + " * s0);\n"; c += " r0 += TO_ACCUM_TYPE(" + W[7] + " * s1);\n"; c += " r1 += TO_ACCUM_TYPE(" + W[6] + " * s1);\n"; c += " r2 += TO_ACCUM_TYPE(" + W[4] + " * s1);\n"; c += " r3 += TO_ACCUM_TYPE(" + W[3] + " * s1);\n"; c += " r0 += TO_ACCUM_TYPE(" + W[8] + " * s2);\n"; c += " r1 += TO_ACCUM_TYPE(" + W[7] + " * s2);\n"; c += " r2 += TO_ACCUM_TYPE(" + W[5] + " * s2);\n"; c += " r3 += TO_ACCUM_TYPE(" + W[4] + " * s2);\n"; c += " r1 += TO_ACCUM_TYPE(" + W[8] + " * s3);\n"; c += " r3 += TO_ACCUM_TYPE(" + W[5] + " * s3);\n"; c += " }\n"; c += " {\n"; read_4x_line(3); c += " r2 += TO_ACCUM_TYPE(" + W[6] + " * s0);\n"; c += " r2 += TO_ACCUM_TYPE(" + W[7] + " * s1);\n"; c += " r3 += TO_ACCUM_TYPE(" + W[6] + " * s1);\n"; c += " r2 += TO_ACCUM_TYPE(" + W[8] + " * s2);\n"; c += " r3 += TO_ACCUM_TYPE(" + W[7] + " 
* s2);\n"; c += " r3 += TO_ACCUM_TYPE(" + W[8] + " * s3);\n"; c += " }\n"; if (!weights_are_buffer) { c += " FLT4 bias = args.weights.Read(9, S);\n"; } c += " r0 += TO_ACCUM_TYPE(" + bias + ");\n"; c += " r1 += TO_ACCUM_TYPE(" + bias + ");\n"; c += " r2 += TO_ACCUM_TYPE(" + bias + ");\n"; c += " r3 += TO_ACCUM_TYPE(" + bias + ");\n"; if (local_mem_uploads) { c += " if (X >= args.dst_tensor.Width() || Y >= args.dst_tensor.Height() " "|| S >= args.dst_tensor.Slices()) { \n"; c += " return; \n"; c += " } \n"; } c += " if(X + 0 < args.dst_tensor.Width() && Y + 0 < " "args.dst_tensor.Height()) {\n"; c += " FLT4 result = TO_FLT4(r0);\n"; c += " args.dst_tensor.Write(result, X + 0, Y + 0, S);\n"; c += " }\n"; c += " if(X + 1 < args.dst_tensor.Width() && Y + 0 < " "args.dst_tensor.Height()) {\n"; c += " FLT4 result = TO_FLT4(r1);\n"; c += " args.dst_tensor.Write(result, X + 1, Y + 0, S);\n"; c += " }\n"; c += " if(X + 0 < args.dst_tensor.Width() && Y + 1 < " "args.dst_tensor.Height()) {\n"; c += " FLT4 result = TO_FLT4(r2);\n"; c += " args.dst_tensor.Write(result, X + 0, Y + 1, S);\n"; c += " }\n"; c += " if(X + 1 < args.dst_tensor.Width() && Y + 1 < " "args.dst_tensor.Height()) {\n"; c += " FLT4 result = TO_FLT4(r3);\n"; c += " args.dst_tensor.Write(result, X + 1, Y + 1, S);\n"; c += " }\n"; c += "}\n"; return c; } int3 DepthwiseConv3x3::GetGridSize() const { const int grid_x = DivideRoundUp(dst_[0]->Width(), 2) * dst_[0]->Batch(); const int grid_y = DivideRoundUp(dst_[0]->Height(), 2); const int grid_z = dst_[0]->Slices(); return int3(grid_x, grid_y, grid_z); } void DepthwiseConv3x3::GetPossibleKernelWorkGroups( TuningType tuning_type, const GpuInfo& gpu_info, const KernelInfo& kernel_info, std::vector<int3>* work_groups) const { if (local_mem_uploads_) { work_groups->push_back(work_group_size_); } else { GetPossibleWorkGroups(tuning_type, gpu_info, kernel_info, grid_size_, work_groups); } } bool IsDepthwiseConv3x3Supported(const GpuInfo& gpu_info, const DepthwiseConvolution2DAttributes& attr) { if (gpu_info.IsApiOpenCl() && gpu_info.IsAdreno()) { const std::string kBadDriver = "OpenCL 2.0 QUALCOMM build: commit #7daed58 changeid #I7ece6fe30d " "Date: 10/19/16"; if (absl::StrContains(gpu_info.opencl_info.platform_version, kBadDriver)) { return false; } } return attr.weights.shape.o == 1 && attr.dilations.w == 1 && attr.dilations.h == 1 && attr.weights.shape.w == 3 && attr.weights.shape.h == 3 && attr.strides.w == 1 && attr.strides.h == 1 && attr.padding.prepended.w == 1 && attr.padding.prepended.h == 1 && attr.padding.appended.w == 1 && attr.padding.appended.h == 1; } DepthwiseConv3x3 CreateDepthwiseConv3x3( const GpuInfo& gpu_info, const OperationDef& definition, const DepthwiseConvolution2DAttributes& attr) { bool weights_are_buffer = !gpu_info.SupportsImages() || gpu_info.IsPowerVR() || gpu_info.IsMali() || gpu_info.IsApple(); bool local_mem_uploads = (weights_are_buffer && gpu_info.IsPowerVR() && gpu_info.IsApiOpenCl() && gpu_info.opencl_info.dedicated_local_memory) || (gpu_info.IsApple() && gpu_info.apple_info.IsLocalMemoryPreferredOverGlobal()); DepthwiseConv3x3 result(definition, weights_are_buffer, local_mem_uploads, gpu_info); result.UploadWeightsAndBiases(attr.weights, attr.bias, weights_are_buffer); return result; } } }
#include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/tasks/depthwise_conv_3x3_stride_h2_test_util.h" #include "tensorflow/lite/delegates/gpu/common/tasks/depthwise_conv_3x3_test_util.h" namespace tflite { namespace gpu { namespace cl { namespace { TEST_F(OpenCLOperationTest, DepthwiseConv3x3SimpleWeights) { auto status = DepthwiseConv3x3SimpleWeightsTest(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } TEST_F(OpenCLOperationTest, DepthwiseConv3x3) { auto status = DepthwiseConv3x3Test(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } TEST_F(OpenCLOperationTest, DepthWiseConv3x3StrideH2SimpleWeights) { auto status = DepthWiseConv3x3StrideH2SimpleWeightsTest(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } } } } }
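A standalone sketch of the per-slice layout produced by RearrangeWeightsAndBiasesData in the record above: nine 3x3 filter vectors (y-major) followed by one bias vector per group of four input channels, ten float4 values per slice in total. Plain float arrays and a local Float4 alias stand in for the tflite::gpu tensor and half4/float4 types; OHWI weights with a single output multiplier (o == 1) are assumed:

#include <array>
#include <vector>

using Float4 = std::array<float, 4>;

std::vector<Float4> PackDepthwise3x3(const std::vector<float>& weights_ohwi,
                                     const std::vector<float>& biases,
                                     int channels) {
  const int src_depth = (channels + 3) / 4;  // DivideRoundUp(channels, 4)
  std::vector<Float4> dst;
  dst.reserve(src_depth * 10);
  for (int s = 0; s < src_depth; ++s) {
    // Nine filter taps, y-major then x, one float4 per group of 4 channels.
    for (int y = 0; y < 3; ++y) {
      for (int x = 0; x < 3; ++x) {
        Float4 f{};
        for (int i = 0; i < 4; ++i) {
          const int ch = s * 4 + i;
          // OHWI with o == 1: linear index is (y * 3 + x) * channels + ch.
          f[i] = ch < channels ? weights_ohwi[(y * 3 + x) * channels + ch]
                               : 0.0f;
        }
        dst.push_back(f);
      }
    }
    // One bias vector per slice, zero-padded past the last real channel.
    Float4 b{};
    for (int i = 0; i < 4; ++i) {
      const int ch = s * 4 + i;
      b[i] = ch < static_cast<int>(biases.size()) ? biases[ch] : 0.0f;
    }
    dst.push_back(b);
  }
  return dst;  // size == src_depth * 10, matching texture_width = 10
}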
1,029
cpp
tensorflow/tensorflow
convolution_transposed
tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed.cc
tensorflow/lite/delegates/gpu/cl/kernels/convolution_transposed_test.cc
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_CONVOLUTION_TRANSPOSED_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_CONVOLUTION_TRANSPOSED_H_ #include <cstdint> #include <memory> #include <string> #include <utility> #include <vector> #include "tensorflow/lite/delegates/gpu/common/data_type.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/task/buffer_desc.h" #include "tensorflow/lite/delegates/gpu/common/task/gpu_operation.h" #include "tensorflow/lite/delegates/gpu/common/task/tensor_desc.h" #include "tensorflow/lite/delegates/gpu/common/task/weights_conversion.h" #include "tensorflow/lite/delegates/gpu/common/task/weights_layout.h" #include "tensorflow/lite/delegates/gpu/common/tensor.h" #include "tensorflow/lite/delegates/gpu/common/types.h" namespace tflite { namespace gpu { class ConvolutionTransposed : public GPUOperation { public: ConvolutionTransposed() = default; void GetPossibleKernelWorkGroups( TuningType tuning_type, const GpuInfo& gpu_info, const KernelInfo& kernel_info, std::vector<int3>* work_groups) const override; absl::Status BindArguments(ArgumentsBinder* args) override; int3 GetGridSize() const override; ConvolutionTransposed(ConvolutionTransposed&& operation) = default; ConvolutionTransposed& operator=(ConvolutionTransposed&& operation) = default; ConvolutionTransposed(const ConvolutionTransposed&) = delete; ConvolutionTransposed& operator=(const ConvolutionTransposed&) = delete; WeightsDescription GetWeightsDescription() const { WeightsDescription desc; desc.type = DeduceDataTypeFromPrecision(definition_.precision); desc.layout = weights_layout_; desc.output_group_size = block_size_.w; return desc; } private: friend ConvolutionTransposed CreateConvolutionTransposed( const GpuInfo& gpu_info, const OperationDef& definition, const ConvolutionTransposedAttributes& attr); friend ConvolutionTransposed CreateConvolutionTransposed3D( const GpuInfo& gpu_info, const OperationDef& definition, const ConvolutionTransposed3DAttributes& attr); friend ConvolutionTransposed CreateConvolutionTransposedDynamicWeights( const GpuInfo& gpu_info, const OperationDef& definition, const ConvolutionTransposedAttributes& attr); ConvolutionTransposed(const OperationDef& definition, const ConvolutionTransposedAttributes& attr, const GpuInfo& gpu_info); ConvolutionTransposed(const OperationDef& definition, const ConvolutionTransposed3DAttributes& attr, const GpuInfo& gpu_info); template <DataType T> void UploadWeights(const tflite::gpu::Tensor<OHWI, T>& weights, bool weights_are_buffer); template <DataType T> void UploadWeights(const tflite::gpu::Tensor<OHWDI, T>& weights, bool weights_are_buffer); std::string GenerateConvolutionTransposedCode(const OperationDef& op_def, const GpuInfo& gpu_info, const int4& block_size); int4 stride_; int4 block_size_ = int4(1, 1, 1, 1); WeightsLayout weights_layout_; }; template <DataType T> void ConvolutionTransposed::UploadWeights( const tflite::gpu::Tensor<OHWI, T>& weights, bool weights_are_buffer) { const auto weights_desc = GetWeightsDescription(); const int flt_count = GetTotalElementsCountForLayout(weights_desc, weights.shape); std::vector<uint8_t> weights_data(flt_count * SizeOf(weights_desc.type)); RearrangeWeights(weights, weights_desc, absl::MakeSpan(weights_data)); if (weights_are_buffer) { BufferDescriptor desc; desc.element_type = weights_desc.type; desc.element_size = 
4; desc.size = weights_data.size(); desc.data = std::move(weights_data); args_.AddObject("weights", std::make_unique<BufferDescriptor>(std::move(desc))); } else { uint2 tex_size = Get2dResourceSize(weights_desc, weights.shape); int sub_size = SizeOf(weights_desc.type) * 4 * tex_size.x * tex_size.y; for (int i = 0; i < 4; ++i) { TensorDescriptor desc = CreateConstantHWVec4TensorDescriptor( weights_desc.type, TensorStorageType::TEXTURE_2D, tex_size.x, tex_size.y, weights_data.data() + sub_size * i); args_.AddObject("weights" + std::to_string(i), std::make_unique<TensorDescriptor>(std::move(desc))); } } } template <DataType T> void ConvolutionTransposed::UploadWeights( const tflite::gpu::Tensor<OHWDI, T>& weights, bool weights_are_buffer) { const auto weights_desc = GetWeightsDescription(); const int flt_count = GetTotalElementsCountForLayout(weights_desc, weights.shape); std::vector<uint8_t> weights_data(flt_count * SizeOf(weights_desc.type)); RearrangeWeights(weights, weights_desc, absl::MakeSpan(weights_data)); if (weights_are_buffer) { BufferDescriptor desc; desc.element_type = weights_desc.type; desc.element_size = 4; desc.size = weights_data.size(); desc.data = std::move(weights_data); args_.AddObject("weights", std::make_unique<BufferDescriptor>(std::move(desc))); } else { uint2 tex_size = Get2dResourceSize(weights_desc, weights.shape); int sub_size = SizeOf(weights_desc.type) * 4 * tex_size.x * tex_size.y; for (int i = 0; i < 4; ++i) { TensorDescriptor desc = CreateConstantHWVec4TensorDescriptor( weights_desc.type, TensorStorageType::TEXTURE_2D, tex_size.x, tex_size.y, weights_data.data() + sub_size * i); args_.AddObject("weights" + std::to_string(i), std::make_unique<TensorDescriptor>(std::move(desc))); } } } ConvolutionTransposed CreateConvolutionTransposed( const GpuInfo& gpu_info, const OperationDef& definition, const ConvolutionTransposedAttributes& attr); ConvolutionTransposed CreateConvolutionTransposed3D( const GpuInfo& gpu_info, const OperationDef& definition, const ConvolutionTransposed3DAttributes& attr); ConvolutionTransposed CreateConvolutionTransposedDynamicWeights( const GpuInfo& gpu_info, const OperationDef& definition, const ConvolutionTransposedAttributes& attr); } } #endif #include "tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed.h" #include <memory> #include <string> #include <utility> #include <vector> #include "absl/strings/substitute.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/task/weights_layout.h" #include "tensorflow/lite/delegates/gpu/common/task/work_group_picking.h" namespace tflite { namespace gpu { namespace { bool UseBufferForWeights(const GpuInfo& gpu_info) { return gpu_info.IsMali() || gpu_info.IsApple() || gpu_info.IsAMD(); } } ConvolutionTransposed::ConvolutionTransposed( const OperationDef& definition, const ConvolutionTransposedAttributes& attr, const GpuInfo& gpu_info) : GPUOperation(definition), stride_(attr.stride.w, attr.stride.h, 1, 1), block_size_(2, 2, 1, 2) { if (UseBufferForWeights(gpu_info)) { if (gpu_info.IsApple()) { weights_layout_ = WeightsLayout::kOSpatialIOGroupO4I4; } else { weights_layout_ = WeightsLayout::kOSpatialIOGroupI4O4; } } else { if (gpu_info.IsApple()) { weights_layout_ = WeightsLayout::k2DX4O4YIsSpatialIAndXIsOOGroupI4; } else { weights_layout_ = WeightsLayout::k2DX4I4YIsSpatialIAndXIsOOGroupO4; } } const bool is_f16 = definition.precision == CalculationsPrecision::F16; if 
(gpu_info.IsMali()) { if (gpu_info.mali_info.IsMidgard()) { block_size_ = is_f16 ? int4(2, 1, 1, 2) : int4(2, 1, 1, 1); } else { block_size_ = is_f16 ? int4(2, 2, 1, 2) : int4(2, 2, 1, 1); } compiler_options_.push_back(CompilerOptions::kClFastRelaxedMath); } const int dst_depth = DivideRoundUp(attr.weights.shape.o, 4); if (dst_depth == 1 || dst_depth == 3) { if (!gpu_info.IsMali()) { block_size_.y *= block_size_.w; } block_size_.w = 1; } args_.AddInt("stride_x", stride_.x); args_.AddInt("stride_y", stride_.y); args_.AddInt("padding_x", attr.padding.prepended.w); args_.AddInt("padding_y", attr.padding.prepended.h); args_.AddInt("kernel_size_x", attr.weights.shape.w); args_.AddInt("kernel_size_y", attr.weights.shape.h); code_ = GenerateConvolutionTransposedCode(definition_, gpu_info, block_size_); } ConvolutionTransposed::ConvolutionTransposed( const OperationDef& definition, const ConvolutionTransposed3DAttributes& attr, const GpuInfo& gpu_info) : GPUOperation(definition), stride_(attr.stride.w, attr.stride.h, attr.stride.d, 1), block_size_(2, 2, 1, 2) { if (UseBufferForWeights(gpu_info)) { if (gpu_info.IsApple()) { weights_layout_ = WeightsLayout::kOSpatialIOGroupO4I4; } else { weights_layout_ = WeightsLayout::kOSpatialIOGroupI4O4; } } else { if (gpu_info.IsApple()) { weights_layout_ = WeightsLayout::k2DX4O4YIsSpatialIAndXIsOOGroupI4; } else { weights_layout_ = WeightsLayout::k2DX4I4YIsSpatialIAndXIsOOGroupO4; } } const bool is_f16 = definition.precision == CalculationsPrecision::F16; if (gpu_info.IsMali()) { if (gpu_info.mali_info.IsMidgard()) { block_size_ = is_f16 ? int4(2, 1, 1, 2) : int4(2, 1, 1, 1); } else { block_size_ = is_f16 ? int4(2, 2, 1, 2) : int4(2, 2, 1, 1); } compiler_options_.push_back(CompilerOptions::kClFastRelaxedMath); } const int dst_depth = DivideRoundUp(attr.weights.shape.o, 4); if (dst_depth == 1 || dst_depth == 3) { if (!gpu_info.IsMali()) { block_size_.y *= block_size_.w; } block_size_.w = 1; } args_.AddInt("stride_x", stride_.x); args_.AddInt("stride_y", stride_.y); args_.AddInt("stride_z", stride_.z); args_.AddInt("padding_x", attr.padding.prepended.w); args_.AddInt("padding_y", attr.padding.prepended.h); args_.AddInt("padding_z", attr.padding.prepended.d); args_.AddInt("kernel_size_x", attr.weights.shape.w); args_.AddInt("kernel_size_y", attr.weights.shape.h); args_.AddInt("kernel_size_z", attr.weights.shape.d); args_.AddInt("grid_size_y"); code_ = GenerateConvolutionTransposedCode(definition_, gpu_info, block_size_); } std::string ConvolutionTransposed::GenerateConvolutionTransposedCode( const OperationDef& op_def, const GpuInfo& gpu_info, const int4& block_size) { AddSrcTensor("src_tensor", op_def.src_tensors[0]); AddDstTensor("dst_tensor", op_def.dst_tensors[0]); if (op_def.src_tensors.size() != 1) { if (weights_layout_ == WeightsLayout::kOSpatialIOGroupI4O4 || weights_layout_ == WeightsLayout::kOSpatialIOGroupO4I4) { BufferDescriptor desc; desc.element_type = op_def.src_tensors[1].GetDataType(); desc.element_size = 4; desc.memory_type = MemoryType::GLOBAL; AddSrcBuffer("weights", desc); } else { for (int i = 0; i < 4; ++i) { const std::string name = "weights" + std::to_string(i); AddSrcTensor(name, definition_.src_tensors[1 + i]); } } } const auto& src_def = op_def.src_tensors[0]; std::string c; const bool weights_are_buffer = UseBufferForWeights(gpu_info); for (int s = 0; s < block_size.w; ++s) { std::string f0, f1, f2, f3; if (weights_are_buffer) { if (gpu_info.SupportsPointersInKernels()) { f0 = "weights_cache[" + std::to_string(s * 4 + 0) + "]"; f1 = 
"weights_cache[" + std::to_string(s * 4 + 1) + "]"; f2 = "weights_cache[" + std::to_string(s * 4 + 2) + "]"; f3 = "weights_cache[" + std::to_string(s * 4 + 3) + "]"; } else { f0 = "f0"; f1 = "f1"; f2 = "f2"; f3 = "f3"; } } else { f0 = "f" + std::to_string(s * 4 + 0); f1 = "f" + std::to_string(s * 4 + 1); f2 = "f" + std::to_string(s * 4 + 2); f3 = "f" + std::to_string(s * 4 + 3); } bool use_fma = gpu_info.IsAMD() && gpu_info.IsApiOpenCl(); if (GetWeightsDescription().IsI4O4()) { switch (op_def.precision) { case CalculationsPrecision::F32: case CalculationsPrecision::F16: if (use_fma) { c += "#define CONV" + std::to_string(s) + "(R, S) \\\n"; c += "R = fma(" + f0 + ", S.x, R); \\\n"; c += "R = fma(" + f1 + ", S.y, R); \\\n"; c += "R = fma(" + f2 + ", S.z, R); \\\n"; c += "R = fma(" + f3 + ", S.w, R); \n"; } else { c += "#define CONV" + std::to_string(s) + "(R, S) \\\n"; c += "R += S.x * " + f0 + "; \\\n"; c += "R += S.y * " + f1 + "; \\\n"; c += "R += S.z * " + f2 + "; \\\n"; c += "R += S.w * " + f3 + "; \n"; } break; case CalculationsPrecision::F32_F16: c += "#define CONV" + std::to_string(s) + "(R, S) \\\n"; c += "R += TO_ACCUM_TYPE(S.x * " + f0 + " + S.y * " + f1 + " + S.z * " + f2 + " + S.w * " + f3 + ");\n"; break; } } else { c += "#define CONV" + std::to_string(s) + "(R, S) \\\n"; c += "R.x += dot(S, " + f0 + "); \\\n"; c += "R.y += dot(S, " + f1 + "); \\\n"; c += "R.z += dot(S, " + f2 + "); \\\n"; c += "R.w += dot(S, " + f3 + "); \n"; } } auto generate_id = [&](const std::string& x, const std::string& y, const std::string& z) { std::string id; if (src_def.HasAxis(Axis::WIDTH)) { id += "_w" + x; } if (src_def.HasAxis(Axis::HEIGHT)) { id += "_h" + y; } if (src_def.HasAxis(Axis::DEPTH)) { id += "_d" + z; } return id; }; auto generate_id_full = [&](const std::string& x, const std::string& y, const std::string& z, const std::string& s) { return generate_id(x, y, z) + "_s" + s; }; auto generate_check = [&](const std::string& x, const std::string& y, const std::string& z) { std::string check; const std::vector<Axis> axes{Axis::WIDTH, Axis::HEIGHT, Axis::DEPTH}; const std::vector<std::string> names{"in_x", "in_y", "in_z"}; const std::vector<std::string> coords{x, y, z}; for (int i = 0; i < axes.size(); ++i) { const auto& axis = axes[i]; if (src_def.HasAxis(axis) && !src_def.SupportsZeroClamp(axis, gpu_info) && block_size[i] != 1) { if (!check.empty()) { check += " && "; } check += names[i] + coords[i]; } } return check; }; c += "MAIN_FUNCTION($0) {\n"; if (op_def.IsBatchSupported()) { c += " int linear_id = GLOBAL_ID_0;\n"; c += " int dst_x = (linear_id / args.dst_tensor.Batch());\n"; c += " int B = linear_id % args.dst_tensor.Batch();\n"; c += " args.dst_tensor.SetBatchRef(B);\n"; c += " args.src_tensor.SetBatchRef(B);\n"; } else { c += " int dst_x = GLOBAL_ID_0;\n"; } c += " int rem_x = dst_x % args.stride_x;\n"; c += " int ceil_x = dst_x / args.stride_x;\n"; c += " dst_x = ceil_x * args.stride_x * " + std::to_string(block_size.x) + " + rem_x;\n"; if (src_def.HasAxis(Axis::DEPTH)) { c += " int linear_id_y = GLOBAL_ID_1;\n"; c += " int dst_y = linear_id_y % args.grid_size_y;\n"; c += " int dst_z = linear_id_y / args.grid_size_y;\n"; c += " int rem_z = dst_z % args.stride_z;\n"; c += " int ceil_z = dst_z / args.stride_z;\n"; c += " dst_z = ceil_z * args.stride_z * " + std::to_string(block_size.z) + " + rem_z;\n"; c += " if (dst_z >= args.dst_tensor.Depth()) return;\n"; } else { c += " int dst_y = GLOBAL_ID_1;\n"; } c += " int rem_y = dst_y % args.stride_y;\n"; c += " int ceil_y = dst_y / 
args.stride_y;\n"; c += " dst_y = ceil_y * args.stride_y * " + std::to_string(block_size.y) + " + rem_y;\n"; c += " int dst_s = GLOBAL_ID_2 * " + std::to_string(block_size.w) + ";\n"; c += " if (dst_x >= args.dst_tensor.Width() || dst_y >= " "args.dst_tensor.Height() || dst_s >= " "args.dst_tensor.Slices()) return;\n"; if (weights_are_buffer) { c += " int f_base = dst_s * args.src_tensor.Slices() * args.kernel_size_x " "* args.kernel_size_y"; if (src_def.HasAxis(Axis::DEPTH)) { c += " * args.kernel_size_z"; } c += " * 4;\n"; } for (int s = 0; s < block_size.w; ++s) { const std::string sind = std::to_string(s); for (int z = 0; z < block_size.z; ++z) { const std::string zind = std::to_string(z); for (int y = 0; y < block_size.y; ++y) { const std::string yind = std::to_string(y); for (int x = 0; x < block_size.x; ++x) { const std::string xind = std::to_string(x); c += " ACCUM_FLT4 r" + generate_id_full(xind, yind, zind, sind) + " = INIT_ACCUM_FLT4(0.0f);\n"; } } } } c += " int kernel_first_dst_x = dst_x + args.padding_x;\n"; c += " int kernel_first_dst_y = dst_y + args.padding_y;\n"; c += " int kernel_last_dst_x = kernel_first_dst_x - args.kernel_size_x;\n"; c += " int kernel_last_dst_y = kernel_first_dst_y - args.kernel_size_y;\n"; c += " int offset_x = abs(args.padding_x);\n"; c += " int offset_x_strided = offset_x * args.stride_x;\n"; c += " int src_x = (kernel_first_dst_x + offset_x_strided) / args.stride_x - " "offset_x;\n"; c += " int offset_y = abs(args.padding_y);\n"; c += " int offset_y_strided = offset_y * args.stride_y;\n"; c += " int src_y = (kernel_first_dst_y + offset_y_strided) / args.stride_y - " "offset_y;\n"; if (src_def.HasAxis(Axis::DEPTH)) { c += " int kernel_first_dst_z = dst_z + args.padding_z;\n"; c += " int kernel_last_dst_z = kernel_first_dst_z - args.kernel_size_z;\n"; c += " int offset_z = abs(args.padding_z);\n"; c += " int offset_z_strided = offset_z * args.stride_z;\n"; c += " int src_z = (kernel_first_dst_z + offset_z_strided) / " "args.stride_z - offset_z;\n"; c += " int src_as_dst_z = src_z * args.stride_z;\n"; c += " for (;src_as_dst_z > kernel_last_dst_z; src_z -= 1, src_as_dst_z -= " "args.stride_z) {\n"; for (int z = 0; z < block_size.z; ++z) { const std::string zindex = std::to_string(z); c += " int sz" + zindex + " = src_z + " + zindex + ";\n"; if (!src_def.SupportsZeroClamp(Axis::DEPTH, gpu_info)) { c += " bool in_z" + zindex + " = sz" + zindex + " >= 0 && sz" + zindex + " < args.src_tensor.Depth();\n"; if (!src_def.CanReadOutOfBorder(Axis::DEPTH)) { c += " sz" + zindex + " = clamp(sz" + zindex + ", 0, args.src_tensor.Depth() - 1);\n"; } } } if (block_size.z == 1 && !src_def.SupportsZeroClamp(Axis::DEPTH, gpu_info)) { c += " if (!in_z0) continue;\n"; } c += " int kernel_z = kernel_first_dst_z - src_as_dst_z;\n"; c += " int src_as_dst_y = src_y * args.stride_y;\n"; c += " int src_y_copy = src_y;\n"; c += " for (;src_as_dst_y > kernel_last_dst_y; src_y_copy -= 1, " "src_as_dst_y -= args.stride_y) {\n"; } else { c += " int src_as_dst_y = src_y * args.stride_y;\n"; c += " for (;src_as_dst_y > kernel_last_dst_y; src_y -= 1, src_as_dst_y " "-= args.stride_y) {\n"; } for (int y = 0; y < block_size.y; ++y) { const std::string yindex = std::to_string(y); const std::string src_y = src_def.HasAxis(Axis::DEPTH) ? 
"src_y_copy" : "src_y"; c += " int sy" + yindex + " = " + src_y + " + " + yindex + ";\n"; if (!src_def.SupportsZeroClamp(Axis::HEIGHT, gpu_info)) { c += " bool in_y" + yindex + " = sy" + yindex + " >= 0 && sy" + yindex + " < args.src_tensor.Height();\n"; if (!src_def.CanReadOutOfBorder(Axis::HEIGHT)) { c += " sy" + yindex + " = clamp(sy" + yindex + ", 0, args.src_tensor.Height() - 1);\n"; } } } if (block_size.y == 1 && !src_def.SupportsZeroClamp(Axis::HEIGHT, gpu_info)) { c += " if (!in_y0) continue;\n"; } c += " int kernel_y = kernel_first_dst_y - src_as_dst_y;\n"; c += " int src_as_dst_x = src_x * args.stride_x;\n"; c += " int src_x_copy = src_x;\n"; c += " for (;src_as_dst_x > kernel_last_dst_x; src_x_copy -= 1, " "src_as_dst_x " "-= args.stride_x) {\n"; for (int x = 0; x < block_size.x; ++x) { const std::string xindex = std::to_string(x); c += " int sx" + xindex + " = src_x_copy + " + xindex + ";\n"; if (!src_def.SupportsZeroClamp(Axis::WIDTH, gpu_info)) { c += " bool in_x" + xindex + " = sx" + xindex + " >= 0 && sx" + xindex + " < args.src_tensor.Width();\n"; if (!src_def.CanReadOutOfBorder(Axis::WIDTH)) { c += " sx" + xindex + " = clamp(sx" + xindex + ", 0, args.src_tensor.Width() - 1);\n"; } } } if (block_size.x == 1 && !src_def.SupportsZeroClamp(Axis::WIDTH, gpu_info)) { c += " if (!in_x0) continue;\n"; } for (int z = 0; z < block_size.z; ++z) { const std::string zind = std::to_string(z); for (int y = 0; y < block_size.y; ++y) { const std::string yind = std::to_string(y); for (int x = 0; x < block_size.x; ++x) { const std::string xind = std::to_string(x); const std::string id = generate_id(xind, yind, zind); const std::string check = generate_check(xind, yind, zind); std::string coords = "sx" + xind + ", sy" + yind; if (src_def.HasAxis(Axis::DEPTH)) { coords += ", sz" + zind; } if (src_def.IsLinear()) { c += " int addr" + id + " = args.src_tensor.GetAddress(" + coords + ", 0);\n"; if (src_def.ReturnsZeroForNegOneRead(gpu_info)) { c += " addr" + id + " = select(-1, addr" + id + ", (" + check + "));\n"; c += " int ds" + id + " = select(0, args.src_tensor.SliceStride(), (" + check + "));\n"; } } } } } if (src_def.IsLinear() && !src_def.ReturnsZeroForNegOneRead(gpu_info)) { c += " int ds = args.src_tensor.SliceStride();\n"; } c += " int kernel_x = kernel_first_dst_x - src_as_dst_x;\n"; if (src_def.HasAxis(Axis::DEPTH)) { c += " int kernel_index = (kernel_z * args.kernel_size_y + kernel_y) " "* args.kernel_size_x + kernel_x;\n"; } else { c += " int kernel_index = kernel_y * args.kernel_size_x + kernel_x;\n"; } if (weights_are_buffer) { c += " int f_offset = f_base + kernel_index * " "args.src_tensor.Slices() * " + std::to_string(block_size.w * 4) + ";\n"; } else { c += " int x_c = kernel_index * args.src_tensor.Slices();\n"; } c += " for (int s = 0; s < args.src_tensor.Slices(); ++s) {\n"; const bool conditional_read = gpu_info.IsMali(); for (int z = 0; z < block_size.z; ++z) { const std::string zind = std::to_string(z); for (int y = 0; y < block_size.y; ++y) { const std::string yind = std::to_string(y); for (int x = 0; x < block_size.x; ++x) { const std::string xind = std::to_string(x); const std::string id = generate_id(xind, yind, zind); std::string address; if (src_def.IsLinear()) { address = "addr" + id; } else { address = "sx" + xind + ", sy" + yind; if (src_def.HasAxis(Axis::DEPTH)) { address += ", sz" + zind; } address += ", s"; } if (src_def.ReturnsZeroForNegOneRead(gpu_info)) { c += " FLT4 src" + id + " = args.src_tensor.Read(" + address + "); " + address + " += ds" + id + 
";\n"; } else { const std::string check = generate_check(xind, yind, zind); if (!check.empty()) { if (conditional_read) { c += " FLT4 src" + id + " = " + check + " ? args.src_tensor.Read(" + address + ") : INIT_FLT4(0.0f);\n"; } else { c += " FLT4 src" + id + " = args.src_tensor.Read(" + address + ") * INIT_FLT(" + check + ");\n"; } } else { c += " FLT4 src" + id + " = args.src_tensor.Read(" + address + ");\n"; } if (src_def.IsLinear()) { c += " addr" + id + " += ds;\n"; } } } } } if (weights_are_buffer) { if (gpu_info.SupportsPointersInKernels()) { c += " __global FLT4* weights_cache = " "args.weights.GetPtr(f_offset);\n"; } } else { for (int s = 0; s < block_size.w; ++s) { c += absl::Substitute( R"( FLT4 f$1 = args.weights0.Read(dst_s + $0, x_c); FLT4 f$2 = args.weights1.Read(dst_s + $0, x_c); FLT4 f$3 = args.weights2.Read(dst_s + $0, x_c); FLT4 f$4 = args.weights3.Read(dst_s + $0, x_c); )", s, s * 4 + 0, s * 4 + 1, s * 4 + 2, s * 4 + 3); } c += " x_c++;\n"; } if (weights_are_buffer && !gpu_info.SupportsPointersInKernels()) { c += " FLT4 f0, f1, f2, f3;\n"; } for (int s = 0; s < block_size.w; ++s) { if (weights_are_buffer && !gpu_info.SupportsPointersInKernels()) { c += " f0 = args.weights.Read(f_offset + " + std::to_string(s * 4 + 0) + ");\n"; c += " f1 = args.weights.Read(f_offset + " + std::to_string(s * 4 + 1) + ");\n"; c += " f2 = args.weights.Read(f_offset + " + std::to_string(s * 4 + 2) + ");\n"; c += " f3 = args.weights.Read(f_offset + " + std::to_string(s * 4 + 3) + ");\n"; } const std::string sind = std::to_string(s); for (int z = 0; z < block_size.z; ++z) { const std::string zind = std::to_string(z); for (int y = 0; y < block_size.y; ++y) { const std::string yind = std::to_string(y); for (int x = 0; x < block_size.x; ++x) { const std::string xind = std::to_string(x); const std::string id = generate_id(xind, yind, zind); const std::string full_id = generate_id_full(xind, yind, zind, sind); c += " CONV" + sind + "(r" + full_id + ", src" + id + ");\n"; } } } } if (weights_are_buffer) { c += " f_offset += " + std::to_string(block_size.w * 4) + ";\n"; } c += " }\n"; c += " }\n"; c += " }\n"; if (src_def.HasAxis(Axis::DEPTH)) { c += " }\n"; } for (int s = 0; s < block_size.w; ++s) { const std::string sind = std::to_string(s); c += " if (dst_s < args.dst_tensor.Slices()) {\n"; c += " FLT4 bias_val = args.biases.Read(dst_s);\n"; for (int z = 0; z < block_size.z; ++z) { const std::string zind = std::to_string(z); for (int y = 0; y < block_size.y; ++y) { const std::string yind = std::to_string(y); for (int x = 0; x < block_size.x; ++x) { const std::string xind = std::to_string(x); const std::string id = generate_id_full(xind, yind, zind, sind); std::string checks = "xc < args.dst_tensor.Width() && yc < args.dst_tensor.Height()"; std::string coords = "xc, yc"; c += " {\n"; c += " int xc = dst_x + args.stride_x * " + xind + ";\n"; c += " int yc = dst_y + args.stride_y * " + yind + ";\n"; if (src_def.HasAxis(Axis::DEPTH)) { c += " int zc = dst_z + args.stride_z * " + zind + ";\n"; checks += " && zc < args.dst_tensor.Depth()"; coords += ", zc"; } c += " if (" + checks + ") {\n"; c += " FLT4 res = TO_FLT4(r" + id + ") + bias_val;\n"; c += " args.dst_tensor.Write(res, " + coords + ", dst_s);\n"; c += " }\n"; c += " }\n"; } } } c += " }\n"; c += " dst_s++;\n"; } c += "}\n"; return c; } absl::Status ConvolutionTransposed::BindArguments(ArgumentsBinder* args) { if (definition_.src_tensors[0].HasAxis(Axis::DEPTH)) { const int aligned_h = AlignByN(dst_[0]->Height(), stride_.y * block_size_.y); 
RETURN_IF_ERROR( args->SetInt("grid_size_y", DivideRoundUp(aligned_h, block_size_.y))); } return absl::OkStatus(); } int3 ConvolutionTransposed::GetGridSize() const { const int aligned_w = AlignByN(dst_[0]->Width(), stride_.x * block_size_.x); const int aligned_h = AlignByN(dst_[0]->Height(), stride_.y * block_size_.y); const int aligned_d = AlignByN(dst_[0]->Depth(), stride_.z * block_size_.z); const int grid_x = DivideRoundUp(aligned_w, block_size_.x) * dst_[0]->Batch(); const int grid_y = DivideRoundUp(aligned_h, block_size_.y) * DivideRoundUp(aligned_d, block_size_.z); const int grid_z = DivideRoundUp(dst_[0]->Slices(), block_size_.w); return int3(grid_x, grid_y, grid_z); } void ConvolutionTransposed::GetPossibleKernelWorkGroups( TuningType tuning_type, const GpuInfo& gpu_info, const KernelInfo& kernel_info, std::vector<int3>* work_groups) const { GetPossibleWorkGroupsConv(tuning_type, gpu_info, kernel_info, grid_size_, work_groups); } ConvolutionTransposed CreateConvolutionTransposed( const GpuInfo& gpu_info, const OperationDef& definition, const ConvolutionTransposedAttributes& attr) { ConvolutionTransposed result(definition, attr, gpu_info); result.UploadWeights(attr.weights, UseBufferForWeights(gpu_info)); TensorDescriptor bias_tensor_desc = CreateConstantLinearTensorDescriptor( gpu_info, definition.src_tensors[0].GetDataType(), attr.bias); result.args_.AddObject("biases", std::make_unique<TensorDescriptor>( std::move(bias_tensor_desc))); return result; } ConvolutionTransposed CreateConvolutionTransposed3D( const GpuInfo& gpu_info, const OperationDef& definition, const ConvolutionTransposed3DAttributes& attr) { ConvolutionTransposed result(definition, attr, gpu_info); result.UploadWeights(attr.weights, UseBufferForWeights(gpu_info)); TensorDescriptor bias_tensor_desc = CreateConstantLinearTensorDescriptor( gpu_info, definition.src_tensors[0].GetDataType(), attr.bias); result.args_.AddObject("biases", std::make_unique<TensorDescriptor>( std::move(bias_tensor_desc))); return result; } ConvolutionTransposed CreateConvolutionTransposedDynamicWeights( const GpuInfo& gpu_info, const OperationDef& definition, const ConvolutionTransposedAttributes& attr) { OperationDef new_def = definition; new_def.src_tensors = { definition.src_tensors[0]};
#include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/tasks/convolution_transposed_test_util.h" namespace tflite { namespace gpu { namespace cl { namespace { TEST_F(OpenCLOperationTest, ConvolutionTransposedSimpleWeights) { auto status = ConvolutionTransposedSimpleWeightsTest(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } TEST_F(OpenCLOperationTest, ConvolutionTransposed) { auto status = ConvolutionTransposedTest(&exec_env_); ASSERT_TRUE(status.ok()) << status.message(); } } } } }
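A small worked sketch of the grid-size arithmetic in ConvolutionTransposed::GetGridSize above, with the depth dimension omitted and a hypothetical 7x5x16 output, 2x2 strides and block_size (2, 2, 1, 2) assumed:

#include <cstdio>

int DivideRoundUp(int n, int d) { return (n + d - 1) / d; }
int AlignByN(int n, int align) { return DivideRoundUp(n, align) * align; }

int main() {
  const int width = 5, height = 7, batch = 1;
  const int slices = DivideRoundUp(16, 4);  // 16 output channels -> 4 slices
  const int stride_x = 2, stride_y = 2;
  const int block_x = 2, block_y = 2, block_w = 2;
  const int aligned_w = AlignByN(width, stride_x * block_x);     // 8
  const int aligned_h = AlignByN(height, stride_y * block_y);    // 8
  const int grid_x = DivideRoundUp(aligned_w, block_x) * batch;  // 4
  const int grid_y = DivideRoundUp(aligned_h, block_y);          // 4
  const int grid_z = DivideRoundUp(slices, block_w);             // 2
  printf("grid = (%d, %d, %d)\n", grid_x, grid_y, grid_z);
}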
1,030
cpp
tensorflow/tensorflow
quantize_and_dequantize
tensorflow/lite/delegates/gpu/gl/kernels/quantize_and_dequantize.cc
tensorflow/lite/delegates/gpu/cl/kernels/quantize_and_dequantize_test.cc
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_QUANTIZE_AND_DEQUANTIZE_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_QUANTIZE_AND_DEQUANTIZE_H_ #include <memory> #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/gl/node_shader.h" namespace tflite { namespace gpu { namespace gl { std::unique_ptr<NodeShader> NewQuantizeAndDequantizeNodeShader(); } } } #endif #include "tensorflow/lite/delegates/gpu/gl/kernels/quantize_and_dequantize.h" #include <any> #include <memory> #include <string> #include "absl/memory/memory.h" #include "tensorflow/lite/delegates/gpu/common/data_type.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/types.h" namespace tflite { namespace gpu { namespace gl { namespace { class QuantizeAndDequantize : public NodeShader { public: absl::Status GenerateCode(const GenerationContext& ctx, GeneratedCode* generated_code) const final { std::string code = R"( value_0 = clamp(value_0, vec4($quant_min$), vec4($quant_max$)); value_0 = (value_0 - vec4($quant_min$)) / vec4($quant_scale$); value_0 = floor(value_0 + vec4(0.5)); value_0 = value_0 * vec4($quant_scale$) + vec4($quant_min$); )"; const auto& attr = std::any_cast<const QuantizeAndDequantizeAttributes&>(ctx.op_attr); *generated_code = { {{"quant_min", attr.min}, {"quant_max", attr.max}, {"quant_scale", attr.scale}}, {}, {}, uint3(), uint3(), code, IOStructure::AUTO, IOStructure::AUTO, }; return absl::OkStatus(); } }; } std::unique_ptr<NodeShader> NewQuantizeAndDequantizeNodeShader() { return std::make_unique<QuantizeAndDequantize>(); } } } }
#include "tensorflow/lite/delegates/gpu/gl/kernels/quantize_and_dequantize.h" #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/gl/kernels/test_util.h" #include "tensorflow/lite/kernels/internal/quantization_util.h" using ::testing::FloatNear; using ::testing::Pointwise; namespace tflite { namespace gpu { namespace gl { namespace { TEST(QuantizeAndDequantizeTest, Dim2Bits8) { TensorRef<BHWC> input; input.type = DataType::FLOAT32; input.ref = 0; input.shape = BHWC(1, 3, 2, 1); const int num_bits = 8; const int quant_min = 0; const int quant_max = (1 << num_bits) - 1; QuantizeAndDequantizeAttributes attr; NudgeQuantizationRange( 0.0, 1.0, quant_min, quant_max, &attr.min, &attr.max, &attr.scale); TensorRef<BHWC> output; output.type = DataType::FLOAT32; output.ref = 1; output.shape = BHWC(1, 3, 2, 1); SingleOpModel model({ToString(OperationType::QUANTIZE_AND_DEQUANTIZE), attr}, {input}, {output}); ASSERT_TRUE( model.PopulateTensor(0, {0.0, 1.0, 0.25, 0.50, 0.4444444, 0.00001})); ASSERT_OK(model.Invoke(*NewQuantizeAndDequantizeNodeShader())); EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {0.0f, 1.0f, 0.25098f, 0.498039f, 0.443137f, 0.0f})); } TEST(QuantizeAndDequantizeTest, Dim3Bits8_NegativeRange) { TensorRef<BHWC> input; input.type = DataType::FLOAT32; input.ref = 0; input.shape = BHWC(1, 3, 1, 2); const int num_bits = 8; const int quant_min = 0; const int quant_max = (1 << num_bits) - 1; QuantizeAndDequantizeAttributes attr; NudgeQuantizationRange( -0.9, 0.9, quant_min, quant_max, &attr.min, &attr.max, &attr.scale); TensorRef<BHWC> output; output.type = DataType::FLOAT32; output.ref = 1; output.shape = BHWC(1, 3, 1, 2); SingleOpModel model({ToString(OperationType::QUANTIZE_AND_DEQUANTIZE), attr}, {input}, {output}); ASSERT_TRUE( model.PopulateTensor(0, {0.0, -0.9, 0.25, 0.50, 0.4444444, -0.00001})); ASSERT_OK(model.Invoke(*NewQuantizeAndDequantizeNodeShader())); EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {0.0f, -0.896471f, 0.247059f, 0.501176f, 0.444706f, 0.0f})); } TEST(QuantizeAndDequantizeTest, Dim3Bits16) { TensorRef<BHWC> input; input.type = DataType::FLOAT32; input.ref = 0; input.shape = BHWC(1, 3, 1, 2); const int num_bits = 16; const int quant_min = 0; const int quant_max = (1 << num_bits) - 1; QuantizeAndDequantizeAttributes attr; NudgeQuantizationRange( 0.0, 1.0, quant_min, quant_max, &attr.min, &attr.max, &attr.scale); TensorRef<BHWC> output; output.type = DataType::FLOAT32; output.ref = 1; output.shape = BHWC(1, 3, 1, 2); SingleOpModel model({ToString(OperationType::QUANTIZE_AND_DEQUANTIZE), attr}, {input}, {output}); ASSERT_TRUE( model.PopulateTensor(0, {0.0, 1.0, 0.25, 0.50, 0.4444444, 0.00001})); ASSERT_OK(model.Invoke(*NewQuantizeAndDequantizeNodeShader())); EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {0.0f, 1.0f, 0.250004f, 0.500008f, 0.44445f, 1.5259e-05f})); } TEST(QuantizeAndDequantizeTest, Dim2Bits16_NegativeRange) { TensorRef<BHWC> input; input.type = DataType::FLOAT32; input.ref = 0; input.shape = BHWC(1, 3, 2, 1); const int num_bits = 16; const int quant_min = 0; const int quant_max = (1 << num_bits) - 1; QuantizeAndDequantizeAttributes attr; NudgeQuantizationRange( -0.9, 0.9, quant_min, quant_max, &attr.min, &attr.max, &attr.scale); TensorRef<BHWC> output; output.type = DataType::FLOAT32; output.ref = 1; output.shape = BHWC(1, 3, 2, 1); SingleOpModel model({ToString(OperationType::QUANTIZE_AND_DEQUANTIZE), attr}, {input}, 
{output}); ASSERT_TRUE( model.PopulateTensor(0, {0.0, -0.9, 0.25, 0.50, 0.4444444, -0.00001})); ASSERT_OK(model.Invoke(*NewQuantizeAndDequantizeNodeShader())); EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {0.0f, -0.900014f, 0.249998f, 0.499995f, 0.444431f, 0.0f})); } } } } }
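A standalone sketch of the per-element math in the quantize-and-dequantize shader above: clamp to the nudged [min, max] range, quantize with round-half-up, then dequantize. The nudged parameters (min = 0, max = 1, scale = 1/255 for the 8-bit [0, 1] case) are assumed rather than derived via NudgeQuantizationRange:

#include <algorithm>
#include <cmath>
#include <cstdio>

float QuantizeDequantize(float v, float qmin, float qmax, float scale) {
  v = std::min(std::max(v, qmin), qmax);          // clamp to [qmin, qmax]
  v = std::floor((v - qmin) / scale + 0.5f);      // quantize, round half up
  return v * scale + qmin;                        // dequantize
}

int main() {
  // Reproduces the Dim2Bits8 expectations, e.g. 0.25 -> 0.25098,
  // 0.4444444 -> 0.443137, 0.00001 -> 0.0.
  const float scale = 1.0f / 255.0f;
  for (float v : {0.0f, 1.0f, 0.25f, 0.5f, 0.4444444f, 0.00001f}) {
    printf("%f -> %f\n", v, QuantizeDequantize(v, 0.0f, 1.0f, scale));
  }
}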
1,031
cpp
tensorflow/tensorflow
prelu
tensorflow/lite/delegates/gpu/gl/kernels/prelu.cc
tensorflow/lite/delegates/gpu/cl/kernels/prelu_test.cc
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_PRELU_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_PRELU_H_ #include <memory> #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/gl/node_shader.h" namespace tflite { namespace gpu { namespace gl { std::unique_ptr<NodeShader> NewPReLUNodeShader(); } } } #endif #include "tensorflow/lite/delegates/gpu/gl/kernels/prelu.h" #include <algorithm> #include <any> #include <cstdint> #include <cstring> #include <memory> #include <string> #include <variant> #include <vector> #include "absl/memory/memory.h" #include "tensorflow/lite/delegates/gpu/common/convert.h" #include "tensorflow/lite/delegates/gpu/common/data_type.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/types.h" namespace tflite { namespace gpu { namespace gl { namespace { class PReLULinearAlpha : public NodeShader { public: absl::Status GenerateCode(const GenerationContext& ctx, GeneratedCode* generated_code) const final { const auto& attr = std::any_cast<const PReLUAttributes&>(ctx.op_attr); auto alpha = std::get_if<Tensor<Linear, DataType::FLOAT32>>(&attr.alpha); if (!alpha) { return absl::InvalidArgumentError("Alpha is missing"); } if (alpha->shape.v != ctx.output_shapes[0][3]) { return absl::InvalidArgumentError( "Alpha shape does not match the number of channels."); } *generated_code = GeneratedCode{ {}, {{"alpha", MakeReadonlyObject(alpha->data)}}, {}, uint3(static_cast<int>(ctx.output_shapes[0][2]), static_cast<int>(ctx.output_shapes[0][1]), DivideRoundUp(static_cast<int>(ctx.output_shapes[0][3]), 4)), uint3(), "value_0 = max(value_0, 0.0) + $alpha[gid.z]$ * min(value_0, " "0.0);", IOStructure::AUTO, IOStructure::AUTO, }; return absl::OkStatus(); } }; class PReLUFull : public NodeShader { public: absl::Status GenerateCode(const GenerationContext& ctx, GeneratedCode* generated_code) const final { const auto& attr = std::any_cast<const PReLUAttributes&>(ctx.op_attr); auto alpha = std::get_if<Tensor<HWC, DataType::FLOAT32>>(&attr.alpha); if (!alpha) { return absl::InvalidArgumentError("Alpha is missing"); } if (alpha->shape.h != ctx.output_shapes[0][1] || alpha->shape.w != ctx.output_shapes[0][2] || alpha->shape.c != ctx.output_shapes[0][3]) { return absl::InvalidArgumentError( "Alpha shape does not match input shape."); } ObjectSize obj_size = uint3(static_cast<int>(ctx.output_shapes[0][2]), static_cast<int>(ctx.output_shapes[0][1]), DivideRoundUp(static_cast<int>(ctx.output_shapes[0][3]), 4)); *generated_code = GeneratedCode{ {}, {{"alpha", MakeReadonlyObject(obj_size, ConvertToPHWC4(*alpha))}}, {}, uint3(static_cast<int>(ctx.output_shapes[0][2]), static_cast<int>(ctx.output_shapes[0][1]), DivideRoundUp(static_cast<int>(ctx.output_shapes[0][3]), 4)), uint3(), "value_0 = max(value_0, 0.0) + $alpha[gid.x, gid.y, gid.z]$ " "* min(value_0, 0.0);", IOStructure::AUTO, IOStructure::AUTO, }; return absl::OkStatus(); } }; class PReLU : public NodeShader { public: absl::Status GenerateCode(const GenerationContext& ctx, GeneratedCode* generated_code) const final { const auto& attr = std::any_cast<const PReLUAttributes&>(ctx.op_attr); auto* alpha = std::get_if<Tensor<HWC, DataType::FLOAT32>>(&attr.alpha); return alpha ? 
full_.GenerateCode(ctx, generated_code) : linear_.GenerateCode(ctx, generated_code); } private: PReLULinearAlpha linear_; PReLUFull full_; }; } std::unique_ptr<NodeShader> NewPReLUNodeShader() { return std::make_unique<PReLU>(); } } } }
#include "tensorflow/lite/delegates/gpu/gl/kernels/prelu.h" #include <utility> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/gl/kernels/test_util.h" using ::testing::FloatNear; using ::testing::Pointwise; namespace tflite { namespace gpu { namespace gl { namespace { TEST(PReluTest, LinearAlpha) { TensorRef<BHWC> input; input.type = DataType::FLOAT32; input.ref = 0; input.shape = BHWC(1, 2, 2, 1); PReLUAttributes attr; Tensor<Linear, DataType::FLOAT32> alpha; alpha.shape.v = 1; alpha.id = 1; alpha.data = {2}; attr.alpha = std::move(alpha); TensorRef<BHWC> output; output.type = DataType::FLOAT32; output.ref = 2; output.shape = BHWC(1, 2, 2, 1); SingleOpModel model({ToString(OperationType::PRELU), attr}, {input}, {output}); ASSERT_TRUE(model.PopulateTensor(0, {-1.0, -2.0, 1.0, 2.0})); ASSERT_OK(model.Invoke(*NewPReLUNodeShader())); EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {-2, -4, 1, 2})); } TEST(PReluTest, 2DAlpha) { TensorRef<BHWC> input; input.type = DataType::FLOAT32; input.ref = 0; input.shape = BHWC(1, 2, 2, 1); OperationType op_type = OperationType::PRELU; PReLUAttributes attr; Tensor<HWC, DataType::FLOAT32> alpha; alpha.shape = HWC(2, 2, 1); alpha.id = 1; alpha.data = {1, 2, 2, 2}; attr.alpha = std::move(alpha); TensorRef<BHWC> output; output.type = DataType::FLOAT32; output.ref = 2; output.shape = BHWC(1, 2, 2, 1); SingleOpModel model({ToString(op_type), attr}, {input}, {output}); ASSERT_TRUE(model.PopulateTensor(0, {0.0, -1.0, 2.0, -3.0})); ASSERT_OK(model.Invoke(*NewPReLUNodeShader())); EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {0, -2, 2, -6})); } TEST(PReluTest, 2DAlphaWidthNotEqualHeight) { TensorRef<BHWC> input; input.type = DataType::FLOAT32; input.ref = 0; input.shape = BHWC(1, 2, 1, 1); OperationType op_type = OperationType::PRELU; PReLUAttributes attr; Tensor<HWC, DataType::FLOAT32> alpha; alpha.shape = HWC(2, 1, 1); alpha.id = 1; alpha.data = {1, 1}; attr.alpha = std::move(alpha); TensorRef<BHWC> output; output.type = DataType::FLOAT32; output.ref = 2; output.shape = BHWC(1, 2, 1, 1); SingleOpModel model({ToString(op_type), attr}, {input}, {output}); ASSERT_TRUE(model.PopulateTensor(0, {-1.0, -1.0})); ASSERT_OK(model.Invoke(*NewPReLUNodeShader())); EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {-1, -1})); } TEST(PReluTest, 3DAlpha) { TensorRef<BHWC> input; input.type = DataType::FLOAT32; input.ref = 0; input.shape = BHWC(1, 2, 2, 2); OperationType op_type = OperationType::PRELU; PReLUAttributes attr; Tensor<HWC, DataType::FLOAT32> alpha; alpha.shape = HWC(2, 2, 2); alpha.id = 1; alpha.data = {1, 1, 2, 2, 2, 2, 2, 2}; attr.alpha = std::move(alpha); TensorRef<BHWC> output; output.type = DataType::FLOAT32; output.ref = 2; output.shape = BHWC(1, 2, 2, 2); SingleOpModel model({ToString(op_type), attr}, {input}, {output}); ASSERT_TRUE( model.PopulateTensor(0, {0.0, 0.0, -1.0, -1.0, 2.0, 2.0, -3.0, -3.0})); ASSERT_OK(model.Invoke(*NewPReLUNodeShader())); EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {0, 0, -2, -2, 2, 2, -6, -6})); } } } } }
1,032
cpp
tensorflow/tensorflow
conv_pointwise
tensorflow/lite/delegates/gpu/common/tasks/special/conv_pointwise.cc
tensorflow/lite/delegates/gpu/common/tasks/special/conv_pointwise_test.cc
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_SPECIAL_CONV_POINTWISE_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_COMMON_TASKS_SPECIAL_CONV_POINTWISE_H_ #include <map> #include <set> #include <vector> #include "tensorflow/lite/delegates/gpu/common/model.h" #include "tensorflow/lite/delegates/gpu/common/selectors/subgraph.h" namespace tflite { namespace gpu { struct ConvPointwiseAttributes { std::vector<int2> offsets; bool mean; }; GPUOperation CreateConvPointwise(const OperationDef& definition, const ConvPointwiseAttributes& attr); absl::Status TryFusedPointwiseConv( const GraphFloat32& graph, NodeId first_node_id, CalculationsPrecision precision, const std::map<ValueId, TensorDescriptor>& tensor_descriptors, std::set<NodeId>* consumed_nodes, GPUOperationsSubgraph* gpu_subgraph); } } #endif #include "tensorflow/lite/delegates/gpu/common/tasks/special/conv_pointwise.h" #include <cstdint> #include <map> #include <memory> #include <set> #include <string> #include <utility> #include <vector> #include "absl/strings/str_cat.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/util.h" namespace tflite { namespace gpu { namespace { std::string GenerateCode(const ConvPointwiseAttributes& attr) { std::string c = R"( MAIN_FUNCTION($0) { int linear_id = GLOBAL_ID_0; int X = linear_id / args.dst_tensor.Batch(); int B = linear_id % args.dst_tensor.Batch(); args.weights_tensor.SetBatchRef(B); args.src_tensor.SetBatchRef(B); args.dst_tensor.SetBatchRef(B); int Y = GLOBAL_ID_1; int S = GLOBAL_ID_2; if (X >= args.dst_tensor.Width() || Y >= args.dst_tensor.Height() || S >= args.dst_tensor.Slices()) return; int4 offset0 = args.offsets.Read(S * 2 + 0, 0); int4 offset1 = args.offsets.Read(S * 2 + 1, 0); ACCUM_FLT4 res = INIT_ACCUM_FLT4(0.0f); FLT4 last_mask; int last_src_ch = (args.src_tensor.Slices() - 1) * 4; last_mask.x = INIT_FLT(1.0f); last_mask.y = last_src_ch + 1 < args.src_tensor.Channels() ? INIT_FLT(1.0f) : INIT_FLT(0.0f); last_mask.z = last_src_ch + 2 < args.src_tensor.Channels() ? INIT_FLT(1.0f) : INIT_FLT(0.0f); last_mask.w = last_src_ch + 3 < args.src_tensor.Channels() ? INIT_FLT(1.0f) : INIT_FLT(0.0f); for (int s = 0; s < args.src_tensor.Slices(); ++s) { FLT4 src = args.src_tensor.Read(X, Y, s); FLT4 w0 = args.weights_tensor.Read(X + offset0.x, Y + offset0.y, s); FLT4 w1 = args.weights_tensor.Read(X + offset0.z, Y + offset0.w, s); FLT4 w2 = args.weights_tensor.Read(X + offset1.x, Y + offset1.y, s); FLT4 w3 = args.weights_tensor.Read(X + offset1.z, Y + offset1.w, s); FLT4 mask = INIT_FLT4(1.0f); if (s == (args.src_tensor.Slices() - 1)) { mask = last_mask; } src *= mask; res.x += dot(src, w0); res.y += dot(src, w1); res.z += dot(src, w2); res.w += dot(src, w3); } FLT4 result = TO_FLT4(res); )"; if (attr.mean) { c += " result = result / INIT_FLT(args.src_tensor.Channels());\n"; } c += " args.dst_tensor.Write(result, X, Y, S);\n"; c += "}\n"; return c; } struct NodeContext { Node* node; std::vector<Value*> inputs; std::vector<Value*> outputs; }; absl::Status IsNode(const GraphFloat32& graph, OperationType op_type, int inputs_count, int outputs_count, Node* node, NodeContext* node_context) { const std::string op_desc = ToString(op_type); node_context->node = node; if (node_context->node == nullptr) { return absl::NotFoundError(absl::StrCat("Invalid ", op_desc, " node.")); } if (OperationTypeFromString(node_context->node->operation.type) != op_type) { return absl::InternalError( absl::StrCat("Not correct node type. 
Expected ", op_desc, ", received ", node_context->node->operation.type)); } node_context->inputs = graph.FindInputs(node_context->node->id); node_context->outputs = graph.FindOutputs(node_context->node->id); if (inputs_count != -1) { if (node_context->inputs.size() != inputs_count) { return absl::InternalError( absl::StrCat("Expected ", inputs_count, " input in a ", op_desc, " node. Node has ", node_context->inputs.size())); } } if (node_context->outputs.size() != outputs_count) { return absl::InternalError( absl::StrCat("Expected ", outputs_count, " output in a ", op_desc, " node. Node has ", node_context->outputs.size())); } return absl::OkStatus(); } absl::Status IsMeanNode(const GraphFloat32& graph, Node* node, NodeContext* node_context) { RETURN_IF_ERROR(IsNode(graph, OperationType::MEAN, 1, 1, node, node_context)); auto mean_attr = absl::any_cast<MeanAttributes>(node_context->node->operation.attributes); if (mean_attr.dims != std::set<Axis>{Axis::CHANNELS}) { return absl::InternalError("Expected mean node with channels reduction."); } return absl::OkStatus(); } absl::Status IsReduceSumNode(const GraphFloat32& graph, Node* node, NodeContext* node_context) { RETURN_IF_ERROR( IsNode(graph, OperationType::REDUCE_SUM, 1, 1, node, node_context)); auto reduce_attr = std::any_cast<ReduceAttributes>(node_context->node->operation.attributes); if (reduce_attr.dims != std::set<Axis>{Axis::CHANNELS}) { return absl::InternalError( "Expected reduce_sum node with channels reduction."); } return absl::OkStatus(); } absl::Status IsMulNode(const GraphFloat32& graph, Node* node, NodeContext* node_context) { RETURN_IF_ERROR(IsNode(graph, OperationType::MUL, 2, 1, node, node_context)); if (node_context->inputs[0]->tensor.shape != node_context->inputs[1]->tensor.shape) { return absl::InternalError("Expected mul node with 2 equal tensors."); } return absl::OkStatus(); } absl::Status IsSliceNode(const GraphFloat32& graph, Node* node, NodeContext* node_context) { RETURN_IF_ERROR( IsNode(graph, OperationType::SLICE, 1, 1, node, node_context)); auto slice_attr = absl::any_cast<SliceAttributes>(node_context->node->operation.attributes); if (slice_attr.strides != BHWC(1, 1, 1, 1)) { return absl::InternalError("Not valid attributes in slice node."); } return absl::OkStatus(); } absl::Status IsConcatNode(const GraphFloat32& graph, Node* node, NodeContext* node_context) { RETURN_IF_ERROR( IsNode(graph, OperationType::CONCAT, -1, 1, node, node_context)); auto concat_attr = absl::any_cast<ConcatAttributes>( node_context->node->operation.attributes); if (concat_attr.axis != Axis::CHANNELS) { return absl::InternalError("Not valid attributes in concat node."); } return absl::OkStatus(); } absl::Status GetOffset(const GraphFloat32& graph, NodeId concat_input_node, NodeId second_commom_input_id, int* offset_x, int* offset_y, std::set<NodeId>* consumed_nodes) { NodeContext reduce_node, mul_node, slice_node; absl::Status status = IsMeanNode(graph, graph.FindProducer(concat_input_node), &reduce_node); if (!status.ok()) { RETURN_IF_ERROR(IsReduceSumNode( graph, graph.FindProducer(concat_input_node), &reduce_node)); } RETURN_IF_ERROR(IsMulNode( graph, graph.FindProducer(reduce_node.inputs[0]->id), &mul_node)); const ValueId slice_output_id = mul_node.inputs[0]->id == second_commom_input_id ? 
mul_node.inputs[1]->id : mul_node.inputs[0]->id; RETURN_IF_ERROR( IsSliceNode(graph, graph.FindProducer(slice_output_id), &slice_node)); auto slice_attr = absl::any_cast<SliceAttributes>(slice_node.node->operation.attributes); *offset_x = slice_attr.starts.w; *offset_y = slice_attr.starts.h; consumed_nodes->insert(reduce_node.node->id); consumed_nodes->insert(mul_node.node->id); consumed_nodes->insert(slice_node.node->id); return absl::OkStatus(); } } GPUOperation CreateConvPointwise(const OperationDef& definition, const ConvPointwiseAttributes& attr) { const int dst_channels = attr.offsets.size(); const int dst_depth = DivideRoundUp(dst_channels, 4); std::vector<int32_t> offsets_data(dst_depth * 2 * 4, 0); for (int i = 0; i < attr.offsets.size(); ++i) { offsets_data[i * 2 + 0] = attr.offsets[i].x; offsets_data[i * 2 + 1] = attr.offsets[i].y; } for (int i = attr.offsets.size(); i < offsets_data.size() / 2; ++i) { offsets_data[i * 2 + 0] = attr.offsets.back().x; offsets_data[i * 2 + 1] = attr.offsets.back().y; } GPUOperation op(definition); op.AddSrcTensor("src_tensor", definition.src_tensors[0]); op.AddSrcTensor("weights_tensor", definition.src_tensors[1]); op.AddDstTensor("dst_tensor", definition.dst_tensors[0]); op.code_ = GenerateCode(attr); op.tensor_to_grid_ = TensorToGrid::kWBToX_HDToY_SToZ; TensorDescriptor desc = CreateConstantHWVec4TensorDescriptor( DataType::INT32, TensorStorageType::TEXTURE_2D, dst_depth * 2, 1, reinterpret_cast<uint8_t*>(offsets_data.data())); op.args_.AddObject("offsets", std::make_unique<TensorDescriptor>(desc)); return op; } absl::Status TryFusedPointwiseConv( const GraphFloat32& graph, NodeId first_node_id, CalculationsPrecision precision, const std::map<ValueId, TensorDescriptor>& tensor_descriptors, std::set<NodeId>* consumed_nodes, GPUOperationsSubgraph* gpu_subgraph) { NodeContext slice_node; RETURN_IF_ERROR( IsSliceNode(graph, graph.GetNode(first_node_id), &slice_node)); const auto& first_commom_input = slice_node.inputs[0]; auto slice_consumers = graph.FindConsumers(slice_node.outputs[0]->id); if (slice_consumers.size() != 1) { return absl::NotFoundError("FusedPointwiseConv not suitable."); } NodeContext mul_node; RETURN_IF_ERROR(IsMulNode(graph, slice_consumers[0], &mul_node)); const auto& second_commom_input = mul_node.inputs[0]->id == slice_node.outputs[0]->id ? 
mul_node.inputs[1] : mul_node.inputs[0]; auto mul_consumers = graph.FindConsumers(mul_node.outputs[0]->id); if (mul_consumers.size() != 1) { return absl::NotFoundError("FusedPointwiseConv not suitable."); } NodeContext reduce_node; bool mean = true; absl::Status status = IsMeanNode(graph, mul_consumers[0], &reduce_node); if (!status.ok()) { RETURN_IF_ERROR(IsReduceSumNode(graph, mul_consumers[0], &reduce_node)); mean = false; } auto reduce_consumers = graph.FindConsumers(reduce_node.outputs[0]->id); if (reduce_consumers.size() != 1) { return absl::NotFoundError("FusedPointwiseConv not suitable."); } NodeContext concat_node; RETURN_IF_ERROR(IsConcatNode(graph, reduce_consumers[0], &concat_node)); ConvPointwiseAttributes op_attr; op_attr.mean = mean; std::set<NodeId> temp_consumed_nodes; for (const auto& concat_input : concat_node.inputs) { int offset_x, offset_y; RETURN_IF_ERROR(GetOffset(graph, concat_input->id, second_commom_input->id, &offset_x, &offset_y, &temp_consumed_nodes)); op_attr.offsets.push_back(int2(offset_x, offset_y)); } consumed_nodes->insert(temp_consumed_nodes.begin(), temp_consumed_nodes.end()); consumed_nodes->insert(concat_node.node->id); OperationDef op_def; op_def.precision = precision; auto it = tensor_descriptors.find(second_commom_input->id); if (it != tensor_descriptors.end()) { op_def.src_tensors.push_back(it->second); } it = tensor_descriptors.find(first_commom_input->id); if (it != tensor_descriptors.end()) { op_def.src_tensors.push_back(it->second); } it = tensor_descriptors.find(concat_node.outputs[0]->id); if (it != tensor_descriptors.end()) { op_def.dst_tensors.push_back(it->second); } std::unique_ptr<GPUOperation>* gpu_op = InitSingleOpSubgraph({second_commom_input, first_commom_input}, {concat_node.outputs[0]}, gpu_subgraph); auto operation = CreateConvPointwise(op_def, op_attr); *gpu_op = std::make_unique<GPUOperation>(std::move(operation)); return absl::OkStatus(); } } }
#include "tensorflow/lite/delegates/gpu/common/tasks/special/conv_pointwise.h" #include <memory> #include <utility> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/lite/delegates/gpu/cl/kernels/cl_test.h" #include "tensorflow/lite/delegates/gpu/common/precision.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/common/task/gpu_operation.h" #include "tensorflow/lite/delegates/gpu/common/task/testing_util.h" #include "tensorflow/lite/delegates/gpu/common/tensor.h" #include "tensorflow/lite/delegates/gpu/common/types.h" namespace tflite { namespace gpu { namespace cl { TEST_F(OpenCLOperationTest, SliceMulMeanConcat) { TestExecutionEnvironment* env = &exec_env_; TensorFloat32 src_tensor; src_tensor.shape = BHWC(1, 2, 1, 2); src_tensor.data = {3.0f, 4.0f, 5.0f, 6.0f}; TensorFloat32 weights_tensor; weights_tensor.shape = BHWC(1, 2, 1, 2); weights_tensor.data = {1.0f, 2.0f, 1.0f, 2.0f}; ConvPointwiseAttributes op_attr; op_attr.mean = true; op_attr.offsets.push_back(int2(0, 0)); for (auto precision : env->GetSupportedPrecisions()) { auto data_type = DeduceDataTypeFromPrecision(precision); for (auto storage : env->GetSupportedStorages(data_type)) { const float eps = precision == CalculationsPrecision::F32 ? 1e-6f : 1e-2f; OperationDef op_def; op_def.precision = precision; op_def.src_tensors.push_back({data_type, storage, Layout::HWC}); op_def.src_tensors.push_back({data_type, storage, Layout::HWC}); op_def.dst_tensors.push_back({data_type, storage, Layout::HWC}); TensorFloat32 dst_tensor; GPUOperation operation = CreateConvPointwise(op_def, op_attr); ASSERT_OK(env->ExecuteGPUOperation( {src_tensor, weights_tensor}, std::make_unique<GPUOperation>(std::move(operation)), BHWC(1, 2, 1, 2), &dst_tensor)); ASSERT_OK(PointWiseNear({5.5f, 5.5f, 8.5f, 8.5f}, dst_tensor.data, eps)); } } } TEST_F(OpenCLOperationTest, SliceMulSumConcat) { TestExecutionEnvironment* env = &exec_env_; TensorFloat32 src_tensor; src_tensor.shape = BHWC(1, 2, 1, 2); src_tensor.data = {3.0f, 4.0f, 5.0f, 6.0f}; TensorFloat32 weights_tensor; weights_tensor.shape = BHWC(1, 2, 1, 2); weights_tensor.data = {1.0f, 2.0f, 1.0f, 2.0f}; ConvPointwiseAttributes op_attr; op_attr.mean = false; op_attr.offsets.push_back(int2(0, 0)); for (auto precision : env->GetSupportedPrecisions()) { auto data_type = DeduceDataTypeFromPrecision(precision); for (auto storage : env->GetSupportedStorages(data_type)) { const float eps = precision == CalculationsPrecision::F32 ? 1e-6f : 1e-2f; OperationDef op_def; op_def.precision = precision; op_def.src_tensors.push_back({data_type, storage, Layout::HWC}); op_def.src_tensors.push_back({data_type, storage, Layout::HWC}); op_def.dst_tensors.push_back({data_type, storage, Layout::HWC}); TensorFloat32 dst_tensor; GPUOperation operation = CreateConvPointwise(op_def, op_attr); ASSERT_OK(env->ExecuteGPUOperation( {src_tensor, weights_tensor}, std::make_unique<GPUOperation>(std::move(operation)), BHWC(1, 2, 1, 2), &dst_tensor)); ASSERT_OK( PointWiseNear({11.0f, 11.0f, 17.0f, 17.0f}, dst_tensor.data, eps)); } } } } } }
1,033
cpp
tensorflow/tensorflow
gl_buffer
tensorflow/lite/delegates/gpu/gl/gl_buffer.cc
tensorflow/lite/delegates/gpu/gl/gl_buffer_test.cc
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_GL_BUFFER_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_GL_GL_BUFFER_H_ #include <cstring> #include <functional> #include <vector> #include "absl/types/span.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/gl/gl_call.h" #include "tensorflow/lite/delegates/gpu/gl/gl_errors.h" #include "tensorflow/lite/delegates/gpu/gl/portable_gl31.h" namespace tflite { namespace gpu { namespace gl { class GlBuffer { public: GlBuffer(GLenum target, GLuint id, size_t bytes_size, size_t offset, bool has_ownership) : target_(target), id_(id), bytes_size_(bytes_size), offset_(offset), has_ownership_(has_ownership) {} GlBuffer() : GlBuffer(GL_INVALID_ENUM, GL_INVALID_INDEX, 0, 0, false) {} GlBuffer(GlBuffer&& buffer); GlBuffer& operator=(GlBuffer&& buffer); GlBuffer(const GlBuffer&) = delete; GlBuffer& operator=(const GlBuffer&) = delete; ~GlBuffer(); template <typename T> absl::Status Read(absl::Span<T> data) const; template <typename T> absl::Status Write(absl::Span<const T> data); template <typename T> absl::Status MappedRead( const std::function<absl::Status(absl::Span<const T>)>& reader) const; template <typename T> absl::Status MappedWrite( const std::function<absl::Status(absl::Span<T>)>& writer); absl::Status MakeView(size_t offset, size_t bytes_size, GlBuffer* gl_buffer); GlBuffer MakeRef(); absl::Status BindToIndex(uint32_t index) const; void Release() { has_ownership_ = false; } size_t bytes_size() const { return bytes_size_; } const GLenum target() const { return target_; } const GLuint id() const { return id_; } bool is_valid() const { return id_ != GL_INVALID_INDEX; } size_t offset() const { return offset_; } bool has_ownership() const { return has_ownership_; } private: void Invalidate(); GLenum target_; GLuint id_; size_t bytes_size_; size_t offset_; bool has_ownership_; }; absl::Status CopyBuffer(const GlBuffer& read_buffer, const GlBuffer& write_buffer); absl::Status GetSSBOSize(GLuint id, int64_t* size_bytes); template <typename T> absl::Status CreateReadWriteShaderStorageBuffer(uint32_t num_elements, GlBuffer* gl_buffer); template <typename T> absl::Status CreateReadOnlyShaderStorageBuffer(absl::Span<const T> data, GlBuffer* gl_buffer); template <typename T> absl::Status AppendFromBuffer(const GlBuffer& buffer, std::vector<T>* data) { if (buffer.bytes_size() % sizeof(T) != 0) { return absl::InvalidArgumentError("Buffer is not aligned"); } size_t num_elements = buffer.bytes_size() / sizeof(T); data->resize(data->size() + num_elements); return buffer.Read<T>( absl::MakeSpan(data->data() + data->size() - num_elements, num_elements)); } class GlPersistentBuffer : public GlBuffer { public: GlPersistentBuffer(GLenum target, GLuint id, size_t bytes_size, size_t offset, bool has_ownership, void* data); GlPersistentBuffer(); GlPersistentBuffer(GlPersistentBuffer&& buffer); GlPersistentBuffer& operator=(GlPersistentBuffer&& buffer); GlPersistentBuffer(const GlPersistentBuffer&) = delete; GlPersistentBuffer& operator=(const GlPersistentBuffer&) = delete; ~GlPersistentBuffer(); void* data() { return data_; } private: void* data_; }; absl::Status CreatePersistentBuffer(size_t size, GlPersistentBuffer* gl_buffer); namespace gl_buffer_internal { class BufferId { public: BufferId() : id_(GL_INVALID_INDEX) { TFLITE_GPU_CALL_GL(glGenBuffers, 1 , &id_) .IgnoreError(); } explicit BufferId(GLuint id) : id_(id) {} ~BufferId() { if (id_ != GL_INVALID_INDEX) { TFLITE_GPU_CALL_GL(glDeleteBuffers, 1, &id_).IgnoreError(); } } GLuint 
id() const { return id_; } GLuint Release() { GLuint id = GL_INVALID_INDEX; std::swap(id, id_); return id; } private: GLuint id_; }; class BufferBinder { public: BufferBinder(GLenum target, GLuint id) : target_(target), prev_id_(0) { TFLITE_GPU_CALL_GL(glBindBuffer, target_, id).IgnoreError(); } BufferBinder(GLenum target, GLuint id, GLuint prev_id) : target_(target), prev_id_(prev_id) { TFLITE_GPU_CALL_GL(glBindBuffer, target_, id).IgnoreError(); } ~BufferBinder() { TFLITE_GPU_CALL_GL(glBindBuffer, target_, prev_id_).IgnoreError(); } private: const GLenum target_; GLuint prev_id_; }; class BufferMapper { public: BufferMapper(GLenum target, size_t offset, size_t bytes, GLbitfield access); ~BufferMapper(); void* data() { return data_; } private: const GLenum target_; void* data_; }; } template <typename T> absl::Status CreateReadWriteShaderStorageBuffer(uint32_t num_elements, GlBuffer* gl_buffer) { gl_buffer_internal::BufferId id; gl_buffer_internal::BufferBinder binder(GL_SHADER_STORAGE_BUFFER, id.id()); RETURN_IF_ERROR(TFLITE_GPU_CALL_GL( glBufferData, GL_SHADER_STORAGE_BUFFER, num_elements * sizeof(T), std::vector<T>(num_elements).data(), GL_STREAM_COPY)); *gl_buffer = GlBuffer{GL_SHADER_STORAGE_BUFFER, id.Release(), num_elements * sizeof(T), 0, true}; return absl::OkStatus(); } template <typename T> absl::Status CreateReadOnlyShaderStorageBuffer(absl::Span<const T> data, GlBuffer* gl_buffer) { gl_buffer_internal::BufferId id; gl_buffer_internal::BufferBinder binder(GL_SHADER_STORAGE_BUFFER, id.id()); RETURN_IF_ERROR(TFLITE_GPU_CALL_GL(glBufferData, GL_SHADER_STORAGE_BUFFER, data.size() * sizeof(T), data.data(), GL_STATIC_READ)); *gl_buffer = GlBuffer{GL_SHADER_STORAGE_BUFFER, id.Release(), data.size() * sizeof(T), 0, true}; return absl::OkStatus(); } template <typename T> absl::Status GlBuffer::Read(absl::Span<T> data) const { if (data.size() * sizeof(T) < bytes_size()) { return absl::InvalidArgumentError( "Read from buffer failed. Destination data is shorter than buffer."); } return MappedRead<T>([this, data](absl::Span<const T> src) { std::memcpy(data.data(), src.data(), bytes_size()); return absl::OkStatus(); }); } template <typename T> absl::Status GlBuffer::Write(absl::Span<const T> data) { if (data.size() * sizeof(T) > bytes_size_) { return absl::InvalidArgumentError( "Write to buffer failed. 
Source data is larger than buffer."); } gl_buffer_internal::BufferBinder binder(target_, id_); return TFLITE_GPU_CALL_GL(glBufferSubData, target_, offset_, bytes_size_, data.data()); } template <typename T> absl::Status GlBuffer::MappedRead( const std::function<absl::Status(absl::Span<const T> d)>& reader) const { if (bytes_size_ % sizeof(T) != 0) { return absl::InvalidArgumentError("Buffer is not aligned"); } gl_buffer_internal::BufferBinder binder(target_, id_); gl_buffer_internal::BufferMapper mapper(target_, offset_, bytes_size_, GL_MAP_READ_BIT); if (!mapper.data()) { return GetOpenGlErrors(); } return reader(absl::MakeSpan(reinterpret_cast<const T*>(mapper.data()), bytes_size_ / sizeof(T))); } template <typename T> absl::Status GlBuffer::MappedWrite( const std::function<absl::Status(absl::Span<T> d)>& writer) { if (bytes_size_ % sizeof(T) != 0) { return absl::InvalidArgumentError("Buffer is not aligned"); } gl_buffer_internal::BufferBinder binder(target_, id_); gl_buffer_internal::BufferMapper mapper(target_, offset_, bytes_size_, GL_MAP_WRITE_BIT); if (!mapper.data()) { return GetOpenGlErrors(); } return writer(absl::MakeSpan(reinterpret_cast<T*>(mapper.data()), bytes_size_ / sizeof(T))); } } } } #endif #include "tensorflow/lite/delegates/gpu/gl/gl_buffer.h" #include <utility> #include "tensorflow/lite/delegates/gpu/common/status.h" namespace tflite { namespace gpu { namespace gl { absl::Status CopyBuffer(const GlBuffer& read_buffer, const GlBuffer& write_buffer) { if (read_buffer.bytes_size() != write_buffer.bytes_size()) { return absl::InvalidArgumentError( "Read buffer does not match write buffer size."); } gl_buffer_internal::BufferBinder read_buffer_binder(GL_COPY_READ_BUFFER, read_buffer.id()); gl_buffer_internal::BufferBinder write_buffer_binder(GL_COPY_WRITE_BUFFER, write_buffer.id()); return TFLITE_GPU_CALL_GL(glCopyBufferSubData, GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER, read_buffer.offset(), write_buffer.offset(), read_buffer.bytes_size()); } absl::Status GetSSBOSize(GLuint id, int64_t* size_bytes) { GLuint prev_id; RETURN_IF_ERROR(TFLITE_GPU_CALL_GL(glGetIntegerv, GL_SHADER_STORAGE_BUFFER_BINDING, reinterpret_cast<GLint*>(&prev_id))); gl_buffer_internal::BufferBinder binder(GL_SHADER_STORAGE_BUFFER, id, prev_id); return TFLITE_GPU_CALL_GL(glGetBufferParameteri64v, GL_SHADER_STORAGE_BUFFER, GL_BUFFER_SIZE, size_bytes); } GlBuffer::GlBuffer(GlBuffer&& buffer) : GlBuffer(buffer.target_, buffer.id_, buffer.bytes_size_, buffer.offset_, buffer.has_ownership_) { buffer.has_ownership_ = false; } GlBuffer& GlBuffer::operator=(GlBuffer&& buffer) { if (this != &buffer) { Invalidate(); target_ = buffer.target_; bytes_size_ = buffer.bytes_size_; offset_ = buffer.offset_; has_ownership_ = buffer.has_ownership_; id_ = buffer.id_; buffer.has_ownership_ = false; } return *this; } GlBuffer::~GlBuffer() { Invalidate(); } void GlBuffer::Invalidate() { if (has_ownership_ && id_ != GL_INVALID_INDEX) { TFLITE_GPU_CALL_GL(glDeleteBuffers, 1, &id_).IgnoreError(); id_ = GL_INVALID_INDEX; } } absl::Status GlBuffer::BindToIndex(uint32_t index) const { return TFLITE_GPU_CALL_GL(glBindBufferRange, target_, index, id_, offset_, bytes_size_); } absl::Status GlBuffer::MakeView(size_t offset, size_t bytes_size, GlBuffer* gl_buffer) { if (offset + bytes_size > bytes_size_) { return absl::OutOfRangeError("GlBuffer view is out of range."); } *gl_buffer = GlBuffer(target_, id_, bytes_size, offset_ + offset, false); return absl::OkStatus(); } GlBuffer GlBuffer::MakeRef() { return GlBuffer(target_, id_, 
bytes_size_, offset_, false); } GlPersistentBuffer::GlPersistentBuffer(GLenum target, GLuint id, size_t bytes_size, size_t offset, bool has_ownership, void* data) : GlBuffer(target, id, bytes_size, offset, has_ownership), data_(data) {} GlPersistentBuffer::GlPersistentBuffer() : GlPersistentBuffer(GL_INVALID_ENUM, GL_INVALID_INDEX, 0, 0, false, nullptr) {} GlPersistentBuffer::GlPersistentBuffer(GlPersistentBuffer&& buffer) : GlBuffer(std::move(buffer)), data_(buffer.data_) {} GlPersistentBuffer& GlPersistentBuffer::operator=(GlPersistentBuffer&& buffer) { if (this != &buffer) { data_ = buffer.data_; GlBuffer::operator=(std::move(buffer)); } return *this; } GlPersistentBuffer::~GlPersistentBuffer() { if (!data_) return; gl_buffer_internal::BufferBinder binder(GL_SHADER_STORAGE_BUFFER, id()); glUnmapBuffer(GL_SHADER_STORAGE_BUFFER); } absl::Status CreatePersistentBuffer(size_t size, GlPersistentBuffer* gl_buffer) { PFNGLBUFFERSTORAGEEXTPROC glBufferStorageEXT = nullptr; glBufferStorageEXT = reinterpret_cast<PFNGLBUFFERSTORAGEEXTPROC>( eglGetProcAddress("glBufferStorageEXT")); if (!glBufferStorageEXT) { return absl::UnavailableError("glBufferStorageEXT is not supported"); } gl_buffer_internal::BufferId id; gl_buffer_internal::BufferBinder binder(GL_SHADER_STORAGE_BUFFER, id.id()); RETURN_IF_ERROR(TFLITE_GPU_CALL_GL( glBufferStorageEXT, GL_SHADER_STORAGE_BUFFER, size, nullptr, GL_MAP_COHERENT_BIT_EXT | GL_MAP_READ_BIT | GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT_EXT)); void* data = nullptr; RETURN_IF_ERROR(TFLITE_GPU_CALL_GL( glMapBufferRange, &data, GL_SHADER_STORAGE_BUFFER, 0, size, GL_MAP_READ_BIT | GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT_EXT)); *gl_buffer = GlPersistentBuffer{ GL_SHADER_STORAGE_BUFFER, id.Release(), size, 0, true, data}; return absl::OkStatus(); } namespace gl_buffer_internal { BufferMapper::BufferMapper(GLenum target, size_t offset, size_t bytes, GLbitfield access) : target_(target), data_(glMapBufferRange(target_, offset, bytes, access)) {} BufferMapper::~BufferMapper() { TFLITE_GPU_CALL_GL(glUnmapBuffer, target_).IgnoreError(); } }; } } }
#include "tensorflow/lite/delegates/gpu/gl/gl_buffer.h" #include <memory> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/gl/egl_environment.h" namespace tflite { namespace gpu { namespace gl { namespace { TEST(Buffer, CreateReadWrite) { std::unique_ptr<EglEnvironment> env; ASSERT_TRUE(EglEnvironment::NewEglEnvironment(&env).ok()); GlBuffer buffer; ASSERT_TRUE(CreateReadWriteShaderStorageBuffer<float>(4, &buffer).ok()); std::vector<float> from_buffer; ASSERT_TRUE(AppendFromBuffer(buffer, &from_buffer).ok()); EXPECT_THAT(from_buffer, testing::ElementsAre(0, 0, 0, 0)); } TEST(Buffer, Read) { std::unique_ptr<EglEnvironment> env; ASSERT_TRUE(EglEnvironment::NewEglEnvironment(&env).ok()); std::vector<float> test = {0, 1, 2, 3}; GlBuffer buffer; ASSERT_TRUE(CreateReadOnlyShaderStorageBuffer<float>(test, &buffer).ok()); std::vector<float> from_buffer; ASSERT_TRUE(AppendFromBuffer(buffer, &from_buffer).ok()); EXPECT_EQ(test, from_buffer); } TEST(Buffer, Write) { std::unique_ptr<EglEnvironment> env; ASSERT_TRUE(EglEnvironment::NewEglEnvironment(&env).ok()); GlBuffer buffer; ASSERT_TRUE(CreateReadWriteShaderStorageBuffer<float>(4, &buffer).ok()); std::vector<float> test = {0, 1, 2, 3}; ASSERT_TRUE(buffer.Write<float>(test).ok()); std::vector<float> from_buffer; ASSERT_TRUE(AppendFromBuffer(buffer, &from_buffer).ok()); EXPECT_EQ(test, from_buffer); } TEST(Buffer, View) { std::unique_ptr<EglEnvironment> env; ASSERT_TRUE(EglEnvironment::NewEglEnvironment(&env).ok()); GlBuffer buffer; ASSERT_TRUE(CreateReadWriteShaderStorageBuffer<float>(6, &buffer).ok()); EXPECT_TRUE(buffer.has_ownership()); EXPECT_EQ(24, buffer.bytes_size()); EXPECT_EQ(0, buffer.offset()); GlBuffer view; ASSERT_TRUE(buffer.MakeView(4, 16, &view).ok()); EXPECT_FALSE(view.has_ownership()); EXPECT_EQ(16, view.bytes_size()); EXPECT_EQ(4, view.offset()); std::vector<float> test = {1, 2, 3, 4}; ASSERT_TRUE(view.Write<float>(test).ok()); std::vector<float> from_buffer; ASSERT_TRUE(AppendFromBuffer(buffer, &from_buffer).ok()); EXPECT_THAT(from_buffer, testing::ElementsAre(0, 1, 2, 3, 4, 0)); std::vector<float> from_view; ASSERT_TRUE(AppendFromBuffer(view, &from_view).ok()); EXPECT_THAT(from_view, testing::ElementsAre(1, 2, 3, 4)); } TEST(Buffer, SubView) { std::unique_ptr<EglEnvironment> env; ASSERT_TRUE(EglEnvironment::NewEglEnvironment(&env).ok()); GlBuffer buffer; ASSERT_TRUE(CreateReadWriteShaderStorageBuffer<float>(6, &buffer).ok()); GlBuffer view1; ASSERT_TRUE(buffer.MakeView(4, 16, &view1).ok()); GlBuffer view2; EXPECT_FALSE(view1.MakeView(1, 16, &view2).ok()); ASSERT_TRUE(view1.MakeView(2, 2, &view2).ok()); EXPECT_FALSE(view2.has_ownership()); EXPECT_EQ(2, view2.bytes_size()); EXPECT_EQ(6, view2.offset()); } TEST(Buffer, Copy) { std::unique_ptr<EglEnvironment> env; ASSERT_TRUE(EglEnvironment::NewEglEnvironment(&env).ok()); GlBuffer buffer; ASSERT_TRUE(CreateReadWriteShaderStorageBuffer<float>(4, &buffer).ok()); GlBuffer view1; ASSERT_TRUE(buffer.MakeView(4, 4, &view1).ok()); GlBuffer view2; ASSERT_TRUE(buffer.MakeView(8, 4, &view2).ok()); ASSERT_TRUE(view1.Write<float>({1}).ok()); ASSERT_TRUE(CopyBuffer(view1, view2).ok()); std::vector<float> from_buffer; ASSERT_TRUE(AppendFromBuffer(buffer, &from_buffer).ok()); EXPECT_THAT(from_buffer, testing::ElementsAre(0, 1, 1, 0)); } } } } }
1,034
cpp
tensorflow/tensorflow
android_sync
tensorflow/lite/delegates/gpu/gl/android_sync.cc
tensorflow/lite/delegates/gpu/gl/android_sync_test.cc
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_ANDROID_SYNC_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_GL_ANDROID_SYNC_H_ namespace tflite::gpu::gl { bool WaitFdGpu(int fence_fd); int CreateFdGpu(); } #endif #include "tensorflow/lite/delegates/gpu/gl/android_sync.h" #include <EGL/egl.h> #include <EGL/eglext.h> #include <EGL/eglplatform.h> #include <GLES2/gl2.h> #include <unistd.h> namespace { PFNEGLDUPNATIVEFENCEFDANDROIDPROC eglDupNativeFenceFDANDROID; PFNEGLCREATESYNCKHRPROC eglCreateSyncKHR; PFNEGLWAITSYNCKHRPROC eglWaitSyncKHR; PFNEGLDESTROYSYNCKHRPROC eglDestroySyncKHR; bool IsGlSupported() { static const bool extensions_allowed = [] { eglDupNativeFenceFDANDROID = reinterpret_cast<PFNEGLDUPNATIVEFENCEFDANDROIDPROC>( eglGetProcAddress("eglDupNativeFenceFDANDROID")); eglCreateSyncKHR = reinterpret_cast<PFNEGLCREATESYNCKHRPROC>( eglGetProcAddress("eglCreateSyncKHR")); eglWaitSyncKHR = reinterpret_cast<PFNEGLWAITSYNCKHRPROC>( eglGetProcAddress("eglWaitSyncKHR")); eglDestroySyncKHR = reinterpret_cast<PFNEGLDESTROYSYNCKHRPROC>( eglGetProcAddress("eglDestroySyncKHR")); return eglWaitSyncKHR && eglCreateSyncKHR && eglDupNativeFenceFDANDROID && eglDestroySyncKHR; }(); return extensions_allowed; } } namespace tflite::gpu::gl { bool WaitFdGpu(int fence_fd) { if (fence_fd == -1) { return false; } if (!IsGlSupported()) { return false; } EGLDisplay egl_display = eglGetDisplay(EGL_DEFAULT_DISPLAY); if (egl_display == EGL_NO_DISPLAY) return false; int fd_for_egl = dup(fence_fd); EGLint sync_attribs[] = {EGL_SYNC_NATIVE_FENCE_FD_ANDROID, (EGLint)fd_for_egl, EGL_NONE}; EGLSync fence_sync = eglCreateSyncKHR( egl_display, EGL_SYNC_NATIVE_FENCE_ANDROID, sync_attribs); if (fence_sync != EGL_NO_SYNC_KHR) { eglWaitSyncKHR(egl_display, fence_sync, 0); return true; } else { close(fd_for_egl); return false; } } int CreateFdGpu() { if (IsGlSupported()) { EGLDisplay egl_display = eglGetDisplay(EGL_DEFAULT_DISPLAY); if (egl_display != EGL_NO_DISPLAY) { EGLSync fence_sync = eglCreateSyncKHR(egl_display, EGL_SYNC_NATIVE_FENCE_ANDROID, nullptr); if (fence_sync != EGL_NO_SYNC_KHR) { int fence_fd = eglDupNativeFenceFDANDROID(egl_display, fence_sync); if (fence_fd == -1) { eglDestroySyncKHR(egl_display, fence_sync); } else { return fence_fd; } } } } glFinish(); return -1; } }
#include "tensorflow/lite/delegates/gpu/gl/android_sync.h" #include <memory> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/lite/delegates/gpu/gl/egl_environment.h" namespace tflite::gpu::gl { TEST(AsyncBufferTest, FenceTest) { EXPECT_EQ(CreateFdGpu(), -1); EXPECT_FALSE(WaitFdGpu(1)); std::unique_ptr<EglEnvironment> env; EXPECT_OK(EglEnvironment::NewEglEnvironment(&env)); int gpu_fd = CreateFdGpu(); EXPECT_GE(gpu_fd, 0); EXPECT_TRUE(WaitFdGpu(gpu_fd)); } }
1,035
cpp
tensorflow/tensorflow
compiler
third_party/xla/xla/backends/interpreter/compiler.cc
third_party/xla/xla/service/compiler_test.cc
#ifndef XLA_BACKENDS_INTERPRETER_COMPILER_H_ #define XLA_BACKENDS_INTERPRETER_COMPILER_H_ #include <memory> #include <vector> #include "absl/status/status.h" #include "absl/status/statusor.h" #include "xla/backends/interpreter/platform_id.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/service/compiler.h" #include "xla/service/executable.h" #include "xla/service/hlo_cost_analysis.h" #include "xla/service/hlo_module_config.h" #include "xla/stream_executor/stream_executor.h" #include "tsl/platform/status.h" namespace xla { namespace interpreter { class InterpreterCompiler : public Compiler { public: InterpreterCompiler() {} ~InterpreterCompiler() override {} absl::StatusOr<std::unique_ptr<HloModule>> RunHloPasses( std::unique_ptr<HloModule> hlo_module, se::StreamExecutor* stream_exec, const CompileOptions& options) override; absl::StatusOr<std::unique_ptr<Executable>> RunBackend( std::unique_ptr<HloModule> hlo_module, se::StreamExecutor* stream_exec, const CompileOptions& options) override; absl::StatusOr<std::vector<std::unique_ptr<Executable>>> Compile( std::unique_ptr<HloModuleGroup> module_group, std::vector<std::vector<se::StreamExecutor*>> stream_exec, const CompileOptions& options) override; absl::StatusOr<std::vector<std::unique_ptr<AotCompilationResult>>> CompileAheadOfTime(std::unique_ptr<HloModuleGroup> module_group, const AotCompilationOptions& aot_options) override; HloCostAnalysis::ShapeSizeFunction ShapeSizeBytesFunction() const override; se::Platform::Id PlatformId() const override; private: absl::Status RunHloOptimization(HloModule* hlo_module); InterpreterCompiler(const InterpreterCompiler&) = delete; InterpreterCompiler& operator=(const InterpreterCompiler&) = delete; }; } } #endif #include "xla/backends/interpreter/compiler.h" #include <memory> #include <string> #include <utility> #include <vector> #include "absl/log/log.h" #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/types/span.h" #include "xla/backends/interpreter/executable.h" #include "xla/backends/interpreter/platform_id.h" #include "xla/hlo/evaluator/hlo_evaluator.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/hlo/ir/hlo_module_group.h" #include "xla/literal.h" #include "xla/service/batchnorm_expander.h" #include "xla/service/cholesky_expander.h" #include "xla/service/comparison_expander.h" #include "xla/service/compiler.h" #include "xla/service/computation_placer.h" #include "xla/service/custom_call_target_registry.h" #include "xla/service/dynamic_dimension_inference.h" #include "xla/service/dynamic_index_splitter.h" #include "xla/service/eigh_expander.h" #include "xla/service/executable.h" #include "xla/service/hlo_cost_analysis.h" #include "xla/service/hlo_pass_pipeline.h" #include "xla/service/layout_assignment.h" #include "xla/service/qr_expander.h" #include "xla/service/topk_rewriter.h" #include "xla/service/triangular_solve_expander.h" #include "xla/status_macros.h" #include "xla/stream_executor/platform.h" #include "xla/stream_executor/stream_executor.h" #include "xla/util.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { namespace interpreter { namespace { absl::StatusOr<Literal> HandleEvaluatorCustomCall( const HloInstruction* custom_call, absl::Span<const Literal*> operands) { auto* registry = CustomCallTargetRegistry::Global(); void* target_fn = registry->Lookup(custom_call->custom_call_target(), "Host"); if (!target_fn) { return NotFound("Custom call target '%s' was not registered", 
custom_call->custom_call_target()); } std::vector<const void*> operand_data; operand_data.reserve(operands.size()); for (const auto* literal : operands) { operand_data.push_back(literal->untyped_data()); } auto output = Literal::CreateFromShape(custom_call->shape()); void* output_data = output.untyped_data(); auto* typed_fn = reinterpret_cast<void (*)(void*, const void**)>(target_fn); (*typed_fn)(output_data, operand_data.data()); return std::move(output); } } absl::Status InterpreterCompiler::RunHloOptimization(HloModule* hlo_module) { HloPassPipeline pipeline("Interpreter"); pipeline.AddPass<TopkDecomposer>(); pipeline.AddPass<DynamicIndexSplitter>(); pipeline.AddPass<CholeskyExpander>(); pipeline.AddPass<QrExpander>(); pipeline.AddPass<EighExpander>(); pipeline.AddPass<TriangularSolveExpander>(); pipeline.AddPass<BatchNormExpander>( true, true, true); pipeline.AddPass<LayoutAssignment>( hlo_module->mutable_entry_computation_layout()); return pipeline.Run(hlo_module).status(); } absl::StatusOr<std::unique_ptr<HloModule>> InterpreterCompiler::RunHloPasses( std::unique_ptr<HloModule> hlo_module, se::StreamExecutor* , const CompileOptions& ) { VLOG(1) << "Run hlo passes on graph " << hlo_module->name(); TF_RETURN_IF_ERROR(RunHloOptimization(hlo_module.get())); return std::move(hlo_module); } absl::StatusOr<std::unique_ptr<Executable>> InterpreterCompiler::RunBackend( std::unique_ptr<HloModule> hlo_module, se::StreamExecutor* stream_exec, const CompileOptions& ) { TF_RET_CHECK(stream_exec != nullptr); VLOG(1) << "Run backend " << hlo_module->name(); TF_ASSIGN_OR_RETURN( DynamicDimensionInference dynamic_dimension_inference, DynamicDimensionInference::Run( hlo_module.get(), [&](HloInstruction* hlo) { return OpDynamismSupport::kOptional; })); auto evaluator = std::make_unique<HloEvaluator>(); evaluator->set_use_fast_path( hlo_module->config().debug_options().xla_hlo_evaluator_use_fast_path()); evaluator->set_custom_call_handler(HandleEvaluatorCustomCall); std::unique_ptr<Executable> executable = std::make_unique<InterpreterExecutable>( std::move(hlo_module), std::move(evaluator), std::move(dynamic_dimension_inference)); return std::move(executable); } absl::StatusOr<std::vector<std::unique_ptr<Executable>>> InterpreterCompiler::Compile( std::unique_ptr<HloModuleGroup> module_group, std::vector<std::vector<se::StreamExecutor*>> stream_exec, const CompileOptions& options) { if (module_group->empty()) { return std::vector<std::unique_ptr<Executable>>(); } if (module_group->size() > 1) { return tsl::errors::Unimplemented( "Compilation of multiple HLO modules is not supported on Interpreter."); } if (stream_exec.size() != 1 || stream_exec[0].size() != 1) { return tsl::errors::Unimplemented("Unexpected number of StreamExecutor's."); } auto hlo_modules = module_group->ConsumeModules(); TF_ASSIGN_OR_RETURN(auto module, RunHloPasses(std::move(hlo_modules[0]), stream_exec[0][0], options)); TF_ASSIGN_OR_RETURN(auto executable, RunBackend(std::move(module), stream_exec[0][0], options)); std::vector<std::unique_ptr<Executable>> ret; ret.push_back(std::move(executable)); return std::move(ret); } absl::StatusOr<std::vector<std::unique_ptr<AotCompilationResult>>> InterpreterCompiler::CompileAheadOfTime( std::unique_ptr<HloModuleGroup> module_group, const AotCompilationOptions& aot_options) { return tsl::errors::InvalidArgument( "AOT compilation not supported on Interpreter"); } se::Platform::Id InterpreterCompiler::PlatformId() const { return se::interpreter::kXlaInterpreterPlatformId; } 
HloCostAnalysis::ShapeSizeFunction InterpreterCompiler::ShapeSizeBytesFunction() const { return InterpreterExecutable::ShapeSizeBytes; } static bool InitModule() { xla::Compiler::RegisterCompilerFactory( se::interpreter::kXlaInterpreterPlatformId, []() { return std::make_unique<xla::interpreter::InterpreterCompiler>(); }); xla::ComputationPlacer::RegisterComputationPlacer( se::interpreter::kXlaInterpreterPlatformId, []() { return std::make_unique<xla::ComputationPlacer>(); }); return true; } static bool module_initialized = InitModule(); } }
#include "xla/service/compiler.h" #include <gtest/gtest.h> #include "xla/autotune_results.pb.h" #include "xla/stream_executor/device_description.pb.h" #include "xla/stream_executor/gpu/gpu_init.h" #include "xla/stream_executor/stream_executor.h" #include "xla/tests/test_macros.h" #include "tsl/lib/core/status_test_util.h" #include "tsl/platform/statusor.h" namespace xla { namespace { TEST(TargetConfigTest, DISABLED_ON_CPU(ExecutorConstructorFillsAllFields)) { TF_ASSERT_OK(stream_executor::ValidateGPUMachineManager()); TF_ASSERT_OK_AND_ASSIGN( stream_executor::StreamExecutor * executor, stream_executor::GPUMachineManager()->ExecutorForDevice(0)); Compiler::TargetConfig config(executor); stream_executor::GpuTargetConfigProto target = config.ToProto(); EXPECT_GT(target.dnn_version_info().major(), 0) << target.DebugString(); EXPECT_GT(target.gpu_device_info().threads_per_block_limit(), 0) << target.DebugString(); EXPECT_NE(target.device_description_str(), "") << target.DebugString(); EXPECT_NE(target.platform_name(), "") << target.DebugString(); EXPECT_EQ(target.autotune_results().version(), 0); EXPECT_EQ(5, stream_executor::GpuTargetConfigProto::descriptor()->field_count()) << "Make sure all the fields in GpuTargetConfigProto are set and " "validated!"; } TEST(TargetConfigTest, ProtoConstructorFillsAllFields) { stream_executor::GpuTargetConfigProto config_proto; config_proto.set_platform_name("platform"); config_proto.mutable_dnn_version_info()->set_major(2); config_proto.mutable_gpu_device_info()->set_threads_per_block_limit(5); config_proto.set_device_description_str("foo"); Compiler::TargetConfig config(config_proto); stream_executor::GpuTargetConfigProto target = config.ToProto(); EXPECT_EQ(target.dnn_version_info().major(), config_proto.dnn_version_info().major()) << target.DebugString(); EXPECT_EQ(target.gpu_device_info().threads_per_block_limit(), 5) << target.DebugString(); EXPECT_EQ(target.device_description_str(), "foo") << target.DebugString(); EXPECT_EQ(target.platform_name(), "platform") << target.DebugString(); EXPECT_EQ(target.autotune_results().version(), 0); EXPECT_EQ(5, stream_executor::GpuTargetConfigProto::descriptor()->field_count()) << "Make sure all the fields in GpuTargetConfigProto are set and " "validated!"; } } }
1,036
cpp
tensorflow/tensorflow
runtime
tensorflow/cc/experimental/libtf/runtime/runtime.cc
tensorflow/cc/experimental/libtf/tests/runtime_test.cc
#ifndef TENSORFLOW_CC_EXPERIMENTAL_LIBTF_RUNTIME_RUNTIME_H_ #define TENSORFLOW_CC_EXPERIMENTAL_LIBTF_RUNTIME_RUNTIME_H_ #include <sys/types.h> #include "absl/strings/str_cat.h" #include "absl/types/span.h" #include "tensorflow/c/eager/c_api.h" #include "tensorflow/c/eager/tfe_context_internal.h" #include "tensorflow/c/tf_status_helper.h" #include "tensorflow/c/tf_status_internal.h" #include "tensorflow/cc/experimental/libtf/object.h" #include "tensorflow/cc/experimental/libtf/value.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/statusor.h" namespace tf { namespace libtf { namespace runtime { class Runtime : public Object { public: explicit Runtime(tensorflow::AbstractContext* ctx); tensorflow::StatusOr<Object> Load(const String& name); template <class T> tensorflow::StatusOr<Tensor> CreateHostTensor(absl::Span<const int64_t> shape, int dtype, absl::Span<const T> data); }; template <class T> tensorflow::StatusOr<Tensor> Runtime::CreateHostTensor( absl::Span<const int64_t> shape, int dtype, absl::Span<const T> data) { size_t num_elements = 1; for (int dim = 0; dim < shape.size(); dim++) { if (shape[dim] < 0) { return tensorflow::errors::InvalidArgument(absl::StrCat( "Shape must be fully-defined, got: shape[", dim, "] = ", shape[dim])); } num_elements *= shape[dim]; } if (data.size() != num_elements) { return tensorflow::errors::InvalidArgument(absl::StrCat( "Mismatched shape and data size: \n", "Shape num_elements: ", num_elements, "\n", "Data size: ", data.size(), "\n")); } auto maybe_capsule = Get<internal::Capsule>(String("ctx")); if (!maybe_capsule.status().ok()) { return maybe_capsule.status(); } auto capsule = maybe_capsule.value(); auto ctx = capsule.cast<tensorflow::ImmediateExecutionContext*>(); tensorflow::AbstractTensorPtr t( ctx->CreateTensor(static_cast<tensorflow::DataType>(dtype), shape)); if (t->ByteSize() != sizeof(T) * data.size()) { return tensorflow::errors::InvalidArgument(absl::StrCat( "Invalid number of bytes in data buffer\n", "Expected bytes: ", t->ByteSize(), "\n", "Actual bytes: ", sizeof(T) * data.size())); } memcpy(t->Data(), data.data(), t->ByteSize()); return Tensor(Convert(TaggedValue( impl::TaggedValueTensor(ctx->CreateLocalHandle(t.get()), false)))); } } } } #endif #include "tensorflow/cc/experimental/libtf/runtime/runtime.h" #include <string> #include <utility> #include "absl/container/flat_hash_map.h" #include "tensorflow/c/eager/abstract_context.h" #include "tensorflow/c/eager/c_api.h" #include "tensorflow/c/eager/c_api_experimental.h" #include "tensorflow/c/eager/graph_function.h" #include "tensorflow/c/eager/immediate_execution_context.h" #include "tensorflow/c/eager/tfe_context_internal.h" #include "tensorflow/c/tf_status_helper.h" #include "tensorflow/c/tf_status_internal.h" #include "tensorflow/cc/experimental/libexport/load.h" #include "tensorflow/cc/experimental/libtf/function.h" #include "tensorflow/cc/experimental/libtf/object.h" #include "tensorflow/cc/experimental/libtf/value.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/protobuf/saved_object_graph.pb.h" #include "tensorflow/core/protobuf/struct.pb.h" #include "tensorflow/core/protobuf/trackable_object_graph.pb.h" namespace tf { namespace libtf { namespace runtime { using tensorflow::AbstractContext; using tensorflow::AbstractFunctionPtr; using tensorflow::DataType; using tensorflow::FunctionDef; using tensorflow::PartialTensorShape; using tensorflow::SavedConcreteFunction; using 
tensorflow::SavedObjectGraph; using tensorflow::Status; using tensorflow::StructuredValue; using tensorflow::TensorSpecProto; using tensorflow::libexport::TFPackage; using tensorflow::protobuf::RepeatedPtrField; using tensorflow::tracing::graph::GraphFunction; TaggedValue MakeCallable(const std::string& fn_name, Function fn, AbstractContext* ctx) { auto CallFn = [fn_name, fn, ctx](TaggedValue args_, TaggedValue kwargs_) -> TaggedValue { std::cout << "Calling " << fn_name << std::endl; tensorflow::StatusOr<TaggedValue> v = fn.Execute(ctx, args_); return v.value(); }; return TaggedValue(CallFn); } static tensorflow::StatusOr<TaggedValue> ImportModule(String name, AbstractContext* ctx) { tensorflow::StatusOr<TFPackage> tf_package = TFPackage::Load(name.get()); if (!tf_package.status().ok()) { return tf_package.status(); } TaggedValue module = TaggedValue::Dict(); const RepeatedPtrField<FunctionDef> function_defs = tf_package->GetFunctionDefs(); absl::flat_hash_map<std::string, AbstractFunctionPtr> traces; for (auto& fdef : function_defs) { AbstractFunctionPtr trace(new GraphFunction(fdef), false); traces[fdef.signature().name()] = trace; } const SavedObjectGraph object_graph = tf_package->GetObjectGraph(); auto& nodes = object_graph.nodes(); auto& concrete_functions = object_graph.concrete_functions(); auto& root = nodes.at(0); for (auto& child : root.children()) { auto& child_node = nodes.at(child.node_id()); auto child_name = child.local_name().c_str(); if (child_node.kind_case() == tensorflow::SavedObject::kFunction) { Function tf_function; for (const std::string& fn_name : child_node.function().concrete_functions()) { SavedConcreteFunction saved_concrete_function = concrete_functions.at(fn_name); TaggedValue input_signature = TaggedValue::Tuple(); const RepeatedPtrField<StructuredValue>& args = saved_concrete_function.canonicalized_input_signature() .tuple_value() .values(0) .tuple_value() .values(); for (const StructuredValue& arg : args) { PartialTensorShape shape = arg.tensor_spec_value().shape(); DataType dtype = arg.tensor_spec_value().dtype(); TaggedValue tensor_spec(shape, dtype); input_signature.tuple().emplace_back(tensor_spec); } TensorSpecProto output_tensor_spec_proto = saved_concrete_function.output_signature().tensor_spec_value(); PartialTensorShape output_shape = output_tensor_spec_proto.shape(); DataType output_dtype = output_tensor_spec_proto.dtype(); TaggedValue output_tensor_spec(output_shape, output_dtype); auto& trace = traces[fn_name]; Status status = tf_function.RegisterTrace( std::move(trace), input_signature, output_tensor_spec); } TaggedValue callable = MakeCallable(child_name, tf_function, ctx); module.dict()[TaggedValue(child_name)] = callable; } } return module; } Runtime::Runtime(AbstractContext* ctx) { TaggedValue ctx_capsule = TaggedValue::Capsule(static_cast<void*>(ctx), [](void* p) { auto ctx = static_cast<AbstractContext*>(p); ctx->Release(); }); Set(String("ctx"), Handle(ctx_capsule)); auto Load = [](Object self, String name) -> Object { auto ctx_capsule = self.Get<internal::Capsule>(String("ctx")).value(); auto ctx = ctx_capsule.cast<AbstractContext*>(); return *Cast<Object>(Handle(*ImportModule(name, ctx))); }; Set(String("Load"), Callable(TFLIB_CALLABLE_ADAPTOR(Load))); } tensorflow::StatusOr<Object> Runtime::Load(const String& name) { return Get<Callable>(String("Load"))->Call<Object>(*this, name); } } } }
#include "tensorflow/cc/experimental/libtf/tests/runtime_test.h" namespace tf { namespace libtf { namespace runtime { using ::tensorflow::testing::StatusIs; using ::testing::HasSubstr; using ::tf::libtf::impl::TaggedValueTensor; constexpr char kSimpleModel[] = "tensorflow/cc/experimental/libtf/tests/testdata/simple-model"; TEST_P(RuntimeTest, SimpleModelCallableFloatTest) { Runtime runtime = RuntimeTest::GetParam()(); const std::string module_path = tensorflow::GetDataDependencyFilepath(kSimpleModel); TF_ASSERT_OK_AND_ASSIGN(Object module, runtime.Load(String(module_path.c_str()))); std::cout << "Module imported." << std::endl; TF_ASSERT_OK_AND_ASSIGN(Callable fn, module.Get<Callable>(String("test_float"))); TF_ASSERT_OK_AND_ASSIGN( Tensor tensor, runtime.CreateHostTensor<float>({}, TF_FLOAT, {2.0f})); TF_ASSERT_OK_AND_ASSIGN(Tensor result, fn.Call<Tensor>(Tensor(tensor))); float out_val[1]; TF_ASSERT_OK(result.GetValue(absl::MakeSpan(out_val))); EXPECT_EQ(out_val[0], 6.0); } TEST_P(RuntimeTest, SimpleModelCallableIntTest) { Runtime runtime = RuntimeTest::GetParam()(); const std::string module_path = tensorflow::GetDataDependencyFilepath(kSimpleModel); TF_ASSERT_OK_AND_ASSIGN(Object module, runtime.Load(String(module_path.c_str()))); TF_ASSERT_OK_AND_ASSIGN(Callable fn, module.Get<Callable>(String("test_int"))); TF_ASSERT_OK_AND_ASSIGN(Tensor host_tensor, runtime.CreateHostTensor<int>({}, TF_INT32, {2})); TF_ASSERT_OK_AND_ASSIGN(Tensor tensor, fn.Call<Tensor>(Tensor(host_tensor))); int out_val[1]; TF_ASSERT_OK(tensor.GetValue(absl::MakeSpan(out_val))); EXPECT_EQ(out_val[0], 6); } TEST_P(RuntimeTest, SimpleModelCallableMultipleArgsTest) { Runtime runtime = RuntimeTest::GetParam()(); const std::string module_path = tensorflow::GetDataDependencyFilepath(kSimpleModel); TF_ASSERT_OK_AND_ASSIGN(Object module, runtime.Load(String(module_path.c_str()))); TF_ASSERT_OK_AND_ASSIGN(Callable fn, module.Get<Callable>(String("test_add"))); TF_ASSERT_OK_AND_ASSIGN(Tensor tensor1, runtime.CreateHostTensor<float>({}, TF_FLOAT, {2.0f})) TF_ASSERT_OK_AND_ASSIGN(Tensor tensor2, runtime.CreateHostTensor<float>({}, TF_FLOAT, {3.0f})) TF_ASSERT_OK_AND_ASSIGN(Tensor result_tensor, fn.Call<Tensor>(tensor1, tensor2)); float out_val[1]; TF_ASSERT_OK(result_tensor.GetValue(absl::MakeSpan(out_val))); EXPECT_EQ(out_val[0], 5.0f); } TEST_P(RuntimeTest, CreateHostTensorIncompatibleShape) { Runtime runtime = RuntimeTest::GetParam()(); EXPECT_THAT(runtime.CreateHostTensor<float>({2}, TF_FLOAT, {2.0f}), StatusIs(absl::StatusCode::kInvalidArgument, HasSubstr("Mismatched shape and data size"))); } TEST_P(RuntimeTest, CreateHostTensorNonFullyDefinedShapeRaises) { Runtime runtime = RuntimeTest::GetParam()(); EXPECT_THAT(runtime.CreateHostTensor<float>({-1}, TF_FLOAT, {2.0f}), StatusIs(absl::StatusCode::kInvalidArgument, HasSubstr("Shape must be fully-defined"))); } TEST_P(RuntimeTest, CreateHostTensorIncompatibleDataType) { Runtime runtime = RuntimeTest::GetParam()(); EXPECT_THAT(runtime.CreateHostTensor<float>({1}, TF_BOOL, {2.0f}), StatusIs(absl::StatusCode::kInvalidArgument, HasSubstr("Invalid number of bytes in data buffer"))); } TEST_P(RuntimeTest, TensorCopyInvalidSize) { Runtime runtime = RuntimeTest::GetParam()(); TF_ASSERT_OK_AND_ASSIGN( Tensor tensor, runtime.CreateHostTensor<float>({1}, TF_FLOAT, {2.0f})) float val[2]; EXPECT_THAT(tensor.GetValue(absl::MakeSpan(val)), StatusIs(absl::StatusCode::kInvalidArgument, HasSubstr("Mismatched number of elements"))); } } } }
1,037
cpp
tensorflow/tensorflow
compiled_node
tensorflow/lite/delegates/gpu/gl/compiler/compiled_node.cc
tensorflow/lite/delegates/gpu/gl/compiler/compiled_node_test.cc
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_COMPILER_COMPILED_NODE_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_GL_COMPILER_COMPILED_NODE_H_ #include <vector> #include "tensorflow/lite/delegates/gpu/common/model.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/gl/node_shader.h" #include "tensorflow/lite/delegates/gpu/gl/object.h" namespace tflite { namespace gpu { namespace gl { struct CompiledNodeAttributes { std::vector<Object> inputs; std::vector<Object> outputs; GeneratedCode code; std::vector<NodeId> node_indices; }; absl::Status MergeCode(CompiledNodeAttributes* attr, CompiledNodeAttributes* merged_attr); } } } #endif #include "tensorflow/lite/delegates/gpu/gl/compiler/compiled_node.h" #include <algorithm> #include <string> #include "absl/container/flat_hash_set.h" #include "absl/strings/str_cat.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/gl/compiler/rename.h" namespace tflite { namespace gpu { namespace gl { absl::Status MergeCode(CompiledNodeAttributes* attr, CompiledNodeAttributes* merged_attr) { absl::flat_hash_set<std::string> known_names; for (const auto& parameter : merged_attr->code.parameters) { known_names.insert(parameter.name); } for (const auto& object : merged_attr->code.objects) { known_names.insert(object.first); } int index = merged_attr->code.parameters.size() + merged_attr->code.objects.size(); RETURN_IF_ERROR(Rename( [&](absl::string_view name) -> std::string { std::string n(name.begin(), name.end()); std::string ret = n; while (known_names.find(ret) != known_names.end()) { ret = absl::StrCat(n, index++); } known_names.insert(ret); return ret; }, &attr->code)); std::move(attr->code.objects.begin(), attr->code.objects.end(), std::back_inserter(merged_attr->code.objects)); std::move(attr->code.parameters.begin(), attr->code.parameters.end(), std::back_inserter(merged_attr->code.parameters)); std::move(attr->node_indices.begin(), attr->node_indices.end(), std::back_inserter(merged_attr->node_indices)); return absl::OkStatus(); } } } }
#include "tensorflow/lite/delegates/gpu/gl/compiler/compiled_node.h" #include <memory> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/lite/delegates/gpu/gl/variable.h" namespace tflite { namespace gpu { namespace gl { namespace { bool VariableDuplicates(std::vector<Variable> variables) { std::sort( std::begin(variables), std::end(variables), [](const auto& lhs, const auto& rhs) { return lhs.name < rhs.name; }); for (int i = 0; i < variables.size() - 1; ++i) { if (variables[i].name == variables[i + 1].name) return true; } return false; } TEST(CompiledNodeTest, NoDuplicates) { Variable scalar; scalar.name = "scalar"; Variable scalar1; scalar1.name = "scalar1"; CompiledNodeAttributes attr; CompiledNodeAttributes merged_attr; attr.code.parameters = {scalar, scalar1}; merged_attr.code.parameters = {scalar}; ASSERT_OK(MergeCode(&attr, &merged_attr)); EXPECT_FALSE(VariableDuplicates(merged_attr.code.parameters)); } TEST(CompiledNodeTest, NameConvergenceConflict) { Variable scalar; scalar.name = "scalar"; Variable scalar1; scalar1.name = "scalar1"; CompiledNodeAttributes attr; CompiledNodeAttributes merged_attr; attr.code.parameters = {scalar1, scalar}; merged_attr.code.parameters = {scalar}; ASSERT_OK(MergeCode(&attr, &merged_attr)); EXPECT_FALSE(VariableDuplicates(merged_attr.code.parameters)); } } } } }
1,038
cpp
tensorflow/tensorflow
object_accessor
tensorflow/lite/delegates/gpu/gl/compiler/object_accessor.cc
tensorflow/lite/delegates/gpu/gl/compiler/object_accessor_test.cc
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_COMPILER_OBJECT_ACCESSOR_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_GL_COMPILER_OBJECT_ACCESSOR_H_ #include <map> #include <string> #include <vector> #include "tensorflow/lite/delegates/gpu/gl/compiler/preprocessor.h" #include "tensorflow/lite/delegates/gpu/gl/compiler/variable_accessor.h" #include "tensorflow/lite/delegates/gpu/gl/object.h" namespace tflite { namespace gpu { namespace gl { class ObjectAccessor : public InlineRewrite { public: ObjectAccessor(bool is_mali, VariableAccessor* variable_accessor) : ObjectAccessor(is_mali, false, variable_accessor) { } ObjectAccessor(bool is_mali, bool sampler_textures, VariableAccessor* variable_accessor) : is_mali_(is_mali), sampler_textures_(sampler_textures), variable_accessor_(variable_accessor) {} RewriteStatus Rewrite(absl::string_view input, std::string* output) final; bool AddObject(const std::string& name, Object object); std::string GetObjectDeclarations() const; std::string GetFunctionsDeclarations() const; std::vector<Object> GetObjects() const; private: RewriteStatus RewriteRead(absl::string_view location, std::string* output); RewriteStatus RewriteWrite(absl::string_view location, absl::string_view value, std::string* output); std::map<std::string, Object> name_to_object_; const bool is_mali_; const bool sampler_textures_; VariableAccessor* variable_accessor_; }; namespace object_accessor_internal { struct IndexedElement { absl::string_view object_name; std::vector<absl::string_view> indices; }; IndexedElement ParseElement(absl::string_view input); } } } } #endif #include "tensorflow/lite/delegates/gpu/gl/compiler/object_accessor.h" #include <string> #include <utility> #include <variant> #include "absl/strings/ascii.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" #include "absl/strings/str_join.h" #include "absl/strings/str_split.h" #include "absl/types/variant.h" #include "tensorflow/lite/delegates/gpu/common/data_type.h" #include "tensorflow/lite/delegates/gpu/common/types.h" namespace tflite { namespace gpu { namespace gl { namespace object_accessor_internal { IndexedElement ParseElement(absl::string_view input) { auto i = input.find('['); if (i == std::string::npos || input.back() != ']') { return {}; } return {input.substr(0, i), absl::StrSplit(input.substr(i + 1, input.size() - i - 2), ',', absl::SkipWhitespace())}; } } namespace { void MaybeConvertToHalf(DataType data_type, absl::string_view value, std::string* output) { if (data_type == DataType::FLOAT16) { absl::StrAppend(output, "Vec4ToHalf(", value, ")"); } else { absl::StrAppend(output, value); } } void MaybeConvertFromHalf(DataType data_type, absl::string_view value, std::string* output) { if (data_type == DataType::FLOAT16) { absl::StrAppend(output, "Vec4FromHalf(", value, ")"); } else { absl::StrAppend(output, value); } } struct ReadFromTextureGenerator { RewriteStatus operator()(size_t) const { if (element.indices.size() != 1) { result->append("WRONG_NUMBER_OF_INDICES"); return RewriteStatus::ERROR; } if (sampler_textures) { absl::StrAppend(result, "texelFetch(", element.object_name, ", ivec2(", element.indices[0], ", 0), 0)"); } else { absl::StrAppend(result, "imageLoad(", element.object_name, ", ivec2(", element.indices[0], ", 0))"); } return RewriteStatus::SUCCESS; } template <typename Shape> RewriteStatus operator()(const Shape&) const { if (element.indices.size() != Shape::size()) { result->append("WRONG_NUMBER_OF_INDICES"); return RewriteStatus::ERROR; } if (sampler_textures) { 
absl::StrAppend(result, "texelFetch(", element.object_name, ", ivec", Shape::size(), "(", absl::StrJoin(element.indices, ", "), "), 0)"); } else { absl::StrAppend(result, "imageLoad(", element.object_name, ", ivec", Shape::size(), "(", absl::StrJoin(element.indices, ", "), "))"); } return RewriteStatus::SUCCESS; } const object_accessor_internal::IndexedElement& element; const bool sampler_textures; std::string* result; }; struct ReadFromBufferGenerator { RewriteStatus operator()(size_t) const { if (element.indices.size() != 1) { result->append("WRONG_NUMBER_OF_INDICES"); return RewriteStatus::ERROR; } MaybeConvertFromHalf( data_type, absl::StrCat(element.object_name, ".data[", element.indices[0], "]"), result); return RewriteStatus::SUCCESS; } RewriteStatus operator()(const uint2& size) const { if (element.indices.size() == 1) { return (*this)(1U); } if (element.indices.size() != 2) { result->append("WRONG_NUMBER_OF_INDICES"); return RewriteStatus::ERROR; } MaybeConvertFromHalf( data_type, absl::StrCat(element.object_name, ".data[", element.indices[0], " + $", element.object_name, "_w$ * (", element.indices[1], ")]"), result); *requires_sizes = true; return RewriteStatus::SUCCESS; } RewriteStatus operator()(const uint3& size) const { if (element.indices.size() == 1) { return (*this)(1U); } if (element.indices.size() != 3) { result->append("WRONG_NUMBER_OF_INDICES"); return RewriteStatus::ERROR; } MaybeConvertFromHalf( data_type, absl::StrCat(element.object_name, ".data[", element.indices[0], " + $", element.object_name, "_w$ * (", element.indices[1], " + $", element.object_name, "_h$ * (", element.indices[2], "))]"), result); *requires_sizes = true; return RewriteStatus::SUCCESS; } DataType data_type; const object_accessor_internal::IndexedElement& element; std::string* result; bool* requires_sizes; }; RewriteStatus GenerateReadAccessor( const Object& object, const object_accessor_internal::IndexedElement& element, bool sampler_textures, std::string* result, bool* requires_sizes) { switch (object.object_type) { case ObjectType::BUFFER: return std::visit(ReadFromBufferGenerator{object.data_type, element, result, requires_sizes}, object.size); case ObjectType::TEXTURE: return std::visit( ReadFromTextureGenerator{element, sampler_textures, result}, object.size); case ObjectType::UNKNOWN: return RewriteStatus::ERROR; } } struct WriteToBufferGenerator { RewriteStatus operator()(size_t) const { if (element.indices.size() != 1) { result->append("WRONG_NUMBER_OF_INDICES"); return RewriteStatus::ERROR; } absl::StrAppend(result, element.object_name, ".data[", element.indices[0], "] = "); MaybeConvertToHalf(data_type, value, result); return RewriteStatus::SUCCESS; } RewriteStatus operator()(const uint2& size) const { if (element.indices.size() == 1) { return (*this)(1U); } if (element.indices.size() != 2) { result->append("WRONG_NUMBER_OF_INDICES"); return RewriteStatus::ERROR; } absl::StrAppend(result, element.object_name, ".data[", element.indices[0], " + $", element.object_name, "_w$ * (", element.indices[1], ")] = "); MaybeConvertToHalf(data_type, value, result); *requires_sizes = true; return RewriteStatus::SUCCESS; } RewriteStatus operator()(const uint3& size) const { if (element.indices.size() == 1) { return (*this)(1U); } if (element.indices.size() != 3) { result->append("WRONG_NUMBER_OF_INDICES"); return RewriteStatus::ERROR; } absl::StrAppend(result, element.object_name, ".data[", element.indices[0], " + $", element.object_name, "_w$ * (", element.indices[1], " + $", element.object_name, 
"_h$ * (", element.indices[2], "))] = "); MaybeConvertToHalf(data_type, value, result); *requires_sizes = true; return RewriteStatus::SUCCESS; } DataType data_type; const object_accessor_internal::IndexedElement& element; absl::string_view value; std::string* result; bool* requires_sizes; }; struct WriteToTextureGenerator { RewriteStatus operator()(size_t) const { if (element.indices.size() != 1) { result->append("WRONG_NUMBER_OF_INDICES"); return RewriteStatus::ERROR; } absl::StrAppend(result, "imageStore(", element.object_name, ", ivec2(", element.indices[0], ", 0), ", value, ")"); return RewriteStatus::SUCCESS; } template <typename Shape> RewriteStatus operator()(const Shape&) const { if (element.indices.size() != Shape::size()) { result->append("WRONG_NUMBER_OF_INDICES"); return RewriteStatus::ERROR; } absl::StrAppend(result, "imageStore(", element.object_name, ", ivec", Shape::size(), "(", absl::StrJoin(element.indices, ", "), "), ", value, ")"); return RewriteStatus::SUCCESS; } const object_accessor_internal::IndexedElement& element; absl::string_view value; std::string* result; }; RewriteStatus GenerateWriteAccessor( const Object& object, const object_accessor_internal::IndexedElement& element, absl::string_view value, std::string* result, bool* requires_sizes) { switch (object.object_type) { case ObjectType::BUFFER: return std::visit(WriteToBufferGenerator{object.data_type, element, value, result, requires_sizes}, object.size); case ObjectType::TEXTURE: return std::visit(WriteToTextureGenerator{element, value, result}, object.size); case ObjectType::UNKNOWN: return RewriteStatus::ERROR; } } std::string ToAccessModifier(AccessType access, bool use_readonly_modifier) { switch (access) { case AccessType::READ: return use_readonly_modifier ? " readonly" : ""; case AccessType::WRITE: return " writeonly"; case AccessType::READ_WRITE: return " restrict"; } return " unknown_access"; } std::string ToBufferType(DataType data_type) { switch (data_type) { case DataType::UINT8: case DataType::UINT16: case DataType::UINT32: return "uvec4"; case DataType::UINT64: return "u64vec4_not_available_in_glsl"; case DataType::INT8: case DataType::INT16: case DataType::INT32: return "ivec4"; case DataType::INT64: return "i64vec4_not_available_in_glsl"; case DataType::FLOAT16: return "uvec2"; case DataType::BOOL: case DataType::FLOAT32: return "vec4"; case DataType::FLOAT64: return "dvec4"; case DataType::UNKNOWN: return "unknown_buffer_type"; } } struct TextureImageTypeGetter { std::string operator()(size_t) const { return (*this)(uint2()); } std::string operator()(const uint2&) const { switch (type) { case DataType::UINT16: case DataType::UINT32: return "uimage2D"; case DataType::INT16: case DataType::INT32: return "iimage2D"; case DataType::FLOAT16: case DataType::FLOAT32: return "image2D"; default: return "unknown_image_2d"; } } std::string operator()(const uint3&) const { switch (type) { case DataType::UINT16: case DataType::UINT32: return "uimage2DArray"; case DataType::INT16: case DataType::INT32: return "iimage2DArray"; case DataType::FLOAT16: case DataType::FLOAT32: return "image2DArray"; default: return "unknown_image_2d_array"; } } DataType type; }; struct TextureSamplerTypeGetter { std::string operator()(size_t) const { return (*this)(uint2()); } std::string operator()(const uint2&) const { switch (type) { case DataType::FLOAT16: case DataType::FLOAT32: return "sampler2D"; case DataType::INT32: case DataType::INT16: return "isampler2D"; case DataType::UINT32: case DataType::UINT16: return 
"usampler2D"; default: return "unknown_sampler2D"; } } std::string operator()(const uint3&) const { switch (type) { case DataType::FLOAT16: case DataType::FLOAT32: return "sampler2DArray"; case DataType::INT32: case DataType::INT16: return "isampler2DArray"; case DataType::UINT32: case DataType::UINT16: return "usampler2DArray"; default: return "unknown_sampler2DArray"; } } DataType type; }; std::string ToImageType(const Object& object, bool sampler_textures) { if (sampler_textures && (object.access == AccessType::READ)) { return std::visit(TextureSamplerTypeGetter{object.data_type}, object.size); } else { return std::visit(TextureImageTypeGetter{object.data_type}, object.size); } } std::string ToImageLayoutQualifier(DataType type) { switch (type) { case DataType::UINT16: return "rgba16ui"; case DataType::UINT32: return "rgba32ui"; case DataType::INT16: return "rgba16i"; case DataType::INT32: return "rgba32i"; case DataType::FLOAT16: return "rgba16f"; case DataType::FLOAT32: return "rgba32f"; default: return "unknown_image_layout"; } } std::string ToImagePrecision(DataType type) { switch (type) { case DataType::UINT16: case DataType::INT16: case DataType::FLOAT16: return "mediump"; case DataType::UINT32: case DataType::INT32: case DataType::FLOAT32: return "highp"; default: return "unknown_image_precision"; } } struct SizeParametersAdder { void operator()(size_t) const {} void operator()(const uint2& size) const { variable_accessor->AddUniformParameter( {absl::StrCat(object_name, "_w"), static_cast<int32_t>(size.x)}); } void operator()(const uint3& size) const { variable_accessor->AddUniformParameter( {absl::StrCat(object_name, "_w"), static_cast<int32_t>(size.x)}); variable_accessor->AddUniformParameter( {absl::StrCat(object_name, "_h"), static_cast<int32_t>(size.y)}); } absl::string_view object_name; VariableAccessor* variable_accessor; }; void AddSizeParameters(absl::string_view object_name, const Object& object, VariableAccessor* parameters) { std::visit(SizeParametersAdder{object_name, parameters}, object.size); } void GenerateObjectDeclaration(absl::string_view name, const Object& object, std::string* declaration, bool is_mali, bool sampler_textures) { switch (object.object_type) { case ObjectType::BUFFER: absl::StrAppend(declaration, "layout(binding = ", object.binding, ")", ToAccessModifier(object.access, !is_mali), " buffer B", object.binding, " { ", ToBufferType(object.data_type), " data[]; } ", name, ";\n"); break; case ObjectType::TEXTURE: if (sampler_textures && (object.access == AccessType::READ)) { absl::StrAppend(declaration, "layout(binding = ", object.binding, ") uniform ", ToImagePrecision(object.data_type), " ", ToImageType(object, sampler_textures), " ", name, ";\n"); } else { absl::StrAppend( declaration, "layout(", ToImageLayoutQualifier(object.data_type), ", binding = ", object.binding, ")", ToAccessModifier(object.access, true), " uniform ", ToImagePrecision(object.data_type), " ", ToImageType(object, sampler_textures), " ", name, ";\n"); } break; case ObjectType::UNKNOWN: break; } } } RewriteStatus ObjectAccessor::Rewrite(absl::string_view input, std::string* output) { std::pair<absl::string_view, absl::string_view> n = absl::StrSplit(input, absl::MaxSplits('=', 1), absl::SkipWhitespace()); if (n.first.empty()) { return RewriteStatus::NOT_RECOGNIZED; } if (n.second.empty()) { return RewriteRead(absl::StripAsciiWhitespace(n.first), output); } return RewriteWrite(absl::StripAsciiWhitespace(n.first), absl::StripAsciiWhitespace(n.second), output); } RewriteStatus 
ObjectAccessor::RewriteRead(absl::string_view location, std::string* output) { auto element = object_accessor_internal::ParseElement(location); if (element.object_name.empty()) { return RewriteStatus::NOT_RECOGNIZED; } auto it = name_to_object_.find( std::string(element.object_name.data(), element.object_name.size())); if (it == name_to_object_.end()) { return RewriteStatus::NOT_RECOGNIZED; } bool requires_sizes = false; auto status = GenerateReadAccessor(it->second, element, sampler_textures_, output, &requires_sizes); if (requires_sizes) { AddSizeParameters(it->first, it->second, variable_accessor_); } return status; } RewriteStatus ObjectAccessor::RewriteWrite(absl::string_view location, absl::string_view value, std::string* output) { auto element = object_accessor_internal::ParseElement(location); if (element.object_name.empty()) { return RewriteStatus::NOT_RECOGNIZED; } auto it = name_to_object_.find( std::string(element.object_name.data(), element.object_name.size())); if (it == name_to_object_.end()) { return RewriteStatus::NOT_RECOGNIZED; } bool requires_sizes = false; auto status = GenerateWriteAccessor(it->second, element, value, output, &requires_sizes); if (requires_sizes) { AddSizeParameters(it->first, it->second, variable_accessor_); } return status; } bool ObjectAccessor::AddObject(const std::string& name, Object object) { if (object.object_type == ObjectType::UNKNOWN) { return false; } return name_to_object_.insert({name, std::move(object)}).second; } std::string ObjectAccessor::GetObjectDeclarations() const { std::string declarations; for (auto& o : name_to_object_) { GenerateObjectDeclaration(o.first, o.second, &declarations, is_mali_, sampler_textures_); } return declarations; } std::string ObjectAccessor::GetFunctionsDeclarations() const { for (const auto& o : name_to_object_) { if (o.second.data_type == DataType::FLOAT16 && o.second.object_type == ObjectType::BUFFER) { return absl::StrCat( "#define Vec4FromHalf(v) vec4(unpackHalf2x16(v.x), " "unpackHalf2x16(v.y))\n", "#define Vec4ToHalf(v) uvec2(packHalf2x16(v.xy), " "packHalf2x16(v.zw))"); } } return ""; } std::vector<Object> ObjectAccessor::GetObjects() const { std::vector<Object> objects; objects.reserve(name_to_object_.size()); for (auto& o : name_to_object_) { objects.push_back(o.second); } return objects; } } } }
#include "tensorflow/lite/delegates/gpu/gl/compiler/object_accessor.h" #include <string> #include <variant> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/types/variant.h" #include "tensorflow/lite/delegates/gpu/common/types.h" #include "tensorflow/lite/delegates/gpu/gl/compiler/variable_accessor.h" #include "tensorflow/lite/delegates/gpu/gl/variable.h" namespace tflite { namespace gpu { namespace gl { struct ParameterComparator { template <typename T> bool operator()(const T& t) const { const T* v = std::get_if<T>(&p.value); return v && t == *v; } const Variable& p; }; bool operator==(const Variable& l, const Variable& r) { return l.name == r.name && std::visit(ParameterComparator{l}, r.value); } namespace { TEST(Preprocessor, CornerCases) { VariableAccessor variable_accessor(false); ObjectAccessor accessor(false, &variable_accessor); std::string result; ASSERT_EQ(accessor.Rewrite("", &result), RewriteStatus::NOT_RECOGNIZED); ASSERT_EQ(accessor.Rewrite("=", &result), RewriteStatus::NOT_RECOGNIZED); } TEST(Preprocessor, ReadFromBuffer) { VariableAccessor variable_accessor(false); ObjectAccessor accessor(false, &variable_accessor); ASSERT_TRUE( accessor.AddObject("obj", MakeReadonlyBuffer(std::vector<float>{1.0}))); std::string result; EXPECT_EQ(accessor.Rewrite("obj[i]", &result), RewriteStatus::SUCCESS); EXPECT_TRUE(variable_accessor.GetUniformParameters().empty()); ASSERT_EQ(result, "obj.data[i]"); } TEST(Preprocessor, ReadFromBufferLinear) { VariableAccessor variable_accessor(false); ObjectAccessor accessor(false, &variable_accessor); ASSERT_TRUE(accessor.AddObject( "obj", MakeReadonlyBuffer(uint3(1, 2, 3), std::vector<float>{1.0}))); std::string result; EXPECT_EQ(accessor.Rewrite("obj[i]", &result), RewriteStatus::SUCCESS); EXPECT_TRUE(variable_accessor.GetUniformParameters().empty()); ASSERT_EQ(result, "obj.data[i]"); } TEST(Preprocessor, ReadFromBufferByIndex) { VariableAccessor variable_accessor(false); ObjectAccessor accessor(false, &variable_accessor); ASSERT_TRUE(accessor.AddObject( "obj", MakeReadonlyBuffer(uint3(1, 2, 3), std::vector<float>{1.0}))); std::string result; EXPECT_EQ(accessor.Rewrite("obj[x,y + 5,z]", &result), RewriteStatus::SUCCESS); EXPECT_THAT(variable_accessor.GetUniformParameters(), testing::UnorderedElementsAre(Variable{"obj_w", 1}, Variable{"obj_h", 2})); ASSERT_EQ(result, "obj.data[x + $obj_w$ * (y + 5 + $obj_h$ * (z))]"); } TEST(Preprocessor, ReadFromTexture) { VariableAccessor variable_accessor(false); ObjectAccessor accessor(false, &variable_accessor); ASSERT_TRUE(accessor.AddObject( "obj", MakeReadonlyTexture(uint3(1, 2, 3), {1.0, 2.0, 3.0, 4.0}))); std::string result; EXPECT_EQ(accessor.Rewrite("obj[i,j,k]", &result), RewriteStatus::SUCCESS); EXPECT_TRUE(variable_accessor.GetUniformParameters().empty()); ASSERT_EQ(result, "imageLoad(obj, ivec3(i, j, k))"); } TEST(Preprocessor, ReadFromTexture1D) { VariableAccessor variable_accessor(false); ObjectAccessor accessor(false, &variable_accessor); ASSERT_TRUE( accessor.AddObject("obj", MakeReadonlyTexture({1.0, 2.0, 3.0, 4.0}))); std::string result; EXPECT_EQ(accessor.Rewrite("obj[i]", &result), RewriteStatus::SUCCESS); EXPECT_TRUE(variable_accessor.GetUniformParameters().empty()); ASSERT_EQ(result, "imageLoad(obj, ivec2(i, 0))"); } TEST(Preprocessor, WriteToBuffer) { VariableAccessor variable_accessor(false); ObjectAccessor accessor(false, &variable_accessor); ASSERT_TRUE( accessor.AddObject("obj", MakeReadonlyBuffer(std::vector<float>{1.0}))); std::string result; 
EXPECT_EQ(accessor.Rewrite(" obj[i] =value", &result), RewriteStatus::SUCCESS); EXPECT_TRUE(variable_accessor.GetUniformParameters().empty()); ASSERT_EQ(result, "obj.data[i] = value"); } TEST(Preprocessor, WriteToBufferByIndex) { VariableAccessor variable_accessor(false); ObjectAccessor accessor(false, &variable_accessor); ASSERT_TRUE(accessor.AddObject( "obj", MakeReadonlyBuffer(uint3(1, 2, 3), {1.0, 2.0, 3.0, 4.0}))); std::string result; EXPECT_EQ(accessor.Rewrite(" obj[i,j,k] =value", &result), RewriteStatus::SUCCESS); EXPECT_THAT(variable_accessor.GetUniformParameters(), testing::UnorderedElementsAre(Variable{"obj_w", 1}, Variable{"obj_h", 2})); ASSERT_EQ(result, "obj.data[i + $obj_w$ * (j + $obj_h$ * (k))] = value"); } TEST(Preprocessor, WriteToTexture) { VariableAccessor variable_accessor(false); ObjectAccessor accessor(false, &variable_accessor); ASSERT_TRUE(accessor.AddObject( "obj", MakeReadonlyTexture(uint3(1, 1, 1), {1.0, 2.0, 3.0, 4.0}))); std::string result; EXPECT_EQ(accessor.Rewrite("obj[i,j,k]= value ", &result), RewriteStatus::SUCCESS); ASSERT_EQ(result, "imageStore(obj, ivec3(i, j, k), value)"); } TEST(Preprocessor, WriteToTexture1D) { VariableAccessor variable_accessor(false); ObjectAccessor accessor(false, &variable_accessor); ASSERT_TRUE( accessor.AddObject("obj", MakeReadonlyTexture({1.0, 2.0, 3.0, 4.0}))); std::string result; EXPECT_EQ(accessor.Rewrite("obj[i]= value ", &result), RewriteStatus::SUCCESS); EXPECT_TRUE(variable_accessor.GetUniformParameters().empty()); ASSERT_EQ(result, "imageStore(obj, ivec2(i, 0), value)"); } TEST(Preprocessor, FailedWriteToBuffer) { VariableAccessor variable_accessor(false); ObjectAccessor accessor(false, &variable_accessor); ASSERT_TRUE( accessor.AddObject("obj", MakeReadonlyBuffer(std::vector<float>{1.0}))); std::string result; EXPECT_EQ(accessor.Rewrite(" obj[i,j] =value", &result), RewriteStatus::ERROR); ASSERT_EQ(result, "WRONG_NUMBER_OF_INDICES"); } TEST(Preprocessor, FailedWriteToTexture) { VariableAccessor variable_accessor(false); ObjectAccessor accessor(false, &variable_accessor); ASSERT_TRUE(accessor.AddObject( "obj", MakeReadonlyTexture(uint3(1, 1, 1), {1.0, 2.0, 3.0, 4.0}))); std::string result; EXPECT_EQ(accessor.Rewrite("obj[i]= value ", &result), RewriteStatus::ERROR); ASSERT_EQ(result, "WRONG_NUMBER_OF_INDICES"); } TEST(Preprocessor, DeclareTexture) { VariableAccessor variable_accessor(false); ObjectAccessor accessor(false, &variable_accessor); ASSERT_TRUE(accessor.AddObject( "obj", MakeReadonlyTexture(uint3(1, 1, 1), {1.0, 2.0, 3.0, 4.0}))); ASSERT_EQ(accessor.GetObjectDeclarations(), "layout(rgba32f, binding = 0) readonly uniform highp image2DArray " "obj;\n"); } TEST(Preprocessor, DeclareBuffer) { VariableAccessor variable_accessor(false); ObjectAccessor accessor(true, &variable_accessor); ASSERT_TRUE( accessor.AddObject("obj", MakeReadonlyBuffer(std::vector<float>{1.0}))); ASSERT_EQ(accessor.GetObjectDeclarations(), "layout(binding = 0) buffer B0 { vec4 data[]; } obj;\n"); } } } } }
1,039
cpp
tensorflow/tensorflow
variable_accessor
tensorflow/lite/delegates/gpu/gl/compiler/variable_accessor.cc
tensorflow/lite/delegates/gpu/gl/compiler/variable_accessor_test.cc
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_COMPILER_VARIABLE_ACCESSOR_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_GL_COMPILER_VARIABLE_ACCESSOR_H_ #include <set> #include <string> #include <vector> #include "absl/container/flat_hash_map.h" #include "tensorflow/lite/delegates/gpu/gl/compiler/preprocessor.h" #include "tensorflow/lite/delegates/gpu/gl/variable.h" namespace tflite { namespace gpu { namespace gl { class VariableAccessor : public InlineRewrite { public: explicit VariableAccessor(bool inline_values, bool vulkan_support = false) : inline_values_(inline_values), vulkan_support_(vulkan_support) {} RewriteStatus Rewrite(absl::string_view input, std::string* output) final; bool AddSharedVariable(Variable&& variable); bool AddUniformParameter(Variable&& variable); bool IsEmptyVariableLength(const Variable& variable) const; std::string GetConstDeclarations() const; std::string GetSharedVariableDeclarations() const; std::string GetUniformParameterDeclarations() const; std::vector<Variable> GetUniformParameters() const; private: const bool inline_values_; const bool vulkan_support_; absl::flat_hash_map<std::string, Variable> name_to_variable_; std::set<std::string> shared_variables_; std::set<std::string> uniform_parameters_; }; namespace variable_accessor_internal { struct VariableReference { absl::string_view name; absl::string_view index; absl::string_view field; }; VariableReference Parse(absl::string_view input); } } } } #endif #include "tensorflow/lite/delegates/gpu/gl/compiler/variable_accessor.h" #include <string> #include <utility> #include <variant> #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" #include "absl/strings/str_join.h" #include "absl/types/variant.h" #include "tensorflow/lite/delegates/gpu/common/types.h" namespace tflite { namespace gpu { namespace gl { namespace variable_accessor_internal { VariableReference Parse(absl::string_view input) { VariableReference ref; auto start_index = input.find('['); if (start_index != std::string::npos) { auto end_index = input.rfind(']'); if (end_index == std::string::npos) { return ref; } ref.index = input.substr(start_index + 1, end_index - start_index - 1); ref.name = input.substr(0, start_index); ref.field = input.substr(end_index + 1); } else { auto dot = input.find('.'); if (dot != std::string::npos) { ref.name = input.substr(0, dot); ref.field = input.substr(dot); } else { ref.name = input; } } return ref; } } namespace { struct VariableTypeGetter { std::string operator()(int) const { return "int"; } std::string operator()(const int2&) const { return "ivec2"; } std::string operator()(const std::vector<int2>&) const { return "ivec2"; } std::string operator()(const int4&) const { return "ivec4"; } std::string operator()(unsigned int) const { return "uint"; } std::string operator()(const uint4&) const { return "uvec4"; } std::string operator()(float) const { return "float"; } std::string operator()(const float2&) const { return "vec2"; } std::string operator()(const float4&) const { return "vec4"; } std::string operator()(const std::vector<float4>&) const { return "vec4"; } }; std::string GetVariableType(const Variable::ValueType& value) { return std::visit(VariableTypeGetter(), value); } struct LengthGetter { template <typename T> int operator()(const T& param) const { return 1; } template <typename T> int operator()(const std::vector<T>& param) const { return param.size(); } }; int GetLength(const Variable::ValueType& value) { return std::visit(LengthGetter(), value); } template <typename T> void 
FormatValue(std::string* result, T t) { absl::StrAppend(result, t); } template <> void FormatValue(std::string* result, float t) { absl::StrAppend(result, absl::StrFormat("%.9ff", t)); } template <typename T, int N> std::vector<std::string> ToString(const std::array<T, N>& data) { std::vector<std::string> result(N); for (int i = 0; i < N; ++i) { FormatValue(&result[i], data[i]); } return result; } struct ConstGenerator { template <typename T> void operator()(T t) const { FormatValue(result, t); } template <typename T> void operator()(const Vec2<T>& v) const { absl::StrAppend(result, VariableTypeGetter()(v), "(", absl::StrJoin(ToString<T, 2>(v.data_), ","), ")"); } template <typename T> void operator()(const Vec3<T>& v) const { absl::StrAppend(result, VariableTypeGetter()(v), "(", absl::StrJoin(ToString<T, 3>(v.data_), ","), ")"); } template <typename T> void operator()(const Vec4<T>& v) const { absl::StrAppend(result, VariableTypeGetter()(v), "(", absl::StrJoin(ToString<T, 4>(v.data_), ","), ")"); } template <typename T> void operator()(const std::vector<T>& v) const { std::string type = VariableTypeGetter()(v); absl::StrAppend(result, type, "[", v.size(), "]("); bool first = true; for (const auto& i : v) { if (first) { first = false; } else { absl::StrAppend(result, ","); } (*this)(i); } absl::StrAppend(result, ")"); } std::string* result; }; void GetValue(const Variable::ValueType& value, std::string* result) { std::visit(ConstGenerator{result}, value); } struct SharedVariableDeclarationGenerator { template <typename T> void operator()(const T&) const { absl::StrAppend(result, "shared highp ", GetVariableType(variable.value), " ", variable.name, ";\n"); } template <typename T> void operator()(const std::vector<T>& v) const { absl::StrAppend(result, "shared highp ", GetVariableType(variable.value), " ", variable.name); if (v.empty()) { absl::StrAppend( result, "[gl_WorkGroupSize.z * gl_WorkGroupSize.y * gl_WorkGroupSize.x];\n"); } else { absl::StrAppend(result, "[", v.size(), "];\n"); } } const Variable& variable; std::string* result; }; void GenerateSharedVariableDeclaration(const Variable& variable, std::string* result) { std::visit(SharedVariableDeclarationGenerator{variable, result}, variable.value); } struct UniformParameterDeclarationGenerator { template <typename T> void operator()(const T&) const { absl::StrAppend(result, "uniform ", GetVariableType(variable.value), " ", variable.name, ";\n"); } template <typename T> void operator()(const std::vector<T>& v) const { absl::StrAppend(result, "uniform ", GetVariableType(variable.value), " ", variable.name, "[", v.size(), "];\n"); } const Variable& variable; std::string* result; }; void GenerateUniformParameterDeclaration(const Variable& variable, std::string* result) { std::visit(UniformParameterDeclarationGenerator{variable, result}, variable.value); } struct VulkanPushConstantGenerator { template <typename T> void operator()(const T&) const { absl::StrAppend(result, " ", GetVariableType(variable.value), " ", variable.name, ";\n"); } template <typename T> void operator()(const std::vector<T>& v) const { absl::StrAppend(result, " ", GetVariableType(variable.value), " ", variable.name, "[", v.size(), "];\n"); } const Variable& variable; std::string* result; }; void GenerateVulkanPushConstant(const Variable& variable, std::string* result) { std::visit(VulkanPushConstantGenerator{variable, result}, variable.value); } struct VariableLengthGetter { template <typename T> bool operator()(const T&) const { return false; } template <typename 
T> bool operator()(const std::vector<T>&) const { return true; } }; struct VulkanConstantGenerator { template <typename T> void operator()(const T&) const { const std::string variable_type = GetVariableType(variable.value); if (variable_type == "int" || variable_type == "uint" || variable_type == "float") { absl::StrAppend(result, "layout(constant_id = ", *constant_id, ") const ", variable_type, " ", variable.name, " = "); absl::StrAppend(result, (variable_type == "float" ? "0.0" : "0"), ";\n"); (*constant_id)++; } else { non_scalar_variables->push_back(variable); } } template <typename T> void operator()(const std::vector<T>& v) const { non_scalar_variables->push_back(variable); } const Variable& variable; int* const constant_id; std::vector<Variable>* non_scalar_variables; std::string* result; }; void GenerateVulkanConstant(const Variable& variable, int* constant_id, std::vector<Variable>* non_scalar_variables, std::string* result) { std::visit(VulkanConstantGenerator{variable, constant_id, non_scalar_variables, result}, variable.value); } class VulkanConstantsProcessor { public: void ProcessVulkanConstant(const Variable& variable, std::string* result) { GenerateVulkanConstant(variable, &constant_id_, &non_scalar_variables_, result); } void GeneratePushConstantsDeclarations(std::string* result) { if (!non_scalar_variables_.empty()) { *result += "\nlayout(push_constant) uniform pushConstants {\n"; for (const auto& variable : non_scalar_variables_) { GenerateVulkanPushConstant(variable, result); } *result += "};\n"; } } protected: int constant_id_ = 3; std::vector<Variable> non_scalar_variables_; }; bool IsVariableLength(const Variable::ValueType& value) { return std::visit(VariableLengthGetter(), value); } enum Field : uint8_t { UNKNOWN = 4, X = 0, Y = 1, Z = 2, W = 3 }; Field ToField(absl::string_view field_name) { if (field_name.size() == 2 && field_name[0] == '.') { switch (field_name[1]) { case 'x': return Field::X; case 'y': return Field::Y; case 'z': return Field::Z; case 'w': return Field::W; } } return Field::UNKNOWN; } struct FieldAccessor { template <typename T> void operator()(const T&) const {} template <typename T> void operator()(const Vec2<T>& v) const { FormatValue(result, v[field]); } template <typename T> void operator()(const Vec3<T>& v) const { FormatValue(result, v[field]); } template <typename T> void operator()(const Vec4<T>& v) const { FormatValue(result, v[field]); } Field field; std::string* result; }; void GetValue(const Variable::ValueType& value, Field field, std::string* result) { std::visit(FieldAccessor{field, result}, value); } struct FieldChecker { template <typename T> bool operator()(const T&) const { return false; } template <typename T> bool operator()(const Vec2<T>& v) const { return field < v.size(); } template <typename T> bool operator()(const Vec3<T>& v) const { return field < v.size(); } template <typename T> bool operator()(const Vec4<T>& v) const { return field < v.size(); } template <typename T> bool operator()(const std::vector<T>&) const { T t; return (*this)(t); } Field field; }; bool HasField(const Variable::ValueType& value, Field field) { return std::visit(FieldChecker{field}, value); } void AssembleAccessor(absl::string_view name, absl::string_view index, absl::string_view field, std::string* result) { if (index.empty()) { absl::StrAppend(result, name, field); } else { absl::StrAppend(result, name, "[", index, "]", field); } } } RewriteStatus VariableAccessor::Rewrite(absl::string_view input, std::string* output) { auto ref = 
variable_accessor_internal::Parse(input); if (ref.name.empty()) { absl::StrAppend(output, "INVALID_SYNTAX"); return RewriteStatus::ERROR; } auto it = name_to_variable_.find(std::string(ref.name.data(), ref.name.size())); if (it == name_to_variable_.end()) { return RewriteStatus::NOT_RECOGNIZED; } const auto& value = it->second.value; if (!ref.index.empty() && !IsVariableLength(value)) { absl::StrAppend(output, "INVALID_ACCESS_BY_INDEX"); return RewriteStatus::ERROR; } Field f = ToField(ref.field); if (!ref.field.empty() && !HasField(value, f)) { absl::StrAppend(output, "INVALID_ACCESS_BY_FIELD"); return RewriteStatus::ERROR; } if (!inline_values_ || IsVariableLength(value)) { AssembleAccessor(it->second.name, ref.index, ref.field, output); } else { if (f != Field::UNKNOWN) { GetValue(value, f, output); } else { GetValue(value, output); } } return RewriteStatus::SUCCESS; } bool VariableAccessor::AddSharedVariable(Variable&& variable) { const std::string name = variable.name; if (!name_to_variable_.insert({name, std::move(variable)}).second) { return false; } shared_variables_.insert(name); return true; } bool VariableAccessor::AddUniformParameter(Variable&& variable) { const std::string name = variable.name; if (!name_to_variable_.insert({name, std::move(variable)}).second) { return false; } uniform_parameters_.insert(name); return true; } bool VariableAccessor::IsEmptyVariableLength(const Variable& variable) const { const auto& value = variable.value; return IsVariableLength(value) && GetLength(value) == 0; } std::string VariableAccessor::GetConstDeclarations() const { std::string declarations; for (const auto& variable : name_to_variable_) { const std::string& variable_name = variable.second.name; if (shared_variables_.find(variable_name) != shared_variables_.end()) { continue; } const auto& value = variable.second.value; if (IsVariableLength(value)) { absl::StrAppend(&declarations, "const ", GetVariableType(value), " ", variable_name, "[] = "); GetValue(value, &declarations); absl::StrAppend(&declarations, ";\n"); } } return declarations; } std::string VariableAccessor::GetSharedVariableDeclarations() const { std::string declarations; for (const auto& name : shared_variables_) { const auto& variable = name_to_variable_.at(name); GenerateSharedVariableDeclaration(variable, &declarations); } return declarations; } std::string VariableAccessor::GetUniformParameterDeclarations() const { std::string declarations; if (!inline_values_) { if (vulkan_support_) { VulkanConstantsProcessor processor; for (const auto& name : uniform_parameters_) { const auto& variable = name_to_variable_.at(name); processor.ProcessVulkanConstant(variable, &declarations); } processor.GeneratePushConstantsDeclarations(&declarations); } else { for (const auto& name : uniform_parameters_) { const auto& variable = name_to_variable_.at(name); GenerateUniformParameterDeclaration(variable, &declarations); } } } return declarations; } std::vector<Variable> VariableAccessor::GetUniformParameters() const { std::vector<Variable> variables; if (!inline_values_) { variables.reserve(name_to_variable_.size()); for (const auto& name : uniform_parameters_) { const auto& variable = name_to_variable_.at(name); variables.push_back(variable); } } return variables; } } } }
#include "tensorflow/lite/delegates/gpu/gl/compiler/variable_accessor.h" #include <string> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/lite/delegates/gpu/common/types.h" namespace tflite { namespace gpu { namespace gl { namespace { TEST(PreprocessorTest, CornerCases) { VariableAccessor variable_accessor(true); std::string result; EXPECT_EQ(variable_accessor.Rewrite("unknown", &result), RewriteStatus::NOT_RECOGNIZED); } TEST(PreprocessorTest, Value) { VariableAccessor variable_accessor(true); ASSERT_TRUE(variable_accessor.AddUniformParameter({"var", int32_t(1)})); std::string result; ASSERT_EQ(variable_accessor.Rewrite("var", &result), RewriteStatus::SUCCESS); EXPECT_EQ(result, "1"); } TEST(PreprocessorTest, ValueVec) { VariableAccessor variable_accessor(true); ASSERT_TRUE(variable_accessor.AddUniformParameter({"var", int2(1, 2)})); std::string result; ASSERT_EQ(variable_accessor.Rewrite("var", &result), RewriteStatus::SUCCESS); EXPECT_EQ(result, "ivec2(1,2)"); } TEST(PreprocessorTest, Field) { VariableAccessor variable_accessor(true); ASSERT_TRUE( variable_accessor.AddUniformParameter({"var", float2(1.0, 2.1234567)})); std::string result; ASSERT_EQ(variable_accessor.Rewrite("var.y", &result), RewriteStatus::SUCCESS); EXPECT_EQ(result, "2.123456717f"); } TEST(PreprocessorTest, FieldFail) { VariableAccessor variable_accessor(true); ASSERT_TRUE(variable_accessor.AddUniformParameter({"var", 1.0f})); ASSERT_TRUE(variable_accessor.AddUniformParameter({"vec", float2(1.0, 1.0)})); std::string result; ASSERT_EQ(variable_accessor.Rewrite("var.y", &result), RewriteStatus::ERROR); EXPECT_EQ(result, "INVALID_ACCESS_BY_FIELD"); result.clear(); ASSERT_EQ(variable_accessor.Rewrite("vec.z", &result), RewriteStatus::ERROR); EXPECT_EQ(result, "INVALID_ACCESS_BY_FIELD"); } TEST(PreprocessorTest, Variable) { VariableAccessor variable_accessor(true); std::vector<int2> v; v.push_back(int2(1, 2)); ASSERT_TRUE(variable_accessor.AddUniformParameter({"var", v})); std::string result; ASSERT_EQ(variable_accessor.Rewrite("var[i].y", &result), RewriteStatus::SUCCESS); ASSERT_EQ(result, "var[i].y"); EXPECT_EQ(variable_accessor.GetConstDeclarations(), "const ivec2 var[] = ivec2[1](ivec2(1,2));\n"); } TEST(PreprocessorTest, InlineVariableFail) { VariableAccessor variable_accessor(true); ASSERT_TRUE(variable_accessor.AddUniformParameter({"var", 1})); std::string result; ASSERT_EQ(variable_accessor.Rewrite("var[i]", &result), RewriteStatus::ERROR); EXPECT_EQ(result, "INVALID_ACCESS_BY_INDEX"); } } } } }
1,040
cpp
tensorflow/tensorflow
fuse_auto_input
tensorflow/lite/delegates/gpu/gl/compiler/fuse_auto_input.cc
tensorflow/lite/delegates/gpu/gl/compiler/fuse_auto_input_test.cc
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_COMPILER_FUSE_AUTO_INPUT_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_GL_COMPILER_FUSE_AUTO_INPUT_H_ #include "tensorflow/lite/delegates/gpu/common/model.h" #include "tensorflow/lite/delegates/gpu/common/model_transformer.h" namespace tflite { namespace gpu { namespace gl { class FuseAutoInput : public NodeTransformation { public: TransformResult ApplyToNode(Node* node, GraphFloat32* graph) final; }; } } } #endif #include "tensorflow/lite/delegates/gpu/gl/compiler/fuse_auto_input.h" #include <any> #include <string> #include <vector> #include "absl/container/flat_hash_set.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_replace.h" #include "absl/types/any.h" #include "absl/types/variant.h" #include "tensorflow/lite/delegates/gpu/common/model.h" #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/common/types.h" #include "tensorflow/lite/delegates/gpu/gl/compiler/compiled_node.h" namespace tflite { namespace gpu { namespace gl { namespace { std::pair<std::string, std::string> MakeValueReplacement(int n, int k) { return {absl::StrCat("value_", n), absl::StrCat("value_", k)}; } std::pair<std::string, std::string> MakeDataReplacement(int n, int k) { return {absl::StrCat("input_data_", n), absl::StrCat("input_data_", k)}; } } TransformResult FuseAutoInput::ApplyToNode(Node* node, GraphFloat32* graph) { auto& node_attr = std::any_cast<CompiledNodeAttributes&>(node->operation.attributes); auto& node_code = node_attr.code; if (node_code.input != IOStructure::AUTO) { return {TransformStatus::SKIPPED, ""}; } uint3 workgroup = node_code.workgroup; auto node_outputs = graph->FindOutputs(node->id); std::vector<std::pair<Node*, int>> nodes_to_fuse; std::vector<std::pair<ValueId, int>> input_values; int input_num = -1; for (auto input_value : graph->FindInputs(node->id)) { input_num++; const ValueId input_id = input_value->id; input_values.push_back({input_id, input_num}); if (graph->FindConsumers(input_id).size() > 1) { continue; } Node* input_producer = graph->FindProducer(input_id); if (input_producer == nullptr) { continue; } if (graph->FindOutputs(input_producer->id).size() != 1) { continue; } auto& input_producer_attr = std::any_cast<const CompiledNodeAttributes&>( input_producer->operation.attributes); if (input_producer_attr.code.output != IOStructure::AUTO) { continue; } if (input_producer_attr.code.workload != node_code.workload && uint3() != input_producer_attr.code.workload) { continue; } if (input_producer_attr.code.workgroup != uint3()) { if (workgroup != uint3()) { continue; } workgroup = input_producer_attr.code.workgroup; } nodes_to_fuse.push_back({input_producer, input_num}); input_values.pop_back(); } if (nodes_to_fuse.empty()) { return {TransformStatus::SKIPPED, ""}; } { absl::flat_hash_set<ValueId> all_inputs; for (const auto& node_to_fuse : nodes_to_fuse) { for (const auto& input : graph->FindInputs(node_to_fuse.first->id)) { if (all_inputs.find(input->id) != all_inputs.end()) { return {TransformStatus::SKIPPED, ""}; } all_inputs.insert(input->id); } } for (const auto& input : graph->FindInputs(node->id)) { if (all_inputs.find(input->id) != all_inputs.end()) { return {TransformStatus::SKIPPED, ""}; } all_inputs.insert(input->id); } } for (auto value : graph->FindInputs(node->id)) { if (!graph->RemoveConsumer(node->id, value->id).ok()) { return {TransformStatus::INVALID, ""}; } } std::string operation_type; std::string source_code; std::string values; std::swap(source_code, 
node_code.source_code); int extra_input_num = input_num; input_num = 0; for (auto input_and_num : nodes_to_fuse) { auto& input = input_and_num.first; auto& attr = std::any_cast<CompiledNodeAttributes&>(input->operation.attributes); auto super_inputs = graph->FindInputs(input->id); std::vector<std::pair<std::string, std::string>> replacements; for (int i = 0; i < super_inputs.size(); ++i) { int value_index = i == 0 ? input_and_num.second : ++extra_input_num; replacements.push_back(MakeValueReplacement(i, value_index)); replacements.push_back(MakeDataReplacement(i, input_num)); if (attr.code.input == IOStructure::AUTO) { absl::StrAppend(&values, " value_", value_index, " = $input_data_", input_num, "[gid.x, gid.y, gid.z]$;\n"); } if (!graph->AddConsumer(node->id, super_inputs[i]->id).ok()) { return {TransformStatus::INVALID, ""}; } input_num++; } for (auto& param : attr.code.parameters) { param.name = absl::StrReplaceAll(param.name, replacements); } attr.code.source_code = absl::StrReplaceAll(attr.code.source_code, replacements); if (!MergeCode(&attr, &node_attr).ok()) { return {TransformStatus::INVALID, "Unable to merge the code"}; } absl::StrAppend(&node_attr.code.source_code, "{\n", attr.code.source_code, "\n}"); if (!operation_type.empty()) { operation_type += ","; } operation_type += input->operation.type; if (!graph->DeleteNode(input->id).ok()) { return {TransformStatus::INVALID, ""}; } } for (int i = 0; i < input_values.size(); i++) { if (node_code.input == IOStructure::AUTO) { absl::StrAppend(&values, " value_", input_values[i].second, " = $input_data_", input_num, "[gid.x, gid.y, gid.z]$;\n"); } if (!graph->AddConsumer(node->id, input_values[i].first).ok()) { return {TransformStatus::INVALID, ""}; } input_num++; } node_code.input = IOStructure::ONLY_DEFINITIONS; absl::StrAppend(&node->operation.type, "(", operation_type, ")"); node_code.source_code = absl::StrCat(values, node_code.source_code, "{  // ", node->operation.type, "\n", source_code, "\n}"); return {TransformStatus::APPLIED, ""}; } } } }
#include "tensorflow/lite/delegates/gpu/gl/compiler/fuse_auto_input.h" #include <utility> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/types/any.h" #include "tensorflow/lite/delegates/gpu/gl/compiler/compiled_node.h" namespace tflite { namespace gpu { namespace gl { namespace { TEST(FuseAutoInputTest, SkipsDiamond) { GraphFloat32 graph; auto* v0 = graph.NewValue(); auto* v1 = graph.NewValue(); auto* v2 = graph.NewValue(); auto* v3 = graph.NewValue(); auto* n1 = graph.NewNode(); CompiledNodeAttributes a1; a1.code.output = IOStructure::AUTO; n1->operation.attributes = std::move(a1); ASSERT_OK(graph.AddConsumer(n1->id, v0->id)); ASSERT_OK(graph.SetProducer(n1->id, v1->id)); auto* n2 = graph.NewNode(); CompiledNodeAttributes a2; a2.code.output = IOStructure::AUTO; n2->operation.attributes = std::move(a2); ASSERT_OK(graph.AddConsumer(n2->id, v0->id)); ASSERT_OK(graph.SetProducer(n2->id, v2->id)); auto* n3 = graph.NewNode(); CompiledNodeAttributes a3; a3.code.input = IOStructure::AUTO; n3->operation.attributes = std::move(a3); ASSERT_OK(graph.AddConsumer(n3->id, v1->id)); ASSERT_OK(graph.AddConsumer(n3->id, v2->id)); ASSERT_OK(graph.SetProducer(n3->id, v3->id)); FuseAutoInput fuse_auto_input; EXPECT_EQ(fuse_auto_input.ApplyToNode(n3, &graph).status, TransformStatus::SKIPPED); } TEST(FuseAutoInputTest, SkipsTriangle) { GraphFloat32 graph; auto* v0 = graph.NewValue(); auto* v1 = graph.NewValue(); auto* v2 = graph.NewValue(); auto* n1 = graph.NewNode(); CompiledNodeAttributes a1; a1.code.output = IOStructure::AUTO; n1->operation.attributes = std::move(a1); ASSERT_OK(graph.AddConsumer(n1->id, v0->id)); ASSERT_OK(graph.SetProducer(n1->id, v1->id)); auto* n2 = graph.NewNode(); CompiledNodeAttributes a2; a2.code.input = IOStructure::AUTO; n2->operation.attributes = std::move(a2); ASSERT_OK(graph.AddConsumer(n2->id, v0->id)); ASSERT_OK(graph.AddConsumer(n2->id, v1->id)); ASSERT_OK(graph.SetProducer(n2->id, v2->id)); FuseAutoInput fuse_auto_input; EXPECT_EQ(fuse_auto_input.ApplyToNode(n2, &graph).status, TransformStatus::SKIPPED); } } } } }
1,041
cpp
tensorflow/tensorflow
preprocessor
tensorflow/lite/delegates/gpu/gl/compiler/preprocessor.cc
tensorflow/lite/delegates/gpu/gl/compiler/preprocessor_test.cc
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_COMPILER_PREPROCESSOR_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_GL_COMPILER_PREPROCESSOR_H_ #include <memory> #include <string> #include <vector> #include "absl/strings/string_view.h" #include "tensorflow/lite/delegates/gpu/common/status.h" namespace tflite { namespace gpu { namespace gl { enum class RewriteStatus { SUCCESS = 0, NOT_RECOGNIZED = 1, ERROR = 2, }; class InlineRewrite { public: virtual ~InlineRewrite() = default; virtual RewriteStatus Rewrite(absl::string_view input, std::string* output) = 0; }; class TextPreprocessor { public: TextPreprocessor(char inline_delimiter, bool keep_unknown_rewrites) : inline_delimiter_(inline_delimiter), keep_unknown_rewrites_(keep_unknown_rewrites) {} void AddRewrite(InlineRewrite* rewrite) { inline_rewrites_.push_back(rewrite); } absl::Status Rewrite(const std::string& input, std::string* output); private: const char inline_delimiter_; const bool keep_unknown_rewrites_; std::vector<InlineRewrite*> inline_rewrites_; }; } } } #endif #include "tensorflow/lite/delegates/gpu/gl/compiler/preprocessor.h" #include <string> #include <utility> #include "absl/strings/str_cat.h" #include "tensorflow/lite/delegates/gpu/common/status.h" namespace tflite { namespace gpu { namespace gl { namespace { absl::string_view FindInlineBlock(absl::string_view s, char delimiter) { size_t start = s.find(delimiter); if (start != absl::string_view::npos) { size_t end = s.find(delimiter, start + 1); if (end != std::string::npos) { return s.substr(start, end - start + 1); } return s.substr(start, 1); } return s.substr(s.size(), 0); } absl::string_view PastSubstr(absl::string_view s, absl::string_view subs) { return s.substr(subs.data() + subs.size() - s.data()); } } absl::Status TextPreprocessor::Rewrite(const std::string& input, std::string* output) { absl::string_view s = input; std::string result; while (true) { absl::string_view inline_block = FindInlineBlock(s, inline_delimiter_); result.append(s.data(), inline_block.data() - s.data()); if (inline_block.empty()) { break; } if (inline_block.size() == 1) { return absl::NotFoundError("Unable to find end of inline block"); } s = PastSubstr(s, inline_block); bool processed = false; for (auto& rewrite : inline_rewrites_) { if (processed) { break; } switch (rewrite->Rewrite(inline_block.substr(1, inline_block.size() - 2), &result)) { case RewriteStatus::NOT_RECOGNIZED: break; case RewriteStatus::SUCCESS: processed = true; break; case RewriteStatus::ERROR: return absl::InternalError(absl::StrCat("Error while rewriting '", inline_block, "': ", result)); } } if (!processed) { if (!keep_unknown_rewrites_) { return absl::NotFoundError(absl::StrCat( "Didn't find inline rewrite for '", inline_block, "'")); } absl::StrAppend(&result, inline_block); } } *output = std::move(result); return absl::OkStatus(); } } } }
#include "tensorflow/lite/delegates/gpu/gl/compiler/preprocessor.h" #include <string> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> namespace tflite { namespace gpu { namespace gl { namespace { class AccuInlineRewrite : public InlineRewrite { public: explicit AccuInlineRewrite(std::vector<std::string>* blocks) : blocks_(blocks) {} RewriteStatus Rewrite(absl::string_view input, std::string* output) final { blocks_->push_back(std::string(input.data(), input.size())); output->append("r:"); output->append(input.data(), input.size()); return RewriteStatus::SUCCESS; } std::vector<std::string>* blocks_; }; std::vector<std::string> ParseInlines(const std::string& text) { std::vector<std::string> blocks; TextPreprocessor preprocessor('$', false); AccuInlineRewrite rewrite(&blocks); preprocessor.AddRewrite(&rewrite); std::string discard; preprocessor.Rewrite(text, &discard).IgnoreError(); return blocks; } TEST(Preprocessor, CornerCases) { EXPECT_THAT(ParseInlines(""), testing::ElementsAre()); EXPECT_THAT(ParseInlines("text text"), testing::ElementsAre()); EXPECT_THAT(ParseInlines("$$"), testing::ElementsAre("")); } TEST(Preprocessor, One) { EXPECT_THAT(ParseInlines("$text$"), testing::ElementsAre("text")); EXPECT_THAT(ParseInlines(" $text$ "), testing::ElementsAre("text")); } TEST(Preprocessor, More) { EXPECT_THAT(ParseInlines("Test $inline1$\n$inline2$ test $inline3$ "), testing::ElementsAre("inline1", "inline2", "inline3")); } std::string RewriteInlines(const std::string& text) { std::vector<std::string> blocks; TextPreprocessor preprocessor('$', false); AccuInlineRewrite rewrite(&blocks); preprocessor.AddRewrite(&rewrite); std::string out; preprocessor.Rewrite(text, &out).IgnoreError(); return out; } TEST(Preprocessor, RewriteCornerCases) { EXPECT_EQ(RewriteInlines(""), ""); EXPECT_EQ(RewriteInlines("text text"), "text text"); EXPECT_EQ(RewriteInlines("$$"), "r:"); } TEST(Preprocessor, RewriteOne) { EXPECT_EQ(RewriteInlines("$text$"), "r:text"); EXPECT_EQ(RewriteInlines(" $text$ "), " r:text "); } TEST(Preprocessor, RewriteMore) { EXPECT_EQ(RewriteInlines("Test $inline1$\n$inline2$ test $inline3$ "), "Test r:inline1\nr:inline2 test r:inline3 "); } class SingleRewrite : public InlineRewrite { public: RewriteStatus Rewrite(absl::string_view input, std::string* output) final { if (input == "foo") { output->append("bla"); return RewriteStatus::SUCCESS; } return RewriteStatus::NOT_RECOGNIZED; } std::vector<std::string>* blocks_; }; TEST(Preprocessor, KeepUnknownRewrites) { TextPreprocessor preprocessor('$', true); SingleRewrite rewrite; preprocessor.AddRewrite(&rewrite); std::string out; ASSERT_TRUE(preprocessor.Rewrite("Good morning, $name$! $foo$", &out).ok()); EXPECT_EQ("Good morning, $name$! bla", out); } TEST(Preprocessor, KeepUnknownRewrites_Fail) { TextPreprocessor preprocessor('$', false); SingleRewrite rewrite; preprocessor.AddRewrite(&rewrite); std::string out; EXPECT_FALSE(preprocessor.Rewrite("Good morning, $name$! $foo$", &out).ok()); } } } } }
1,042
cpp
tensorflow/tensorflow
mean
tensorflow/lite/delegates/gpu/gl/kernels/mean.cc
tensorflow/lite/delegates/gpu/gl/kernels/mean_test.cc
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_MEAN_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_MEAN_H_ #include <memory> #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/gl/node_shader.h" namespace tflite { namespace gpu { namespace gl { std::unique_ptr<NodeShader> NewMeanNodeShader(); } } } #endif #include "tensorflow/lite/delegates/gpu/gl/kernels/mean.h" #include <algorithm> #include <any> #include <cstdint> #include <cstring> #include <memory> #include <string> #include <utility> #include <vector> #include "absl/memory/memory.h" #include "absl/status/status.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/types.h" #include "tensorflow/lite/delegates/gpu/common/util.h" namespace tflite { namespace gpu { namespace gl { namespace { bool UseSubgroupBasedImpl(const GpuInfo& gpu_info) { return gpu_info.IsApiVulkan() && (gpu_info.vulkan_info.api_version_major > 1 || gpu_info.vulkan_info.api_version_minor >= 1) && gpu_info.vulkan_info.subgroup_size >= 32 && gpu_info.vulkan_info.supports_subgroup_arithmetic; } void GenerateSubgroupBasedMean(const NodeShader::GenerationContext& ctx, GeneratedCode* generated_code) { int height = ctx.input_shapes[0][1]; int width = ctx.input_shapes[0][2]; int depth = ctx.input_shapes[0][3]; std::vector<Variable> parameters = { {"input_data_0_h", height}, {"input_data_0_w", width}, {"output_data_0_h", 1}, {"output_data_0_w", 1}, }; std::string source = R"( const uint columns_per_invocation = ($input_data_0_w$ + (gl_WorkGroupSize.x - 1))/gl_WorkGroupSize.x; const uint rows_per_invocation = ($input_data_0_h$ + (gl_WorkGroupSize.y - 1))/gl_WorkGroupSize.y; const uint first_row = gl_GlobalInvocationID.y*rows_per_invocation; const uint first_col = gl_GlobalInvocationID.x*columns_per_invocation; const uint last_row_exclusive = min(first_row+rows_per_invocation, $input_data_0_h$); const uint last_column_exclusive = min(first_col+columns_per_invocation, $input_data_0_w$); vec4 value = vec4(0); for (uint h = first_row; h < last_row_exclusive; ++h) { for (uint w = first_col; w < last_column_exclusive; ++w) { value += $input_data_0[w, h, gid.z]$; } } highp vec4 subgroup_sum = subgroupAdd(value); if(subgroupElect()) { subgroup_sums[gl_SubgroupID] = subgroup_sum; } memoryBarrierShared(); barrier(); if(gl_SubgroupID == 0) { highp vec4 subtotal = vec4(0); if (gl_SubgroupInvocationID < gl_NumSubgroups) { subtotal = subgroup_sums[gl_SubgroupInvocationID]; } highp vec4 grand_total = subgroupAdd(subtotal); if(subgroupElect()) { highp vec4 result = grand_total / $input_data_0_w$ / $input_data_0_h$; $output_data_0[0, 0, gid.z] = result$; } } )"; const uint32_t subgroup_size = ctx.gpu_info->vulkan_info.subgroup_size; const uint32_t max_wg_size_x = ctx.gpu_info->GetMaxWorkGroupSizeForX(); const uint32_t max_wg_size_y = ctx.gpu_info->GetMaxWorkGroupSizeForY(); const uint32_t max_wg_size = std::min(static_cast<uint32_t>(ctx.gpu_info->GetMaxWorkGroupTotalSize()), subgroup_size * subgroup_size); const uint32_t max_number_of_subgroups = max_wg_size / subgroup_size; uint32_t wg_size_x = 0; uint32_t wg_size_y = 0; if (width * height <= max_wg_size && width <= max_wg_size_x && height <= max_wg_size_y) { wg_size_x = width; wg_size_y = height; } else { wg_size_x = std::min({static_cast<uint32_t>(std::sqrt(max_wg_size)), max_wg_size_x, static_cast<uint32_t>(width)}); wg_size_y = std::min({max_wg_size / wg_size_x, max_wg_size_y, static_cast<uint32_t>(height)}); } 
std::vector<Variable> shared_variables = { {"subgroup_sums", std::vector<float4>(max_number_of_subgroups)}, }; *generated_code = { std::move(parameters), {}, {std::move(shared_variables)}, uint3(wg_size_x, wg_size_y, uint32_t(DivideRoundUp(depth, 4))), uint3(wg_size_x, wg_size_y, 1u), std::move(source), IOStructure::ONLY_DEFINITIONS, IOStructure::ONLY_DEFINITIONS, }; } void GenerateTrivialMean(const NodeShader::GenerationContext& ctx, GeneratedCode* generated_code) { std::vector<Variable> parameters = { {"input_data_0_h", static_cast<int>(ctx.input_shapes[0][1])}, {"input_data_0_w", static_cast<int>(ctx.input_shapes[0][2])}}; std::string source = R"( highp vec4 sum = vec4(0.0); highp float size = float($input_data_0_w$ * $input_data_0_h$); for (int w = 0; w < $input_data_0_w$; w++) { for (int h = 0; h < $input_data_0_h$; h++) { sum += $input_data_0[w, h, gid.z]$; } } value_0 = sum / size; )"; *generated_code = { std::move(parameters), {}, {}, uint3(), uint3(1, 1, 4), std::move(source), IOStructure::ONLY_DEFINITIONS, IOStructure::AUTO, }; } constexpr uint3 kTileSize = {8, 8, 1}; inline bool UseTiledImpl(const NodeShader::GenerationContext& ctx) { const int h = ctx.input_shapes[0][1]; const int w = ctx.input_shapes[0][2]; const int c = ctx.input_shapes[0][3]; return h % kTileSize.y == 0 && w % kTileSize.x == 0 && c % 4 == 0 && (h / kTileSize.y) * (w / kTileSize.x) * c * sizeof(float) <= 32768; } void GenerateTiledMean(const NodeShader::GenerationContext& ctx, GeneratedCode* generated_code) { const int h = ctx.input_shapes[0][1]; const int w = ctx.input_shapes[0][2]; const int s = DivideRoundUp(ctx.input_shapes[0][3], 4); std::vector<Variable> parameters = { {"input_data_0_h", h}, {"input_data_0_w", w}, {"tile_size_h", kTileSize.y}, {"tile_size_w", kTileSize.x}, }; std::vector<Variable> shared_variables = { {"tile_sum", std::vector<float4>((w / kTileSize.x) * (h / kTileSize.y) * s)}}; std::string source = R"( ivec2 tile_size = ivec2($tile_size_w$, $tile_size_h$); ivec2 num_tiles = ivec2($input_data_0_w$, $input_data_0_h$) / tile_size; highp vec4 partial_sum = vec4(0.0); for (int x = gid.x * tile_size.x; x < (gid.x + 1) * tile_size.x; ++x) { for (int y = gid.y * tile_size.y; y < (gid.y + 1) * tile_size.y; ++y) { partial_sum += $input_data_0[x, y, gid.z]$; } } $tile_sum$[num_tiles.x * num_tiles.y * gid.z + num_tiles.x * gid.y + gid.x] = partial_sum; memoryBarrierShared(); barrier(); if (gid.x == 0 && gid.y == 0) { highp vec4 sum = vec4(0.0); for (int i = 0; i < num_tiles.x * num_tiles.y; ++i) { sum += $tile_sum$[num_tiles.x * num_tiles.y * gid.z + i]; } highp vec4 mean = sum / float($input_data_0_w$ * $input_data_0_h$); $output_data_0[0, 0, gid.z] = mean$; } )"; *generated_code = { std::move(parameters), {}, std::move(shared_variables), uint3(kTileSize.x, kTileSize.y, static_cast<uint32_t>(s)), kTileSize, std::move(source), IOStructure::ONLY_DEFINITIONS, IOStructure::ONLY_DEFINITIONS, }; } class Mean : public NodeShader { public: absl::Status GenerateCode(const GenerationContext& ctx, GeneratedCode* generated_code) const final { const auto& attr = std::any_cast<const MeanAttributes&>(ctx.op_attr); if (attr.dims != std::set<Axis>({Axis::HEIGHT, Axis::WIDTH})) { return absl::InvalidArgumentError( "Mean calculation is supported only for height and width."); } if (!(ctx.input_shapes.size() == 1 && ctx.output_shapes.size() == 1 && ctx.output_shapes[0][1] == 1 && ctx.output_shapes[0][2] == 1 && ctx.output_shapes[0][3] == ctx.input_shapes[0][3])) { return absl::InvalidArgumentError( "Mean calculation 
is supported for one input and one 1x1 output with " "the same channel count."); } if (UseSubgroupBasedImpl(*ctx.gpu_info)) { GenerateSubgroupBasedMean(ctx, generated_code); } else if (UseTiledImpl(ctx)) { GenerateTiledMean(ctx, generated_code); } else { GenerateTrivialMean(ctx, generated_code); } return absl::OkStatus(); } }; } std::unique_ptr<NodeShader> NewMeanNodeShader() { return std::make_unique<Mean>(); } } } }
#include "tensorflow/lite/delegates/gpu/gl/kernels/mean.h" #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/gl/kernels/test_util.h" using ::testing::FloatNear; using ::testing::Pointwise; namespace tflite { namespace gpu { namespace gl { namespace { TEST(MeanTest, TestTrivialImpl) { TensorRef<BHWC> input; input.type = DataType::FLOAT32; input.ref = 0; input.shape = BHWC(1, 2, 2, 1); TensorRef<BHWC> output; output.type = DataType::FLOAT32; output.ref = 2; output.shape = BHWC(1, 1, 1, 1); MeanAttributes attr; attr.dims = {Axis::HEIGHT, Axis::WIDTH}; SingleOpModel model({ToString(OperationType::MEAN), attr}, {input}, {output}); ASSERT_TRUE(model.PopulateTensor(0, {1.0, 2.0, 3.0, 4.0})); ASSERT_OK(model.Invoke(*NewMeanNodeShader())); EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {2.5})); } TEST(MeanTest, TestTiledImpl) { TensorRef<BHWC> input; input.type = DataType::FLOAT32; input.ref = 0; input.shape = BHWC(1, 16, 16, 8); TensorRef<BHWC> output; output.type = DataType::FLOAT32; output.ref = 1; output.shape = BHWC(1, 1, 1, 8); MeanAttributes attr; attr.dims = {Axis::HEIGHT, Axis::WIDTH}; SingleOpModel model({ToString(OperationType::MEAN), attr}, {input}, {output}); std::vector<float> input_data; input_data.reserve(1 * 16 * 16 * 8); for (int i = 0; i < 1 * 16 * 16 * 8; ++i) input_data.push_back(i % 8); ASSERT_TRUE(model.PopulateTensor(0, std::move(input_data))); ASSERT_OK(model.Invoke(*NewMeanNodeShader())); EXPECT_THAT(model.GetOutput(0), Pointwise(FloatNear(1e-6), {0, 1, 2, 3, 4, 5, 6, 7})); } } } } }
1,043
cpp
tensorflow/tensorflow
concat
tensorflow/lite/delegates/gpu/gl/kernels/concat.cc
third_party/xla/xla/tests/concat_test.cc
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_CONCAT_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_GL_KERNELS_CONCAT_H_ #include <memory> #include "tensorflow/lite/delegates/gpu/common/operations.h" #include "tensorflow/lite/delegates/gpu/gl/node_shader.h" namespace tflite { namespace gpu { namespace gl { std::unique_ptr<NodeShader> NewAlignedConcatNodeShader(); std::unique_ptr<NodeShader> NewConcatNodeShader(); std::unique_ptr<NodeShader> NewFlatConcatNodeShader(); } } } #endif #include "tensorflow/lite/delegates/gpu/gl/kernels/concat.h" #include <algorithm> #include <any> #include <cstdint> #include <cstring> #include <memory> #include <string> #include <utility> #include <vector> #include "absl/memory/memory.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/types.h" #include "tensorflow/lite/delegates/gpu/gl/variable.h" namespace tflite { namespace gpu { namespace gl { namespace { class AlignedConcatByChannels : public NodeShader { public: static bool IsSupported(const GenerationContext& ctx) { const auto& attr = std::any_cast<const ConcatAttributes&>(ctx.op_attr); if (attr.axis != Axis::CHANNELS) return false; if (ctx.input_shapes.size() != 2) return false; for (int i = 1; i < ctx.input_shapes.size(); i++) { if (ctx.input_shapes[0][1] != ctx.input_shapes[i][1] || ctx.input_shapes[0][2] != ctx.input_shapes[i][2]) { return false; } } for (const auto& shape : ctx.input_shapes) { if (shape[3] % 4 != 0) return false; } return true; } absl::Status GenerateCode(const GenerationContext& ctx, GeneratedCode* generated_code) const final { if (!IsSupported(ctx)) { return absl::InvalidArgumentError( "This case is not supported by aligned concat"); } std::string source = R"( if (gid.z < $border$) { value_0 = $input_data_0[gid.x, gid.y, gid.z]$; } else { int z = gid.z - $border$; value_0 = $input_data_1[gid.x, gid.y, z]$; } )"; *generated_code = { { {"border", static_cast<int>(ctx.input_shapes[0][3]) / 4}}, {}, {}, uint3(), uint3(), std::move(source), IOStructure::ONLY_DEFINITIONS, IOStructure::AUTO, }; return absl::OkStatus(); } }; class ConcatByAnyChannel : public NodeShader { public: static bool IsSupported(const GenerationContext& ctx) { const auto& attr = std::any_cast<const ConcatAttributes&>(ctx.op_attr); if (attr.axis != Axis::CHANNELS) return false; if (ctx.input_shapes.size() <= 1) return false; for (int i = 1; i < ctx.input_shapes.size(); i++) { if (ctx.input_shapes[0][1] != ctx.input_shapes[i][1] || ctx.input_shapes[0][2] != ctx.input_shapes[i][2]) { return false; } } return true; } absl::Status GenerateCode(const GenerationContext& ctx, GeneratedCode* generated_code) const final { if (!IsSupported(ctx)) { return absl::UnimplementedError("This case is not supported by concat"); } std::string code = DeclareVariables(); int already_written = 0; int t = 0; for (int current_input_id = 0; current_input_id < ctx.input_shapes.size(); current_input_id++) { int in_ch = ctx.input_shapes[current_input_id][3]; code += PrintStartMessage(current_input_id, in_ch, already_written); std::string input = "input_data_" + std::to_string(current_input_id); int reminder = already_written % 4; if (reminder == 0) { code += AlignedCase(in_ch, input); } else { code += UnalignedCase(reminder, in_ch, input, &t); } already_written += in_ch; } *generated_code = { {}, {}, {}, uint3(static_cast<int>(ctx.output_shapes[0][2]), static_cast<int>(ctx.output_shapes[0][1]), 1), uint3(), std::move(code), IOStructure::ONLY_DEFINITIONS, IOStructure::ONLY_DEFINITIONS, }; return 
absl::OkStatus(); } private: std::string temp(int t) const { return "temp" + std::to_string(t); } std::string DeclareVariables() const { return R"( int z = gid.z; vec4 val = vec4(0.0f); )"; } std::string PrintStartMessage(int current_input_id, int in_ch, int already_written) const { return "// Copying input " + std::to_string(current_input_id) + " tensor with " + std::to_string(in_ch) + " channels\n// Already written " + std::to_string(already_written) + " elements\n\n"; } std::string AlignedCase(int in_ch, const std::string& input) const { std::string code; int blocks_amount = DivideRoundUp<int>(in_ch, 4); code += "// Aligned case\n"; code += "// Making " + std::to_string(blocks_amount) + " write(s)\n\n"; for (int block = 0; block < blocks_amount; block++) { code += "val = $" + input + "[gid.x, gid.y, " + std::to_string(block) + "]$;\n" + "$output_data_0[gid.x, gid.y, z] = val$;\n" + "z++; \n\n"; } return code; } std::string UnalignedCase(int reminder, int in_ch, const std::string& input, int* t) const { std::string code = "// Unaligned case\n"; int shift = 4 - reminder; if (shift > in_ch) { shift = in_ch; } code += "\n// Fill the rest of the previous block\n"; code += "vec4 " + temp(*t) + " = $" + input + "[gid.x, gid.y, 0]$;\n"; for (int i = 0; i < shift; i++) { code += "val[" + std::to_string(reminder + i) + "] = " + temp(*t) + "[" + std::to_string(i) + "];\n"; } code += "$output_data_0[gid.x, gid.y, z - 1] = val$;\n"; (*t)++; int left_blocks = (in_ch - shift) / 4; if ((in_ch - shift) % 4 != 0) { left_blocks++; } if (left_blocks) { code += "\n// Copy the remaining blocks\n"; for (int block = 0; block < left_blocks; block++) { for (int elem = 0; elem < 4; elem++) { if (shift % 4 == 0) { code += "vec4 " + temp(*t) + " = $" + input + "[gid.x, gid.y, " + std::to_string(block + 1) + "]$;\n"; (*t)++; } code += "val[" + std::to_string(elem) + "] = " + temp(*t - 1) + "[" + std::to_string(shift % 4) + "];\n"; if (shift == in_ch) { break; } shift++; } code += "$output_data_0[gid.x, gid.y, z] = val$;\n"; code += "z++;\n"; } } else { code += "// No more blocks to write\n"; } return code; } }; class FlatConcatByHeight : public NodeShader { public: static bool IsSupported(const GenerationContext& ctx) { const auto& attr = std::any_cast<const ConcatAttributes&>(ctx.op_attr); if (attr.axis != Axis::HEIGHT) return false; if (ctx.input_shapes.size() <= 1) return false; for (int i = 1; i < ctx.input_shapes.size(); i++) { if (ctx.input_shapes[0][3] != ctx.input_shapes[i][3] || ctx.input_shapes[0][2] != ctx.input_shapes[i][2]) { return false; } } return true; } absl::Status GenerateCode(const GenerationContext& ctx, GeneratedCode* generated_code) const final { std::string code; std::vector<Variable> params; for (int i = 0, shift = 0; i < ctx.input_shapes.size(); shift += ctx.input_shapes[i][1], i++) { code += "if ("; if (i != 0) { code += "$input_data_" + std::to_string(i - 1) + "_h$ <= gid.y && "; } code += "gid.y < " + std::to_string(shift + ctx.input_shapes[i][1]) + ") {\n"; code += "if (gid.y - " + std::to_string(shift) + " >= $input_data_" + std::to_string(i) + "_h$) return;\n"; code += "value_0 = $input_data_" + std::to_string(i) + "[gid.x, gid.y - " + std::to_string(shift) + ", gid.z]$;\n}\n"; if (i != ctx.input_shapes.size() - 1) { code += " else "; } params.push_back({"input_data_" + std::to_string(i) + "_h", static_cast<int>(ctx.input_shapes[i][1])}); } *generated_code = { std::move(params), {}, {}, uint3(), uint3(), std::move(code), IOStructure::ONLY_DEFINITIONS, IOStructure::AUTO, }; return absl::OkStatus(); } }; class FlatConcatByWidth : public NodeShader { public: static bool IsSupported(const GenerationContext& ctx) { const auto& attr = std::any_cast<const ConcatAttributes&>(ctx.op_attr); if (attr.axis != Axis::WIDTH) return false; if
(ctx.input_shapes.size() <= 1) return false; for (int i = 1; i < ctx.input_shapes.size(); i++) { if (ctx.input_shapes[0][3] != ctx.input_shapes[i][3] || ctx.input_shapes[0][1] != ctx.input_shapes[i][1]) { return false; } } return true; } absl::Status GenerateCode(const GenerationContext& ctx, GeneratedCode* generated_code) const final { std::string code; std::vector<Variable> params; for (int i = 0, shift = 0; i < ctx.input_shapes.size(); shift += ctx.input_shapes[i][2], i++) { code += "if ("; if (i != 0) { code += "$input_data_" + std::to_string(i - 1) + "_w$ <= gid.x && "; } code += "gid.x < " + std::to_string(shift + ctx.input_shapes[i][2]) + ") {\n"; code += "if (gid.x - " + std::to_string(shift) + " >= $input_data_" + std::to_string(i) + "_w$) return;\n"; code += "value_0 = $input_data_" + std::to_string(i) + "[gid.x - " + std::to_string(shift) + ", gid.y, gid.z]$;\n}\n"; if (i != ctx.input_shapes.size() - 1) { code += " else "; } params.push_back({"input_data_" + std::to_string(i) + "_w", static_cast<int>(ctx.input_shapes[i][2])}); } *generated_code = { std::move(params), {}, {}, uint3(), uint3(), std::move(code), IOStructure::ONLY_DEFINITIONS, IOStructure::AUTO, }; return absl::OkStatus(); } }; class FlatConcat : public NodeShader { public: absl::Status GenerateCode(const GenerationContext& ctx, GeneratedCode* generated_code) const final { if (FlatConcatByHeight::IsSupported(ctx)) { return flat_concat_by_height_.GenerateCode(ctx, generated_code); } if (FlatConcatByWidth::IsSupported(ctx)) { return flat_concat_by_width_.GenerateCode(ctx, generated_code); } return absl::InvalidArgumentError( "This case is not supported by flat concat"); } private: FlatConcatByHeight flat_concat_by_height_; FlatConcatByWidth flat_concat_by_width_; }; } std::unique_ptr<NodeShader> NewAlignedConcatNodeShader() { return std::make_unique<AlignedConcatByChannels>(); } std::unique_ptr<NodeShader> NewConcatNodeShader() { return std::make_unique<ConcatByAnyChannel>(); } std::unique_ptr<NodeShader> NewFlatConcatNodeShader() { return std::make_unique<FlatConcat>(); } } } }
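The GL kernels above emit GLSL that gathers each pixel's channels from the inputs at the right offset. As a cross-check of that per-pixel logic, the following is a hypothetical CPU reference (not part of the TFLite sources) for channel-axis concatenation of two HWC tensors with matching height and width, assuming batch == 1; the name ConcatChannelsReference is illustrative only.

#include <vector>

// Hypothetical reference: concatenate HWC tensor a (ca channels) with HWC
// tensor b (cb channels) along the channel axis, producing ca + cb channels.
// Mirrors what the concat shaders compute per pixel.
std::vector<float> ConcatChannelsReference(const std::vector<float>& a, int ca,
                                           const std::vector<float>& b, int cb,
                                           int h, int w) {
  std::vector<float> out(static_cast<size_t>(h) * w * (ca + cb));
  for (int y = 0; y < h; ++y) {
    for (int x = 0; x < w; ++x) {
      const int base = (y * w + x) * (ca + cb);
      for (int c = 0; c < ca; ++c) out[base + c] = a[(y * w + x) * ca + c];
      for (int c = 0; c < cb; ++c) out[base + ca + c] = b[(y * w + x) * cb + c];
    }
  }
  return out;
}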
#include <memory> #include <vector> #include "absl/status/statusor.h" #include "xla/array2d.h" #include "xla/array3d.h" #include "xla/client/local_client.h" #include "xla/client/xla_builder.h" #include "xla/client/xla_computation.h" #include "xla/literal_util.h" #include "xla/reference_util.h" #include "xla/test.h" #include "xla/test_helpers.h" #include "xla/tests/client_library_test_base.h" #include "xla/tests/hlo_test_base.h" #include "xla/tests/literal_test_util.h" #include "xla/tests/test_macros.h" #include "tsl/platform/test.h" namespace xla { namespace { using ConcatTest = ClientLibraryTestBase; using ConcatTestHlo = HloTestBase; using ::testing::HasSubstr; XLA_TEST_F(ConcatTest, Concat_Nothing) { XlaBuilder builder(TestName()); ConcatInDim(&builder, {}, 0); absl::StatusOr<XlaComputation> computation_status = builder.Build(); ASSERT_FALSE(computation_status.ok()); EXPECT_THAT(computation_status.status().ToString(), HasSubstr("Concatenate expects at least one argument")); } XLA_TEST_F(ConcatTest, Concat_R1_With_Nothing) { XlaBuilder builder(TestName()); auto a = ConstantR1<float>(&builder, {42.0, 64.0}); ConcatInDim(&builder, {a}, 0); std::vector<float> expected = {42, 64}; ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001)); } XLA_TEST_F(ConcatTest, Concat_R1_L0_With_Nothing) { XlaBuilder builder(TestName()); auto a = ConstantR1<float>(&builder, {}); ConcatInDim(&builder, {a}, 0); std::vector<float> expected = {}; ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001)); } XLA_TEST_F(ConcatTest, CannotConcatR0WithR0) { XlaBuilder builder(TestName()); auto a = ConstantR0<float>(&builder, 42.0); auto b = ConstantR0<float>(&builder, 64.0); ConcatInDim(&builder, {a, b}, 0); absl::StatusOr<XlaComputation> computation_status = builder.Build(); ASSERT_FALSE(computation_status.ok()); EXPECT_THAT(computation_status.status().ToString(), HasSubstr("out of bounds: 0")); } XLA_TEST_F(ConcatTest, Concat_R1_L0_With_R1_L0) { XlaBuilder builder(TestName()); auto a = ConstantR1<float>(&builder, {}); auto b = ConstantR1<float>(&builder, {}); ConcatInDim(&builder, {a, b}, 0); std::vector<float> expected = {}; ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001)); } XLA_TEST_F(ConcatTest, Concat_R1_L0_With_R1_L1) { XlaBuilder builder(TestName()); auto a = ConstantR1<float>(&builder, {}); auto b = ConstantR1<float>(&builder, {256.0}); ConcatInDim(&builder, {a, b}, 0); std::vector<float> expected = {256}; ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001)); } XLA_TEST_F(ConcatTest, Concat_R1_L2_With_R1_L0) { XlaBuilder builder(TestName()); auto a = ConstantR1<float>(&builder, {42.0, 64.0}); auto b = ConstantR1<float>(&builder, {}); ConcatInDim(&builder, {a, b}, 0); std::vector<float> expected = {42, 64}; ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001)); } XLA_TEST_F(ConcatTest, Concat_R1_L2_With_R1_L1) { XlaBuilder builder(TestName()); auto a = ConstantR1<float>(&builder, {42.0, 64.0}); auto b = ConstantR1<float>(&builder, {256.0}); ConcatInDim(&builder, {a, b}, 0); std::vector<float> expected = {42, 64, 256}; ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001)); } XLA_TEST_F(ConcatTest, Concat_R1_L253_With_R1_L7) { std::vector<float> lhs(253); std::vector<float> rhs(7); std::vector<float> expected(253 + 7); for (int i = 0; i < 253; ++i) { expected[i] = lhs[i] = i + 1; } for (int i = 0; i < 7; ++i) { expected[253 + i] = rhs[i] = 253 + i + 1; } XlaBuilder builder(TestName()); auto a = 
ConstantR1<float>(&builder, lhs); auto b = ConstantR1<float>(&builder, rhs); ConcatInDim(&builder, {a, b}, 0); ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001)); } XLA_TEST_F(ConcatTest, Concat_0x0_With_0x0) { for (int dim : {0, 1}) { XlaBuilder builder(TestName()); auto a = ConstantR2FromArray2D(&builder, Array2D<float>(0, 0)); auto b = ConstantR2FromArray2D(&builder, Array2D<float>(0, 0)); ConcatInDim(&builder, {a, b}, dim); ComputeAndCompareR2<float>(&builder, Array2D<float>(0, 0), {}, ErrorSpec(0.0001)); } } XLA_TEST_F(ConcatTest, Concat_1x1_With_1x1_InDim0) { XlaBuilder builder(TestName()); auto a_array = CreatePatternedMatrix(1, 1); auto b_array = CreatePatternedMatrix(1, 1, 64.0); auto a = ConstantR2FromArray2D(&builder, *a_array); auto b = ConstantR2FromArray2D(&builder, *b_array); ConcatInDim(&builder, {a, b}, 0); Array2D<float> expected({ {0}, {64}, }); ComputeAndCompareR2<float>(&builder, expected, {}, ErrorSpec(0.0001)); } XLA_TEST_F(ConcatTest, Concat_1x1_With_1x1_InDim1) { XlaBuilder builder(TestName()); auto a_array = CreatePatternedMatrix(1, 1); auto b_array = CreatePatternedMatrix(1, 1, 64.0); auto a = ConstantR2FromArray2D(&builder, *a_array); auto b = ConstantR2FromArray2D(&builder, *b_array); ConcatInDim(&builder, {a, b}, 1); Array2D<float> expected({ {0, 64}, }); ComputeAndCompareR2<float>(&builder, expected, {}, ErrorSpec(0.0001)); } XLA_TEST_F(ConcatTest, Concat2x0With2x5) { XlaBuilder builder(TestName()); auto b_array = CreatePatternedMatrix(2, 5, 64.0); auto a = ConstantR2FromArray2D(&builder, Array2D<float>(2, 0)); auto b = ConstantR2FromArray2D(&builder, *b_array); ConcatInDim(&builder, {a, b}, 1); ComputeAndCompareR2<float>(&builder, *b_array, {}, ErrorSpec(0.0001)); } XLA_TEST_F(ConcatTest, Concat2x3With2x5) { XlaBuilder builder(TestName()); auto a_array = CreatePatternedMatrix(2, 3); auto b_array = CreatePatternedMatrix(2, 5, 64.0); auto a = ConstantR2FromArray2D(&builder, *a_array); auto b = ConstantR2FromArray2D(&builder, *b_array); ConcatInDim(&builder, {a, b}, 1); Array2D<float> expected({ {0, 1, 2, 64, 65, 66, 67, 68}, {1000, 1001, 1002, 1064, 1065, 1066, 1067, 1068}, }); ComputeAndCompareR2<float>(&builder, expected, {}, ErrorSpec(0.0001)); } XLA_TEST_F(ConcatTest, Concat3x2With0x2) { XlaBuilder builder(TestName()); auto a_array = CreatePatternedMatrix(3, 2); auto a = ConstantR2FromArray2D(&builder, *a_array); auto b = ConstantR2FromArray2D(&builder, Array2D<float>(0, 2)); ConcatInDim(&builder, {a, b}, 0); ComputeAndCompareR2<float>(&builder, *a_array, {}, ErrorSpec(0.0001)); } XLA_TEST_F(ConcatTest, Concat3x2With5x2) { XlaBuilder builder(TestName()); auto a_array = CreatePatternedMatrix(3, 2); auto b_array = CreatePatternedMatrix(5, 2, 64.0); auto a = ConstantR2FromArray2D(&builder, *a_array); auto b = ConstantR2FromArray2D(&builder, *b_array); ConcatInDim(&builder, {a, b}, 0); Array2D<float> expected({ {0, 1}, {1000, 1001}, {2000, 2001}, {64, 65}, {1064, 1065}, {2064, 2065}, {3064, 3065}, {4064, 4065}, }); ComputeAndCompareR2<float>(&builder, expected, {}, ErrorSpec(0.0001)); } XLA_TEST_F(ConcatTest, Concat_R3_3x0x2_3x0x1) { XlaBuilder builder(TestName()); auto a = ConstantR3FromArray3D(&builder, Array3D<float>(3, 0, 2)); auto b = ConstantR3FromArray3D(&builder, Array3D<float>(3, 0, 1)); ConcatInDim(&builder, {a, b}, 2); ComputeAndCompareR3<float>(&builder, Array3D<float>(3, 0, 3), {}, ErrorSpec(0.0001)); } XLA_TEST_F(ConcatTest, Concat_R3_3x1x2_3x1x1) { XlaBuilder builder(TestName()); Array3D<float> a_array({ {{0, 1}}, {{2, 3}}, 
{{4, 5}}, }); Array3D<float> b_array({ {{6}}, {{7}}, {{8}}, }); auto a = ConstantR3FromArray3D(&builder, a_array); auto b = ConstantR3FromArray3D(&builder, b_array); ConcatInDim(&builder, {a, b}, 2); Array3D<float> expected({ {{0, 1, 6}}, {{2, 3, 7}}, {{4, 5, 8}}, }); ComputeAndCompareR3<float>(&builder, expected, {}, ErrorSpec(0.0001)); } XLA_TEST_F(ConcatTest, Concat_R1_1x1_1x1_1x1) { XlaBuilder builder(TestName()); auto a = ConstantR1<float>(&builder, {42.0}); auto b = ConstantR1<float>(&builder, {64.0}); auto c = ConstantR1<float>(&builder, {256.0}); ConcatInDim(&builder, {a, b, c}, 0); std::vector<float> expected = {42, 64, 256}; ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001)); } XLA_TEST_F(ConcatTest, Concat_R3_3x1x2_3x1x1_3x1x1) { XlaBuilder builder(TestName()); Array3D<float> a_array({ {{0, 1}}, {{4, 5}}, {{8, 9}}, }); Array3D<float> b_array({ {{2}}, {{6}}, {{10}}, }); Array3D<float> c_array({ {{3}}, {{7}}, {{11}}, }); auto a = ConstantR3FromArray3D(&builder, a_array); auto b = ConstantR3FromArray3D(&builder, b_array); auto c = ConstantR3FromArray3D(&builder, c_array); ConcatInDim(&builder, {a, b, c}, 2); Array3D<float> expected({ {{0, 1, 2, 3}}, {{4, 5, 6, 7}}, {{8, 9, 10, 11}}, }); ComputeAndCompareR3<float>(&builder, expected, {}, ErrorSpec(0.0001)); } XLA_TEST_F(ConcatTest, DoubleConcatLeftAssociative) { XlaBuilder builder(TestName()); auto a = ConstantR1<float>(&builder, {42.0}); auto b = ConstantR1<float>(&builder, {64.0}); auto c = ConstantR1<float>(&builder, {256.0}); ConcatInDim(&builder, {ConcatInDim(&builder, {a, b}, 0), c}, 0); std::vector<float> expected = {42, 64, 256}; ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001)); } XLA_TEST_F(ConcatTest, DoubleConcatRightAssociative) { XlaBuilder builder(TestName()); auto a = ConstantR1<float>(&builder, {42.0}); auto b = ConstantR1<float>(&builder, {64.0}); auto c = ConstantR1<float>(&builder, {256.0}); ConcatInDim(&builder, {a, ConcatInDim(&builder, {b, c}, 0)}, 0); std::vector<float> expected = {42, 64, 256}; ComputeAndCompareR1<float>(&builder, expected, {}, ErrorSpec(0.0001)); } XLA_TEST_F(ConcatTest, Concat_1x1024_With_1x1024_InDim0) { Array2D<float> lhs(1, 1024); Array2D<float> rhs(1, 1024); for (int i = 0; i < 1024; ++i) { lhs(0, i) = i; rhs(0, i) = i + 1024; } XlaBuilder builder(TestName()); auto a = ConstantR2FromArray2D<float>(&builder, lhs); auto b = ConstantR2FromArray2D<float>(&builder, rhs); ConcatInDim(&builder, {a, b}, 0); Array2D<float> expected(2, 1024); for (int i = 0; i < 1024; ++i) { expected(0, i) = i; expected(1, i) = i + 1024; } ComputeAndCompareR2<float>(&builder, expected, {}, ErrorSpec(0.0001)); } XLA_TEST_F(ConcatTest, Concat_1x1024_With_1x1024_InDim1) { Array2D<float> lhs(1, 1024); Array2D<float> rhs(1, 1024); for (int i = 0; i < 1024; ++i) { lhs(0, i) = i; rhs(0, i) = i + 1024; } XlaBuilder builder(TestName()); auto a = ConstantR2FromArray2D<float>(&builder, lhs); auto b = ConstantR2FromArray2D<float>(&builder, rhs); ConcatInDim(&builder, {a, b}, 1); Array2D<float> expected(1, 2048); for (int i = 0; i < 1024; ++i) { expected(0, i) = i; expected(0, i + 1024) = i + 1024; } ComputeAndCompareR2<float>(&builder, expected, {}, ErrorSpec(0.0001)); } XLA_TEST_F(ConcatTest, Concat_64x64_With_64x2) { Array2D<float> lhs(64, 64); Array2D<float> rhs(64, 2); for (int i0 = 0; i0 < 64; ++i0) { for (int i1 = 0; i1 < 64; ++i1) { lhs(i0, i1) = (i0 << 10) | i1; } for (int i1 = 0; i1 < 2; ++i1) { rhs(i0, i1) = (i0 << 10) | (i1 + 64); } } XlaBuilder builder(TestName()); auto a = 
ConstantR2FromArray2D<float>(&builder, lhs); auto b = ConstantR2FromArray2D<float>(&builder, rhs); ConcatInDim(&builder, {a, b}, 1); Array2D<float> expected(64, 66); for (int i0 = 0; i0 < 64; ++i0) { for (int i1 = 0; i1 < 66; ++i1) { expected(i0, i1) = (i0 << 10) | i1; } } ComputeAndCompareR2<float>(&builder, expected, {}, ErrorSpec(0.0001)); } XLA_TEST_F(ConcatTest, CannotConcatOpaques) { XlaBuilder builder(TestName()); auto opaque_shape = ShapeUtil::MakeOpaqueShape(); auto r1f32 = xla::ShapeUtil::MakeShape(xla::F32, {1}); auto x = Parameter(&builder, 0, r1f32, "x"); auto y = Parameter(&builder, 1, opaque_shape, "y"); ConcatInDim(&builder, {x, y}, 0); absl::StatusOr<XlaComputation> computation_status = builder.Build(); ASSERT_FALSE(computation_status.ok()); EXPECT_THAT( computation_status.status().ToString(), HasSubstr("Expected array argument for operand of concatenation")); } XLA_TEST_F(ConcatTest, CannotConcatTokens) { XlaBuilder builder(TestName()); auto token_shape = ShapeUtil::MakeTokenShape(); auto r1f32 = xla::ShapeUtil::MakeShape(xla::F32, {1}); auto x = Parameter(&builder, 0, r1f32, "x"); auto y = Parameter(&builder, 1, token_shape, "y"); ConcatInDim(&builder, {x, y}, 0); absl::StatusOr<XlaComputation> computation_status = builder.Build(); ASSERT_FALSE(computation_status.ok()); EXPECT_THAT( computation_status.status().ToString(), HasSubstr("Expected array argument for operand of concatenation")); } XLA_TEST_F(ConcatTest, ConcatSeveralBoxedPredicates) { XlaBuilder builder(TestName()); auto p0 = ConstantR1<bool>(&builder, {true}); auto p1 = ConstantR1<bool>(&builder, {false}); auto p2 = ConstantR1<bool>(&builder, {true}); ConcatInDim(&builder, {p0, p1, p2}, 0); bool expected[] = {true, false, true}; ComputeAndCompareR1<bool>(&builder, expected, {}); } XLA_TEST_F(ConcatTest, ConcatSeveralR1S32s) { XlaBuilder builder(TestName()); auto a0 = ConstantR1<int32_t>(&builder, {1}); auto a1 = ConstantR1<int32_t>(&builder, {2, 3}); auto a2 = ConstantR1<int32_t>(&builder, {4, 5, 6}); auto a3 = ConstantR1<int32_t>(&builder, {7, 8, 9, 10}); ConcatInDim(&builder, {a0, a1, a2, a3}, 0); std::vector<int32_t> expected(10); std::iota(expected.begin(), expected.end(), 1); ComputeAndCompareR1<int32_t>(&builder, expected, {}); } XLA_TEST_F(ConcatTest, ConcatR3WeirdDims) { XlaBuilder builder(TestName()); Array3D<float> arr0(9, 17, 1); arr0.Fill(1); Array3D<float> arr1(9, 17, 256); arr1.Fill(2); Array3D<float> expected(9, 17, arr0.n3() + arr1.n3()); for (int64_t i = 0; i < expected.n1(); ++i) { for (int64_t j = 0; j < expected.n2(); ++j) { int64_t kk = 0; for (const Array3D<float>& arr : {arr0, arr1}) { for (int64_t k = 0; k < arr.n3(); ++k, ++kk) { expected(i, j, kk) = arr(i, j, k); } } } } XlaOp h0; auto p0 = CreateR3Parameter<float>(arr0, 0, "p0", &builder, &h0); XlaOp h1; auto p1 = CreateR3Parameter<float>(arr1, 1, "p1", &builder, &h1); ConcatInDim(&builder, {h0, h1}, 2); ComputeAndCompareR3<float>(&builder, expected, {p0.get(), p1.get()}); } XLA_TEST_F(ConcatTest, ConcatDeeplyNested) { XlaBuilder builder(TestName()); auto a_literal = LiteralUtil::CreateR1<float>({256.0}); auto a = Parameter(&builder, 0, a_literal.shape(), "x"); auto b = ConcatInDim(&builder, {a, a}, 0); auto c = ConcatInDim(&builder, {b, b}, 0); auto d = ConcatInDim(&builder, {c, c}, 0); auto e = ConcatInDim(&builder, {d, d}, 0); auto f = ConcatInDim(&builder, {e, e}, 0); auto g = ConcatInDim(&builder, {f, f}, 0); auto h = ConcatInDim(&builder, {g, g}, 0); auto i = ConcatInDim(&builder, {h, h}, 0); auto j = ConcatInDim(&builder, {i, 
i}, 0); auto k = ConcatInDim(&builder, {j, j}, 0); auto l = ConcatInDim(&builder, {k, k}, 0); auto m = ConcatInDim(&builder, {l, l}, 0); auto n = ConcatInDim(&builder, {m, m}, 0); auto o = ConcatInDim(&builder, {n, n}, 0); auto p = ConcatInDim(&builder, {o, o}, 0); auto q = ConcatInDim(&builder, {p, p}, 0); ConcatInDim(&builder, {q, q}, 0); std::vector<float> expected(131072, 256.0); auto a_data = client_->TransferToServer(a_literal).value(); ComputeAndCompareR1<float>(&builder, expected, {a_data.get()}); } XLA_TEST_F(ConcatTestHlo, ConcatWithBitcast) { auto module = ParseAndReturnVerifiedModule(R"( HloModule jit_broken.874 primitive_computation_add.866 { parameter.867 = f32[] parameter(0) parameter.868 = f32[] parameter(1) ROOT add.869 = f32[] add(parameter.867, parameter.868) } ENTRY jit_broken.874 { parameter.38 = f32[4,2]{1,0} parameter(0) reshape.723 = f32[4,2,1]{2,1,0} reshape(parameter.38) reshape.724 = f32[4,2,1]{2,1,0} reshape(parameter.38) concatenate.42 = f32[4,2,2]{2,1,0} concatenate(reshape.723, reshape.724), dimensions={2} slice.351 = f32[4,1,2]{2,1,0} slice(concatenate.42), slice={[0:4], [0:1], [0:2]} reshape.1058 = f32[4,2]{1,0} reshape(slice.351) slice.352 = f32[4,1]{1,0} slice(reshape.1058), slice={[0:4], [1:2]} reshape.1059 = f32[4]{0} reshape(slice.352) slice.353 = f32[4,1,1]{2,1,0} slice(concatenate.42), slice={[0:4], [0:1], [1:2]} reshape.1060 = f32[4]{0} reshape(slice.353) add.124 = f32[4]{0} add(reshape.1059, reshape.1060) slice.354 = f32[4,1]{1,0} slice(reshape.1058), slice={[0:4], [0:1]} reshape.1061 = f32[4]{0} reshape(slice.354) slice.379 = f32[4,1,1]{2,1,0} slice(concatenate.42), slice={[0:4], [0:1], [0:1]} reshape.1062 = f32[4]{0} reshape(slice.379) add.89 = f32[4]{0} add(reshape.1061, reshape.1062) subtract.126 = f32[4]{0} subtract(add.124, add.89) is-finite.127 = pred[4]{0} is-finite(subtract.126) not.128 = pred[4]{0} not(is-finite.127) abs.129 = f32[4]{0} abs(subtract.126) constant.130 = f32[] constant(inf) broadcast.131 = f32[4]{0} broadcast(constant.130), dimensions={} compare.132 = pred[4]{0} compare(abs.129, broadcast.131), direction=EQ not.133 = pred[4]{0} not(compare.132) and.134 = pred[4]{0} and(not.128, not.133) add.135 = f32[4]{0} add(add.124, add.89) maximum.125 = f32[4]{0} maximum(add.124, add.89) abs.136 = f32[4]{0} abs(subtract.126) negate.137 = f32[4]{0} negate(abs.136) exponential.138 = f32[4]{0} exponential(negate.137) log-plus-one.139 = f32[4]{0} log-plus-one(exponential.138) add.140 = f32[4]{0} add(maximum.125, log-plus-one.139) select.141 = f32[4]{0} select(and.134, add.135, add.140) slice.356 = f32[4,1,1]{2,1,0} slice(concatenate.42), slice={[0:4], [0:1], [1:2]} reshape.1064 = f32[4]{0} reshape(slice.356) add.214 = f32[4]{0} add(select.141, reshape.1064) slice.380 = f32[4,1,1]{2,1,0} slice(concatenate.42), slice={[0:4], [0:1], [0:1]} reshape.1066 = f32[4]{0} reshape(slice.380) add.179 = f32[4]{0} add(select.141, reshape.1066) subtract.216 = f32[4]{0} subtract(add.214, add.179) is-finite.217 = pred[4]{0} is-finite(subtract.216) not.218 = pred[4]{0} not(is-finite.217) abs.219 = f32[4]{0} abs(subtract.216) constant.220 = f32[] constant(inf) broadcast.221 = f32[4]{0} broadcast(constant.220), dimensions={} compare.222 = pred[4]{0} compare(abs.219, broadcast.221), direction=EQ not.223 = pred[4]{0} not(compare.222) and.224 = pred[4]{0} and(not.218, not.223) add.225 = f32[4]{0} add(add.214, add.179) maximum.215 = f32[4]{0} maximum(add.214, add.179) abs.226 = f32[4]{0} abs(subtract.216) negate.227 = f32[4]{0} negate(abs.226) exponential.228 = 
f32[4]{0} exponential(negate.227) log-plus-one.229 = f32[4]{0} log-plus-one(exponential.228) add.230 = f32[4]{0} add(maximum.215, log-plus-one.229) select.231 = f32[4]{0} select(and.224, add.225, add.230) slice.359 = f32[4,1,1]{2,1,0} slice(concatenate.42), slice={[0:4], [0:1], [1:2]} reshape.1068 = f32[4]{0} reshape(slice.359) add.304 = f32[4]{0} add(select.231, reshape.1068) slice.381 = f32[4,1,1]{2,1,0} slice(concatenate.42), slice={[0:4], [0:1], [0:1]} reshape.1070 = f32[4]{0} reshape(slice.381) add.269 = f32[4]{0} add(select.231, reshape.1070) subtract.306 = f32[4]{0} subtract(add.304, add.269) is-finite.307 = pred[4]{0} is-finite(subtract.306) not.308 = pred[4]{0} not(is-finite.307) abs.309 = f32[4]{0} abs(subtract.306) constant.310 = f32[] constant(inf) broadcast.311 = f32[4]{0} broadcast(constant.310), dimensions={} compare.312 = pred[4]{0} compare(abs.309, broadcast.311), direction=EQ not.313 = pred[4]{0} not(compare.312) and.314 = pred[4]{0} and(not.308, not.313) add.315 = f32[4]{0} add(add.304, add.269) maximum.305 = f32[4]{0} maximum(add.304, add.269) abs.316 = f32[4]{0} abs(subtract.306) negate.317 = f32[4]{0} negate(abs.316) exponential.318 = f32[4]{0} exponential(negate.317) log-plus-one.319 = f32[4]{0} log-plus-one(exponential.318) add.320 = f32[4]{0} add(maximum.305, log-plus-one.319) select.321 = f32[4]{0} select(and.314, add.315, add.320) slice.362 = f32[4,1,1]{2,1,0} slice(concatenate.42), slice={[0:4], [0:1], [1:2]} reshape.1072 = f32[4]{0} reshape(slice.362) add.394 = f32[4]{0} add(select.321, reshape.1072) slice.382 = f32[4,1,1]{2,1,0} slice(concatenate.42), slice={[0:4], [0:1], [0:1]} reshape.1074 = f32[4]{0} reshape(slice.382) add.359 = f32[4]{0} add(select.321, reshape.1074) subtract.396 = f32[4]{0} subtract(add.394, add.359) is-finite.397 = pred[4]{0} is-finite(subtract.396) not.398 = pred[4]{0} not(is-finite.397) abs.399 = f32[4]{0} abs(subtract.396) constant.400 = f32[] constant(inf) broadcast.401 = f32[4]{0} broadcast(constant.400), dimensions={} compare.402 = pred[4]{0} compare(abs.399, broadcast.401), direction=EQ not.403 = pred[4]{0} not(compare.402) and.404 = pred[4]{0} and(not.398, not.403) add.405 = f32[4]{0} add(add.394, add.359) maximum.395 = f32[4]{0} maximum(add.394, add.359) abs.406 = f32[4]{0} abs(subtract.396) negate.407 = f32[4]{0} negate(abs.406) exponential.408 = f32[4]{0} exponential(negate.407) log-plus-one.409 = f32[4]{0} log-plus-one(exponential.408) add.410 = f32[4]{0} add(maximum.395, log-plus-one.409) select.411 = f32[4]{0} select(and.404, add.405, add.410) slice.365 = f32[4,1,1]{2,1,0} slice(concatenate.42), slice={[0:4], [0:1], [1:2]} reshape.1076 = f32[4]{0} reshape(slice.365) add.484 = f32[4]{0} add(select.411, reshape.1076) slice.383 = f32[4,1,1]{2,1,0} slice(concatenate.42), slice={[0:4], [0:1], [0:1]} reshape.1078 = f32[4]{0} reshape(slice.383) add.449 = f32[4]{0} add(select.411, reshape.1078) subtract.486 = f32[4]{0} subtract(add.484, add.449) is-finite.487 = pred[4]{0} is-finite(subtract.486) not.488 = pred[4]{0} not(is-finite.487) abs.489 = f32[4]{0} abs(subtract.486) constant.490 = f32[] constant(inf) broadcast.491 = f32[4]{0} broadcast(constant.490), dimensions={} compare.492 = pred[4]{0} compare(abs.489, broadcast.491), direction=EQ not.493 = pred[4]{0} not(compare.492) and.494 = pred[4]{0} and(not.488, not.493) add.495 = f32[4]{0} add(add.484, add.449) maximum.485 = f32[4]{0} maximum(add.484, add.449) abs.496 = f32[4]{0} abs(subtract.486) negate.497 = f32[4]{0} negate(abs.496) exponential.498 = f32[4]{0} 
exponential(negate.497) log-plus-one.499 = f32[4]{0} log-plus-one(exponential.498) add.500 = f32[4]{0} add(maximum.485, log-plus-one.499) select.501 = f32[4]{0} select(and.494, add.495, add.500) slice.368 = f32[4,1,1]{2,1,0} slice(concatenate.42), slice={[0:4], [0:1], [1:2]} reshape.1080 = f32[4]{0} reshape(slice.368) add.574 = f32[4]{0} add(select.501, reshape.1080) slice.384 = f32[4,1,1]{2,1,0} slice(concatenate.42), slice={[0:4], [0:1], [0:1]} reshape.1082 = f32[4]{0} reshape(slice.384) add.539 = f32[4]{0} add(select.501, reshape.1082) subtract.576 = f32[4]{0} subtract(add.574, add.539) is-finite.577 = pred[4]{0} is-finite(subtract.576) not.578 = pred[4]{0} not(is-finite.577) abs.579 = f32[4]{0} abs(subtract.576) constant.580 = f32[] constant(inf) broadcast.581 = f32[4]{0} broadcast(constant.580), dimensions={} compare.582 = pred[4]{0} compare(abs.579, broadcast.581), direction=EQ not.583 = pred[4]{0} not(compare.582) and.584 = pred[4]{0} and(not.578, not.583) add.585 = f32[4]{0} add(add.574, add.539) maximum.575 = f32[4]{0} maximum(add.574, add.539) abs.586 = f32[4]{0} abs(subtract.576) negate.587 = f32[4]{0} negate(abs.586) exponential.588 = f32[4]{0} exponential(negate.587) log-plus-one.589 = f32[4]{0} log-plus-one(exponential.588) add.590 = f32[4]{0} add(maximum.575, log-plus-one.589) select.591 = f32[4]{0} select(and.584, add.585, add.590) slice.371 = f32[4,1,1]{2,1,0} slice(concatenate.42), slice={[0:4], [0:1], [1:2]} reshape.1084 = f32[4]{0} reshape(slice.371) add.664 = f32[4]{0} add(select.591, reshape.1084) slice.385 = f32[4,1,1]{2,1,0} slice(concatenate.42), slice={[0:4], [0:1], [0:1]} reshape.1086 = f32[4]{0} reshape(slice.385) add.629 = f32[4]{0} add(select.591, reshape.1086) subtract.666 = f32[4]{0} subtract(add.664, add.629) is-finite.667 = pred[4]{0} is-finite(subtract.666) not.668 = pred[4]{0} not(is-finite.667) abs.669 = f32[4]{0} abs(subtract.666) constant.670 = f32[] constant(inf) broadcast.671 = f32[4]{0} broadcast(constant.670), dimensions={} compare.672 = pred[4]{0} compare(abs.669, broadcast.671), direction=EQ not.673 = pred[4]{0} not(compare.672) and.674 = pred[4]{0} and(not.668, not.673) add.675 = f32[4]{0} add(add.664, add.629) maximum.665 = f32[4]{0} maximum(add.664, add.629) abs.676 = f32[4]{0} abs(subtract.666) negate.677 = f32[4]{0} negate(abs.676) exponential.678 = f32[4]{0} exponential(negate.677) log-plus-one.679 = f32[4]{0} log-plus-one(exponential.678) add.680 = f32[4]{0} add(maximum.665, log-plus-one.679) select.681 = f32[4]{0} select(and.674, add.675, add.680) slice.374 = f32[4,1,1]{2,1,0} slice(concatenate.42), slice={[0:4], [0:1], [1:2]} reshape.1088 = f32[4]{0} reshape(slice.374) add.754 = f32[4]{0} add(select.681, reshape.1088) slice.386 = f32[4,1,1]{2,1,0} slice(concatenate.42), slice={[0:4], [0:1], [0:1]} reshape.1090 = f32[4]{0} reshape(slice.386) add.719 = f32[4]{0} add(select.681, reshape.1090) subtract.756 = f32[4]{0} subtract(add.754, add.719) is-finite.757 = pred[4]{0} is-finite(subtract.756) not.758 = pred[4]{0} not(is-finite.757) abs.759 = f32[4]{0} abs(subtract.756) constant.760 = f32[] constant(inf) broadcast.761 = f32[4]{0} broadcast(constant.760), dimensions={} compare.762 = pred[4]{0} compare(abs.759, broadcast.761), direction=EQ not.763 = pred[4]{0} not(compare.762) and.764 = pred[4]{0} and(not.758, not.763) add.765 = f32[4]{0} add(add.754, add.719) maximum.755 = f32[4]{0} maximum(add.754, add.719) abs.766 = f32[4]{0} abs(subtract.756) negate.767 = f32[4]{0} negate(abs.766) exponential.768 = f32[4]{0} exponential(negate.767) 
log-plus-one.769 = f32[4]{0} log-plus-one(exponential.768) add.770 = f32[4]{0} add(maximum.755, log-plus-one.769) select.771 = f32[4]{0} select(and.764, add.765, add.770) slice.377 = f32[4,1,1]{2,1,0} slice(concatenate.42), slice={[0:4], [0:1], [1:2]} reshape.1092 = f32[4]{0} reshape(slice.377) add.844 = f32[4]{0} add(select.771, reshape.1092) slice.387 = f32[4,1,1]{2,1,0} slice(concatenate.42), slice={[0:4], [0:1], [0:1]} reshape.1094 = f32[4]{0} reshape(slice.387) add.809 = f32[4]{0} add(select.771, reshape.1094) subtract.846 = f32[4]{0} subtract(add.844, add.809) is-finite.847 = pred[4]{0} is-finite(subtract.846) not.848 = pred[4]{0} not(is-finite.847) abs.849 = f32[4]{0} abs(subtract.846) constant.850 = f32[] constant(inf) broadcast.851 = f32[4]{0} broadcast(constant.850), dimensions={} compare.852 = pred[4]{0} compare(abs.849, broadcast.851), direction=EQ not.853 = pred[4]{0} not(compare.852) and.854 = pred[4]{0} and(not.848, not.853) add.855 = f32[4]{0} add(add.844, add.809) maximum.845 = f32[4]{0} maximum(add.844, add.809) abs.856 = f32[4]{0} abs(subtract.846) negate.857 = f32[4]{0} negate(abs.856) exponential.858 = f32[4]{0} exponential(negate.857) log-plus-one.859 = f32[4]{0} log-plus-one(exponential.858) add.860 = f32[4]{0} add(maximum.845, log-plus-one.859) select.861 = f32[4]{0} select(and.854, add.855, add.860) constant.865 = f32[] constant(0) reduce.2 = f32[] reduce(select.861, constant.865), dimensions={0}, to_apply=primitive_computation_add.866 reduce.3 = f32[] reduce(select.861, constant.865), dimensions={0}, to_apply=primitive_computation_add.866 add.77 = f32[] add(reduce.2, reduce.3) constant.719 = f32[] constant(0.125) multiply = f32[] multiply(add.77, constant.719) ROOT tuple.873 = (f32[]) tuple(multiply) })") .value(); auto input_array = std::make_unique<Array2D<float>>(4, 2); input_array->FillUnique(1.0f); auto input = LiteralUtil::CreateR2FromArray2D<float>(*input_array); EXPECT_TRUE(RunAndCompare(std::move(module), {&input}, error_spec_)); } struct R2BinarySpec { int64_t lhs_dim0; int64_t lhs_dim1; int64_t rhs_dim0; int64_t rhs_dim1; int64_t concat_dimension; }; class ConcatR2BinaryTest : public ClientLibraryTestBase, public ::testing::WithParamInterface<R2BinarySpec> { }; TEST_P(ConcatR2BinaryTest, DoIt) { const R2BinarySpec& spec = GetParam(); Array2D<int32_t> lhs(spec.lhs_dim0, spec.lhs_dim1); lhs.FillUnique(); Array2D<int32_t> rhs(spec.rhs_dim0, spec.rhs_dim1); rhs.FillUnique(1000); XlaBuilder builder(TestName()); auto a0 = ConstantR2FromArray2D<int32_t>(&builder, lhs); auto a1 = ConstantR2FromArray2D<int32_t>(&builder, rhs); ConcatInDim(&builder, {a0, a1}, spec.concat_dimension); std::unique_ptr<Array2D<int32_t>> expected = ReferenceUtil::Concat2D(lhs, rhs, spec.concat_dimension); ComputeAndCompareR2<int32_t>(&builder, *expected, {}); } XLA_TEST_F(ConcatTest, ConcatOperandsOfSameOperand) { auto f32_scalar = ShapeUtil::MakeShape(xla::F32, {}); auto x_literal = LiteralUtil::CreateR0<float>(2.f); auto y_literal = LiteralUtil::CreateR0<float>(3.f); auto x_data = client_->TransferToServer(x_literal).value(); auto y_data = client_->TransferToServer(y_literal).value(); XlaBuilder builder(TestName()); auto x = Parameter(&builder, 0, f32_scalar, "x"); auto y = Parameter(&builder, 1, f32_scalar, "y"); auto mul = Mul(x, y); auto add1 = Add(mul, ConstantR1<float>(&builder, {1.f, 2.f})); auto add2 = Add(mul, ConstantR1<float>(&builder, {3.f, 4.f})); auto add3 = Add(mul, ConstantR1<float>(&builder, {5.f, 6.f})); ConcatInDim(&builder, {add1, add2, add3}, 0); 
ComputeAndCompareR1<float>(&builder, {7., 8., 9., 10., 11., 12.}, {x_data.get(), y_data.get()}, ErrorSpec(1e-4)); } XLA_TEST_F(ConcatTest, ConcatBroadcastArgument) { auto f32_scalar = ShapeUtil::MakeShape(xla::F32, {}); auto x_literal = LiteralUtil::CreateR1<float>({2.0f, 3.0f, 5.0f, 6.0f}); auto y_literal = LiteralUtil::CreateR0<float>(1.5f); auto z_literal = LiteralUtil::CreateR0<float>(5.5f); auto x_data = client_->TransferToServer(x_literal).value(); auto y_data = client_->TransferToServer(y_literal).value(); auto z_data = client_->TransferToServer(z_literal).value(); XlaBuilder builder(TestName()); auto x = Parameter(&builder, 0, x_literal.shape(), "x"); auto y = Parameter(&builder, 1, f32_scalar, "y"); auto z = Parameter(&builder, 2, f32_scalar, "z"); auto bcast = Broadcast(y, {5}); auto bcast2 = Broadcast(z, {3}); auto concat = ConcatInDim(&builder, {bcast, x}, /*dim
1,044
cpp
tensorflow/tensorflow
bhwc_to_phwc4
tensorflow/lite/delegates/gpu/gl/converters/bhwc_to_phwc4.cc
tensorflow/lite/delegates/gpu/gl/converters/bhwc_to_phwc4_test.cc
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_CONVERTERS_BHWC_TO_PHWC4_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_GL_CONVERTERS_BHWC_TO_PHWC4_H_ #include <utility> #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/types.h" #include "tensorflow/lite/delegates/gpu/gl/command_queue.h" #include "tensorflow/lite/delegates/gpu/gl/gl_buffer.h" #include "tensorflow/lite/delegates/gpu/gl/gl_program.h" namespace tflite { namespace gpu { namespace gl { class ConverterBhwcToPhwc4 { public: ConverterBhwcToPhwc4() : program_(), workgroup_size_() {} static absl::Status Create(ConverterBhwcToPhwc4* converter); absl::Status Convert(const BHWC& shape, const GlBuffer& source, CommandQueue* command_queue , GlBuffer* destination); private: explicit ConverterBhwcToPhwc4(GlProgram program, const uint3& workgroup_size) : program_(std::move(program)), workgroup_size_(workgroup_size) {} GlProgram program_; uint3 workgroup_size_; }; } } } #endif #include "tensorflow/lite/delegates/gpu/gl/converters/bhwc_to_phwc4.h" #include <algorithm> #include <cstdint> #include <string> #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/types.h" #include "tensorflow/lite/delegates/gpu/common/util.h" #include "tensorflow/lite/delegates/gpu/gl/converters/util.h" #include "tensorflow/lite/delegates/gpu/gl/gl_program.h" #include "tensorflow/lite/delegates/gpu/gl/gl_shader.h" #include "tensorflow/lite/delegates/gpu/gl/variable.h" namespace tflite { namespace gpu { namespace gl { absl::Status ConverterBhwcToPhwc4::Create(ConverterBhwcToPhwc4* converter) { uint3 workgroup_size = uint3(4, 4, 4); std::string shader_source = GetShaderHeader(workgroup_size) + R"( layout(std430) buffer; precision highp float; layout(binding = 0) readonly buffer B0 { float elements[]; } input_data; layout(binding = 1) writeonly buffer B1 { vec4 elements[]; } output_data; uniform ivec4 sizes_; void main() { ivec3 gid = ivec3(gl_GlobalInvocationID.xyz); if (gid.x >= sizes_.x || gid.y >= sizes_.y || gid.z >= sizes_.z) { return; } vec4 v = vec4(0); int dst_channel = gid.z * 4; int index = (gid.y * sizes_.x + gid.x) * sizes_.w + dst_channel; for (int i = 0; i < 4; ++i, ++index, ++dst_channel) { if (dst_channel >= sizes_.w) break; v[i] = input_data.elements[index]; } output_data.elements[(gid.z * sizes_.y + gid.y) * sizes_.x + gid.x] = v; })"; GlShader shader; RETURN_IF_ERROR( GlShader::CompileShader(GL_COMPUTE_SHADER, shader_source, &shader)); GlProgram program; RETURN_IF_ERROR(GlProgram::CreateWithShader(shader, &program)); *converter = ConverterBhwcToPhwc4(std::move(program), workgroup_size); return absl::OkStatus(); } absl::Status ConverterBhwcToPhwc4::Convert(const BHWC& shape, const GlBuffer& source, CommandQueue* command_queue, GlBuffer* destination) { if (source.bytes_size() < BytesForBHWC(shape)) { return absl::InvalidArgumentError( "BhwcToPhwc4: Input data size does not match expected size."); } if (destination->bytes_size() < BytesForPHWC4(shape)) { return absl::InvalidArgumentError( "BhwcToPhwc4: output data size does not match expected size."); } if (shape.b != 1) { return absl::UnimplementedError( "BhwcToPhwc4: Batch size is not equal to 1."); } uint3 workload = uint3(shape.w, shape.h, DivideRoundUp(shape.c, 4)); uint3 num_workgroups = DivideRoundUp(workload, workgroup_size_); RETURN_IF_ERROR(program_.SetParameter( {"sizes_", int4(static_cast<int32_t>(workload.x), static_cast<int32_t>(workload.y), 
static_cast<int32_t>(workload.z), static_cast<int32_t>(shape.c))})); RETURN_IF_ERROR(source.BindToIndex(0)); RETURN_IF_ERROR(destination->BindToIndex(1)); if (command_queue) { return command_queue->Dispatch(program_, num_workgroups); } return program_.Dispatch(num_workgroups); } } } }
#include "tensorflow/lite/delegates/gpu/gl/converters/bhwc_to_phwc4.h" #include <algorithm> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/types/span.h" #include "tensorflow/lite/delegates/gpu/common/convert.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/gl/egl_environment.h" #include "tensorflow/lite/delegates/gpu/gl/gl_buffer.h" #include "tensorflow/lite/delegates/gpu/gl/portable_gl31.h" namespace tflite { namespace gpu { namespace gl { namespace { inline std::vector<float> GenerateFloats(float multiplier, int size) { std::vector<float> v(size); for (int i = 0; i < size; ++i) { v[i] = multiplier * i * (i % 2 == 0 ? -1 : 1); } return v; } absl::Status RunTest(const BHWC& shape) { std::vector<float> input = GenerateFloats(0.01, shape.DimensionsProduct()); std::vector<float> output(GetElementsSizeForPHWC4(shape), 0); RETURN_IF_ERROR( ConvertToPHWC4(absl::MakeConstSpan(input.data(), input.size()), shape, absl::MakeSpan(output.data(), output.size()))); std::unique_ptr<EglEnvironment> env; RETURN_IF_ERROR(EglEnvironment::NewEglEnvironment(&env)); GlBuffer input_buffer; RETURN_IF_ERROR(CreateReadOnlyShaderStorageBuffer( absl::MakeConstSpan(input.data(), input.size()), &input_buffer)); GlBuffer output_buffer; RETURN_IF_ERROR(CreateReadWriteShaderStorageBuffer<float>( GetElementsSizeForPHWC4(shape), &output_buffer)); ConverterBhwcToPhwc4 converter; RETURN_IF_ERROR(ConverterBhwcToPhwc4::Create(&converter)); RETURN_IF_ERROR( converter.Convert(shape, input_buffer, nullptr, &output_buffer)); std::vector<float> converted_output(output.size(), 0); RETURN_IF_ERROR(output_buffer.Read( absl::MakeSpan(converted_output.data(), converted_output.size()))); if (output != converted_output) { return absl::InternalError("Outputs don't match"); } return absl::OkStatus(); } TEST(HwcToPhwc4, Smoke) { for (int32_t h : {1, 2, 3, 7, 20}) { for (int32_t w : {1, 2, 4, 5, 11}) { for (int32_t c : {1, 2, 4, 5, 8, 9}) { BHWC shape(1, h, w, c); EXPECT_TRUE(RunTest(shape).ok()) << shape.h << " " << shape.w << " " << shape.c; } } } } } } } }
1,045
cpp
tensorflow/tensorflow
phwc4_to_bhwc
tensorflow/lite/delegates/gpu/gl/converters/phwc4_to_bhwc.cc
tensorflow/lite/delegates/gpu/gl/converters/phwc4_to_bhwc_test.cc
#ifndef TENSORFLOW_LITE_DELEGATES_GPU_GL_CONVERTERS_PHWC4_TO_BHWC_H_ #define TENSORFLOW_LITE_DELEGATES_GPU_GL_CONVERTERS_PHWC4_TO_BHWC_H_ #include <utility> #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/types.h" #include "tensorflow/lite/delegates/gpu/gl/command_queue.h" #include "tensorflow/lite/delegates/gpu/gl/gl_buffer.h" #include "tensorflow/lite/delegates/gpu/gl/gl_program.h" namespace tflite { namespace gpu { namespace gl { class ConverterPhwc4ToBhwc { public: ConverterPhwc4ToBhwc() : program_(), workgroup_size_() {} static absl::Status Create(ConverterPhwc4ToBhwc* converter); absl::Status Convert(const BHWC& shape, const GlBuffer& source, CommandQueue* command_queue , GlBuffer* destination); private: explicit ConverterPhwc4ToBhwc(GlProgram program, const uint3& workgroup_size) : program_(std::move(program)), workgroup_size_(workgroup_size) {} GlProgram program_; uint3 workgroup_size_; }; } } } #endif #include "tensorflow/lite/delegates/gpu/gl/converters/phwc4_to_bhwc.h" #include <algorithm> #include <cstdint> #include <string> #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/common/types.h" #include "tensorflow/lite/delegates/gpu/common/util.h" #include "tensorflow/lite/delegates/gpu/gl/converters/util.h" #include "tensorflow/lite/delegates/gpu/gl/gl_program.h" #include "tensorflow/lite/delegates/gpu/gl/gl_shader.h" #include "tensorflow/lite/delegates/gpu/gl/variable.h" namespace tflite { namespace gpu { namespace gl { absl::Status ConverterPhwc4ToBhwc::Create(ConverterPhwc4ToBhwc* converter) { uint3 workgroup_size = uint3(4, 4, 4); std::string shader_source = GetShaderHeader(workgroup_size) + R"( layout(std430) buffer; precision highp float; layout(binding = 0) readonly buffer B0 { vec4 elements[]; } input_data; layout(binding = 1) writeonly buffer B1 { float elements[]; } output_data; uniform ivec4 sizes_; void main() { ivec3 gid = ivec3(gl_GlobalInvocationID.xyz); if (gid.x >= sizes_.x || gid.y >= sizes_.y || gid.z >= sizes_.z) { return; } output_data.elements[(gid.y * sizes_.x + gid.x) * sizes_.z + gid.z] = input_data.elements[(gid.z / 4 * sizes_.y + gid.y) * sizes_.x + gid.x][gid.z % 4]; })"; GlShader shader; RETURN_IF_ERROR( GlShader::CompileShader(GL_COMPUTE_SHADER, shader_source, &shader)); GlProgram program; RETURN_IF_ERROR(GlProgram::CreateWithShader(shader, &program)); *converter = ConverterPhwc4ToBhwc(std::move(program), workgroup_size); return absl::OkStatus(); } absl::Status ConverterPhwc4ToBhwc::Convert(const BHWC& shape, const GlBuffer& source, CommandQueue* command_queue, GlBuffer* destination) { if (source.bytes_size() < BytesForPHWC4(shape)) { return absl::InvalidArgumentError( "Phwc4ToBhwc: Input data size does not match expected size."); } if (destination->bytes_size() < BytesForBHWC(shape)) { return absl::InvalidArgumentError( "Phwc4ToBhwc: output data size does not match expected size."); } if (shape.b != 1) { return absl::UnimplementedError( "Phwc4ToBhwc: Batch size is not equal to 1."); } uint3 workload = uint3(shape.w, shape.h, shape.c); uint3 num_workgroups = DivideRoundUp(workload, workgroup_size_); RETURN_IF_ERROR(program_.SetParameter( {"sizes_", int4(static_cast<int32_t>(workload.x), static_cast<int32_t>(workload.y), static_cast<int32_t>(workload.z), 0)})); RETURN_IF_ERROR(source.BindToIndex(0)); RETURN_IF_ERROR(destination->BindToIndex(1)); if (command_queue) { return 
command_queue->Dispatch(program_, num_workgroups); } return program_.Dispatch(num_workgroups); } } } }
#include "tensorflow/lite/delegates/gpu/gl/converters/phwc4_to_bhwc.h" #include <algorithm> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/types/span.h" #include "tensorflow/lite/delegates/gpu/common/convert.h" #include "tensorflow/lite/delegates/gpu/common/shape.h" #include "tensorflow/lite/delegates/gpu/common/status.h" #include "tensorflow/lite/delegates/gpu/gl/egl_environment.h" #include "tensorflow/lite/delegates/gpu/gl/gl_buffer.h" #include "tensorflow/lite/delegates/gpu/gl/portable_gl31.h" namespace tflite { namespace gpu { namespace gl { namespace { inline std::vector<float> GenerateFloats(float multiplier, int size) { std::vector<float> v(size); for (int i = 0; i < size; ++i) { v[i] = multiplier * i * (i % 2 == 0 ? -1 : 1); } return v; } absl::Status RunTest(const BHWC& shape) { std::vector<float> input = GenerateFloats(0.01, GetElementsSizeForPHWC4(shape)); std::vector<float> output(shape.DimensionsProduct(), 0); RETURN_IF_ERROR( ConvertFromPHWC4(absl::MakeConstSpan(input.data(), input.size()), shape, absl::MakeSpan(output.data(), output.size()))); std::unique_ptr<EglEnvironment> env; RETURN_IF_ERROR(EglEnvironment::NewEglEnvironment(&env)); GlBuffer input_buffer; RETURN_IF_ERROR(CreateReadOnlyShaderStorageBuffer( absl::MakeConstSpan(input.data(), input.size()), &input_buffer)); GlBuffer output_buffer; RETURN_IF_ERROR(CreateReadWriteShaderStorageBuffer<float>( shape.DimensionsProduct(), &output_buffer)); ConverterPhwc4ToBhwc converter; RETURN_IF_ERROR(ConverterPhwc4ToBhwc::Create(&converter)); RETURN_IF_ERROR( converter.Convert(shape, input_buffer, nullptr, &output_buffer)); std::vector<float> converted_output(output.size(), 0); RETURN_IF_ERROR(output_buffer.Read( absl::MakeSpan(converted_output.data(), converted_output.size()))); if (output != converted_output) { return absl::InternalError("Outputs don't match"); } return absl::OkStatus(); } TEST(Phwc4ToHwc, Smoke) { for (int32_t h : {1, 2, 3, 7, 20}) { for (int32_t w : {1, 2, 4, 5, 11}) { for (int32_t c : {1, 2, 4, 5, 8, 9}) { BHWC shape(1, h, w, c); EXPECT_TRUE(RunTest(shape).ok()) << shape.h << " " << shape.w << " " << shape.c; } } } } } } } }
1,046
cpp
tensorflow/tensorflow
dtensor_location
tensorflow/dtensor/mlir/dtensor_location.cc
tensorflow/dtensor/mlir/dtensor_location_test.cc
#ifndef TENSORFLOW_DTENSOR_MLIR_DTENSOR_LOCATION_H_ #define TENSORFLOW_DTENSOR_MLIR_DTENSOR_LOCATION_H_ #include <string> #include "mlir/IR/Location.h" #include "mlir/IR/Operation.h" #include "mlir/Support/LLVM.h" namespace tensorflow { namespace dtensor { mlir::Location DTensorLocation(mlir::Location loc, llvm::StringRef file, unsigned int line, llvm::StringRef name = ""); mlir::Location DTensorLocation(mlir::Operation* op, llvm::StringRef file, unsigned int line, llvm::StringRef name = ""); std::string DTensorLocationToString(mlir::Location loc); } } #define DT_LOC(loc) \ ::tensorflow::dtensor::DTensorLocation(loc, __FILE__, __LINE__) #define DT_LOC2(loc, name) \ ::tensorflow::dtensor::DTensorLocation(loc, __FILE__, __LINE__, name) #endif #include "tensorflow/dtensor/mlir/dtensor_location.h" #include <algorithm> #include <queue> #include <string> #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/FormatVariadic.h" #include "llvm/Support/raw_ostream.h" #include "mlir/Support/LLVM.h" #include "tensorflow/compiler/mlir/utils/name_utils.h" namespace tensorflow { namespace dtensor { namespace { std::string CreateLocalLocationString(mlir::FileLineColLoc loc) { return llvm::formatv(">> {0}:{1}:{2}", loc.getFilename(), loc.getLine(), loc.getColumn()) .str(); } } mlir::Location DTensorLocation(mlir::Location loc, llvm::StringRef file, unsigned int line, llvm::StringRef name) { auto split = file.rsplit("/"); if (!split.second.empty()) file = split.second; mlir::Location callee_loc = mlir::FileLineColLoc::get(loc.getContext(), file, line, 0); std::string new_name = GetNameFromLoc(loc); if (!new_name.empty()) { if (!name.empty()) { new_name = llvm::formatv("{0}/{1}", new_name, name).str(); } callee_loc = mlir::NameLoc::get( mlir::StringAttr::get(loc.getContext(), new_name), callee_loc); } return mlir::CallSiteLoc::get(callee_loc, loc); } mlir::Location DTensorLocation(mlir::Operation* op, llvm::StringRef file, unsigned int line, llvm::StringRef name) { return DTensorLocation(op->getLoc(), file, line, name); } std::string DTensorLocationToString(mlir::Location loc) { llvm::SmallVector<std::string, 4> stack; std::queue<mlir::Location> queue; queue.push(loc); while (!queue.empty()) { mlir::Location& front = queue.front(); if (auto name_loc = mlir::dyn_cast<mlir::NameLoc>(front)) { queue.push(name_loc.getChildLoc()); } else if (auto callsite_loc = mlir::dyn_cast<mlir::CallSiteLoc>(front)) { queue.push(callsite_loc.getCallee()); queue.push(callsite_loc.getCaller()); } else if (auto line_loc = mlir::dyn_cast<mlir::FileLineColLoc>(front)) { stack.push_back(CreateLocalLocationString(line_loc)); } queue.pop(); } std::reverse(stack.begin(), stack.end()); std::string s; llvm::raw_string_ostream ss(s); llvm::interleave(stack, ss, "\n"); return ss.str(); } } }
#include "tensorflow/dtensor/mlir/dtensor_location.h" #include "mlir/IR/Location.h" #include "mlir/IR/MLIRContext.h" #include "mlir/Support/LLVM.h" #include "tensorflow/compiler/mlir/utils/name_utils.h" #include "tensorflow/core/platform/test.h" namespace { void CheckFileLineColLocation(mlir::Location loc, unsigned line, unsigned column) { ASSERT_TRUE(mlir::isa<mlir::FileLineColLoc>(loc)); auto file_line_col_loc = mlir::cast<mlir::FileLineColLoc>(loc); EXPECT_EQ(file_line_col_loc.getFilename(), "test.cc"); EXPECT_EQ(file_line_col_loc.getLine(), line); EXPECT_EQ(file_line_col_loc.getColumn(), column); } TEST(DTensorLocationTest, HandlesEmptyLocation) { mlir::MLIRContext ctx; mlir::Location loc = mlir::FileLineColLoc::get(&ctx, "test.cc", 10, 20); loc = tensorflow::dtensor::DTensorLocation(loc, "test.cc", 21); ASSERT_TRUE(mlir::isa<mlir::CallSiteLoc>(loc)); auto callsite_loc = mlir::cast<mlir::CallSiteLoc>(loc); CheckFileLineColLocation(callsite_loc.getCallee(), 21, 0); CheckFileLineColLocation(callsite_loc.getCaller(), 10, 20); constexpr char stack[] = R"stack(>> test.cc:10:20 >> test.cc:21:0)stack"; EXPECT_EQ(tensorflow::dtensor::DTensorLocationToString(loc), stack); } TEST(DTensorLocationTest, HandlesMultipleCalls) { mlir::MLIRContext ctx; mlir::Location test_loc = mlir::FileLineColLoc::get(&ctx, "test.cc", 10, 20); test_loc = tensorflow::dtensor::DTensorLocation(test_loc, "test.cc", 21); test_loc = tensorflow::dtensor::DTensorLocation(test_loc, "test.cc", 22); test_loc = tensorflow::dtensor::DTensorLocation(test_loc, "test.cc", 23); test_loc = tensorflow::dtensor::DTensorLocation(test_loc, "test.cc", 24); auto verify_loc = test_loc; for (int i = 0; i < 4; ++i) { ASSERT_TRUE(mlir::isa<mlir::CallSiteLoc>(verify_loc)); auto callsite_loc = mlir::cast<mlir::CallSiteLoc>(verify_loc); auto callee_loc = callsite_loc.getCallee(); CheckFileLineColLocation(callee_loc, 24 - i, 0); verify_loc = callsite_loc.getCaller(); } CheckFileLineColLocation(verify_loc, 10, 20); constexpr char stack[] = R"stack(>> test.cc:10:20 >> test.cc:21:0 >> test.cc:22:0 >> test.cc:23:0 >> test.cc:24:0)stack"; EXPECT_EQ(tensorflow::dtensor::DTensorLocationToString(test_loc), stack); } TEST(DTensorLocationTest, HandlesNameLoc) { mlir::MLIRContext ctx; mlir::Location test_loc = mlir::NameLoc::get(mlir::StringAttr::get(&ctx, "op@"), mlir::FileLineColLoc::get(&ctx, "test.cc", 10, 20)); test_loc = tensorflow::dtensor::DTensorLocation(test_loc, "test.cc", 21); ASSERT_EQ(mlir::GetNameFromLoc(test_loc), "op"); ASSERT_TRUE(mlir::isa<mlir::CallSiteLoc>(test_loc)); auto callsite_loc = mlir::cast<mlir::CallSiteLoc>(test_loc); mlir::Location caller_loc = mlir::cast<mlir::CallSiteLoc>(test_loc).getCaller(); ASSERT_TRUE(mlir::isa<mlir::NameLoc>(caller_loc)); CheckFileLineColLocation(mlir::cast<mlir::NameLoc>(caller_loc).getChildLoc(), 10, 20); mlir::Location callee_loc = callsite_loc.getCallee(); ASSERT_TRUE(mlir::isa<mlir::NameLoc>(callee_loc)); CheckFileLineColLocation(mlir::cast<mlir::NameLoc>(callee_loc).getChildLoc(), 21, 0); constexpr char stack[] = R"stack(>> test.cc:10:20 >> test.cc:21:0)stack"; EXPECT_EQ(tensorflow::dtensor::DTensorLocationToString(test_loc), stack); } TEST(DTensorLocationTest, HandlesNameLocWithName) { mlir::MLIRContext ctx; mlir::Location test_loc = mlir::NameLoc::get(mlir::StringAttr::get(&ctx, "op@"), mlir::FileLineColLoc::get(&ctx, "test.cc", 10, 20)); test_loc = tensorflow::dtensor::DTensorLocation(test_loc, "test.cc", 21, "nested"); EXPECT_EQ(mlir::GetNameFromLoc(test_loc), "op/nested"); constexpr char 
stack[] = R"stack(>> test.cc:10:20 >> test.cc:21:0)stack"; EXPECT_EQ(tensorflow::dtensor::DTensorLocationToString(test_loc), stack); } }
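The tests above exercise DTensorLocation directly. As a usage illustration, a pass that clones an op might wrap the clone's location so later diagnostics carry the C++ call site as well; this is a hypothetical sketch built only on the DT_LOC2 macro and standard MLIR calls, and CloneWithTrackedLocation is not a helper that exists in the repository.

#include "mlir/IR/Builders.h"
#include "mlir/IR/Operation.h"
#include "tensorflow/dtensor/mlir/dtensor_location.h"

// Hypothetical helper: clone `op` and annotate the clone with a CallSiteLoc
// that stacks this file/line (under the name "cloned") on top of the original
// location, so DTensorLocationToString() prints the full provenance.
mlir::Operation* CloneWithTrackedLocation(mlir::OpBuilder& builder,
                                          mlir::Operation* op) {
  mlir::Operation* cloned = builder.clone(*op);
  cloned->setLoc(DT_LOC2(op->getLoc(), "cloned"));
  return cloned;
}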
1,047
cpp
tensorflow/tensorflow
spmd_expander
tensorflow/dtensor/mlir/spmd_expander.cc
tensorflow/dtensor/tests/spmd_expander_test.cc
#ifndef TENSORFLOW_DTENSOR_MLIR_SPMD_EXPANDER_H_ #define TENSORFLOW_DTENSOR_MLIR_SPMD_EXPANDER_H_ #include <memory> #include <string> #include "absl/types/optional.h" #include "mlir/IR/Builders.h" #include "mlir/IR/Operation.h" #include "mlir/IR/UseDefLists.h" #include "tensorflow/core/framework/registration/registration.h" #include "tensorflow/dtensor/cc/dstatus.h" #include "tensorflow/dtensor/cc/tensor_layout.h" #include "tensorflow/dtensor/mlir/spmd_expander_common.h" namespace tensorflow { namespace dtensor { class SPMDExpanderBase { public: virtual ~SPMDExpanderBase() = default; virtual StatusOr<mlir::Operation*> ExpandOp(mlir::Operation* op) = 0; virtual StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutForward( mlir::Operation* op, const llvm::DenseMap<int, Layout>& input_layouts); virtual StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutForward( mlir::Operation* op, const llvm::DenseMap<int, Layout>& input_layouts, const llvm::DenseMap<int, Layout>& output_layouts); virtual StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutBackward( mlir::Operation* op, const llvm::DenseMap<int, Layout>& output_layouts); virtual StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutBackward( mlir::Operation* op, const llvm::DenseMap<int, Layout>& input_layouts, const llvm::DenseMap<int, Layout>& output_layouts); Status ExpandOpAndSetLayout(mlir::Operation* op, mlir::Operation** output); }; Status RunSPMDExpansion(mlir::Operation* op, mlir::Operation** output); class SPMDExpanderRegistry { public: ~SPMDExpanderRegistry() = default; static SPMDExpanderRegistry* Global(); bool IsOpSupported(const std::string& full_op_name) { return GetPropagateFnForFullOpName(full_op_name) != nullptr; } SPMDExpanderBase* GetPropagateFnForOp(mlir::Operation* op); SPMDExpanderBase* GetPropagateFnForFullOpName( const std::string& full_op_name); InitOnStartupMarker RegisterPropagateFn( std::string opName, std::unique_ptr<SPMDExpanderBase> prop); private: absl::flat_hash_map<std::string, std::unique_ptr<SPMDExpanderBase>> op_to_propagate_fn_map_; }; #define REGISTER_SPMD(name, op, prop, ...) \ static ::tensorflow::InitOnStartupMarker const spmd_##name = \ InitOnStartupMarker{} \ << dtensor::SPMDExpanderRegistry::Global()->RegisterPropagateFn( \ mlir::op::getOperationName().str(), \ std::make_unique<prop>(__VA_ARGS__)) #define REGISTER_SPMD_BY_OP_NAME(expander_name, op_name, prop, ...) 
\ static ::tensorflow::InitOnStartupMarker const spmd_##expander_name = \ InitOnStartupMarker{} \ << dtensor::SPMDExpanderRegistry::Global()->RegisterPropagateFn( \ op_name, std::make_unique<prop>(__VA_ARGS__)) } } #endif #include "tensorflow/dtensor/mlir/spmd_expander.h" #include <climits> #include <cstdint> #include <iterator> #include <memory> #include <optional> #include <string> #include "absl/container/flat_hash_map.h" #include "absl/strings/str_cat.h" #include "absl/types/optional.h" #include "llvm/ADT/DenseMap.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/Operation.h" #include "mlir/IR/OperationSupport.h" #include "mlir/Support/LLVM.h" #include "tensorflow/compiler/mlir/tensorflow/ir/tf_device.h" #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/dtensor/cc/constants.h" #include "tensorflow/dtensor/cc/dstatus.h" #include "tensorflow/dtensor/cc/dtensor_utils.h" #include "tensorflow/dtensor/cc/tensor_layout.h" #include "tensorflow/dtensor/mlir/collectives.h" #include "tensorflow/dtensor/mlir/expansions/replicated_spmd_expander.h" #include "tensorflow/dtensor/mlir/ir/tf_dtensor.h" #include "tensorflow/dtensor/mlir/layout_parsing.h" #include "tensorflow/dtensor/mlir/op_utils.h" #include "tensorflow/dtensor/mlir/shape_utils.h" #include "tensorflow/dtensor/mlir/spmd_expander_common.h" #include "tensorflow/dtensor/proto/layout.pb.h" namespace tensorflow { namespace dtensor { namespace { Status AdjustPartedLayout(const llvm::DenseMap<int, Layout>& input_layouts, llvm::DenseMap<int, Layout>* computed_layouts) { bool input_has_parted_layout = false; for (const auto& input_layout : input_layouts) { if (input_layout.second.type() == Layout::LayoutType::kParted) { input_has_parted_layout = true; break; } } if (input_has_parted_layout) { for (auto& computed_layout : *computed_layouts) { TF_ASSIGN_OR_RETURN(Layout parted, computed_layout.second.ToParted()); computed_layout.getSecond() = parted; } } return absl::OkStatus(); } bool SkipExpansionForPartedLayout(mlir::Operation* op) { if (llvm::isa<mlir::func::ReturnOp, mlir::tf_device::ReturnOp>(op)) { return false; } auto status_or_input_layouts = ExtractRequiredLayoutFromOperands(op); if (!status_or_input_layouts.ok()) { return false; } bool operand_uses_parted_layout = false; for (const auto& layout : status_or_input_layouts.value()) { if (layout.type() == Layout::LayoutType::kParted) { operand_uses_parted_layout = true; break; } } return operand_uses_parted_layout; } } SPMDExpanderRegistry* SPMDExpanderRegistry::Global() { static SPMDExpanderRegistry* registry = new SPMDExpanderRegistry(); return registry; } SPMDExpanderBase* SPMDExpanderRegistry::GetPropagateFnForFullOpName( const std::string& full_op_name) { auto key = full_op_name; auto fn = op_to_propagate_fn_map_.find(key); if (fn == op_to_propagate_fn_map_.end()) { if (EnableReplicatedSpmdAsDefault(key)) { LOG(WARNING) << full_op_name << " is defaulting to ReplicatedOpSPMDExpander. This " << " has performance implications as all inputs and outputs " << " will be replicated if they are not already. 
Please file a " << " feature request to TF DTensor to implement an efficient " << " SPMD for this operation."; RegisterPropagateFn(key, std::make_unique<ReplicatedOpSPMDExpander>( true)); return op_to_propagate_fn_map_.find(key)->second.get(); } else { return nullptr; } } return fn->second.get(); } SPMDExpanderBase* SPMDExpanderRegistry::GetPropagateFnForOp( mlir::Operation* op) { return GetPropagateFnForFullOpName(OpName(op)); } InitOnStartupMarker SPMDExpanderRegistry::RegisterPropagateFn( std::string opName, std::unique_ptr<SPMDExpanderBase> prop) { CHECK(op_to_propagate_fn_map_ .insert_or_assign(opName, std::move(prop)) .second); return {}; } Status SPMDExpanderBase::ExpandOpAndSetLayout(mlir::Operation* op, mlir::Operation** output) { TF_ASSIGN_OR_RETURN(std::vector<std::optional<Layout>> computed_layout, ExtractLayoutFromOp(op)); if (computed_layout.empty() && op->getNumResults() != 0) { return errors::InvalidArgument( absl::StrCat("No attached layout found for op : ", OpName(op), " This might be due to an error in layout propagation.") .c_str()); } TF_ASSIGN_OR_RETURN(const Mesh& mesh, ExtractDeviceMeshEnclosingCluster(op)); bool skip_expansion_for_parted_layout = SkipExpansionForPartedLayout(op); if (mesh.IsSingleDevice() || mesh.use_xla_spmd() || skip_expansion_for_parted_layout) { if (skip_expansion_for_parted_layout) { *output = InferSPMDExpandedLocalShape(op); } else { *output = op; } SetLayoutOnOp(*output, absl::Span<std::optional<Layout>>( computed_layout.data(), computed_layout.size())); return absl::OkStatus(); } llvm::SmallVector<llvm::SmallVector<int64_t, 4>, 4> global_output_shapes; global_output_shapes.reserve(op->getNumResults()); for (auto output_value : op->getResults()) { auto maybe_ranked = mlir::dyn_cast<mlir::RankedTensorType>(output_value.getType()); if (llvm::isa<mlir::TF::RestoreV2Op, mlir::TF::DTensorRestoreV2Op>(op) && (!maybe_ranked || !maybe_ranked.hasStaticShape())) continue; TF_ASSIGN_OR_RETURN(auto global_shape, ExtractGlobalOutputShape(output_value)); global_output_shapes.emplace_back(llvm::SmallVector<int64_t, 4>{ global_shape.begin(), global_shape.end()}); } TF_ASSIGN_OR_RETURN(*output, this->ExpandOp(op)); SetLayoutOnOp(*output, absl::Span<std::optional<Layout>>( computed_layout.data(), computed_layout.size())); for (const auto& output_layout_and_index : llvm::enumerate(llvm::zip((*output)->getResults(), computed_layout))) { const int index = output_layout_and_index.index(); const auto& output_and_layout = output_layout_and_index.value(); auto output_value = std::get<0>(output_and_layout); auto local_expanded_shape_or_status = GetShapeOfValue(output_value); if (!local_expanded_shape_or_status.ok()) continue; const auto local_expanded_shape = local_expanded_shape_or_status.value(); const auto& layout = std::get<1>(output_and_layout); const auto expected_global_shape = layout->GlobalShapeFromLocalShape(local_expanded_shape); for (const auto& expanded_and_true_global_shape : llvm::zip(global_output_shapes[index], expected_global_shape)) { const auto expanded_shape = std::get<0>(expanded_and_true_global_shape); const auto expected_shape = std::get<1>(expanded_and_true_global_shape); if (expanded_shape <= 0 || expected_shape <= 0) continue; if (expanded_shape != expected_shape) { return errors::Internal( "SPMD expansion resulted in op output inconsistent with the " "provided layout. 
Expected shape: <", absl::StrJoin(expected_global_shape, ","), "> got shape: <", absl::StrJoin(global_output_shapes[index], ","), ">"); } } } return absl::OkStatus(); } StatusOr<llvm::DenseMap<int, Layout>> SPMDExpanderBase::ComputeLayoutForward( mlir::Operation* op, const llvm::DenseMap<int, Layout>& input_layouts) { return errors::Unimplemented( "ComputeLayoutForward API must be implemented via the subclass."); } StatusOr<llvm::DenseMap<int, Layout>> SPMDExpanderBase::ComputeLayoutForward( mlir::Operation* op, const llvm::DenseMap<int, Layout>& input_layouts, const llvm::DenseMap<int, Layout>& output_layouts) { TF_ASSIGN_OR_RETURN(const Mesh& mesh, ExtractDeviceMeshEnclosingCluster(op)); if (mesh.IsSingleDevice()) { TF_ASSIGN_OR_RETURN( Layout layout, Layout::GetLayout(Layout::LayoutType::kSingleDevice, {}, mesh)); auto layouts = llvm::DenseMap<int, Layout>{}; for (int i = 0; i < op->getNumResults(); ++i) { layouts.insert({i, layout}); } return layouts; } TF_ASSIGN_OR_RETURN(auto layouts, ComputeLayoutForward(op, input_layouts)); TF_RETURN_IF_ERROR(AdjustPartedLayout(input_layouts, &layouts)); return layouts; } StatusOr<llvm::DenseMap<int, Layout>> SPMDExpanderBase::ComputeLayoutBackward( mlir::Operation* op, const llvm::DenseMap<int, Layout>& output_layouts) { return errors::Unimplemented( "ComputeLayoutBackward API must be implemented via the subclass."); } StatusOr<llvm::DenseMap<int, Layout>> SPMDExpanderBase::ComputeLayoutBackward( mlir::Operation* op, const llvm::DenseMap<int, Layout>& input_layouts, const llvm::DenseMap<int, Layout>& output_layouts) { TF_ASSIGN_OR_RETURN(const Mesh& mesh, ExtractDeviceMeshEnclosingCluster(op)); if (mesh.IsSingleDevice()) { TF_ASSIGN_OR_RETURN( Layout layout, Layout::GetLayout(Layout::LayoutType::kSingleDevice, {}, mesh)); auto layouts = llvm::DenseMap<int, Layout>{}; for (int i = 0; i < op->getNumOperands(); ++i) { layouts.insert({i, layout}); } return layouts; } return ComputeLayoutBackward(op, output_layouts); } Status RunSPMDExpansion(mlir::Operation* op, mlir::Operation** output) { SPMDExpanderBase* expander = SPMDExpanderRegistry::Global()->GetPropagateFnForOp(op); if (expander != nullptr) { return expander->ExpandOpAndSetLayout(op, output); } else { VLOG(1) << "No expansion found for " << OpName(op) << "\n"; *output = op; } return absl::OkStatus(); } } }
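For reference, a minimal sketch (not taken from the file above) of how a new expander would plug into the registry declared in the header: a hypothetical MyOpExpander overrides ExpandOp and the single-argument layout-propagation hooks, and is registered by full op name with REGISTER_SPMD_BY_OP_NAME. The op name "tf.MyOp" and the trivial layout rules are assumptions for illustration only.
#include <memory>
#include "llvm/ADT/DenseMap.h"
#include "mlir/IR/Operation.h"
#include "tensorflow/dtensor/mlir/spmd_expander.h"
namespace tensorflow {
namespace dtensor {
// Sketch only: a hypothetical expander with identity expansion and
// "copy the first known layout" propagation rules.
class MyOpExpander : public SPMDExpanderBase {
 public:
  StatusOr<mlir::Operation*> ExpandOp(mlir::Operation* op) override {
    // Identity expansion: leave the op unchanged on each device.
    return op;
  }
  StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutForward(
      mlir::Operation* op,
      const llvm::DenseMap<int, Layout>& input_layouts) override {
    // Reuse the first known operand layout for every result.
    llvm::DenseMap<int, Layout> layouts;
    if (!input_layouts.empty()) {
      for (int i = 0; i < op->getNumResults(); ++i)
        layouts.insert({i, input_layouts.begin()->second});
    }
    return layouts;
  }
  StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutBackward(
      mlir::Operation* op,
      const llvm::DenseMap<int, Layout>& output_layouts) override {
    // Mirror the result layouts back onto the operands.
    llvm::DenseMap<int, Layout> layouts;
    if (!output_layouts.empty()) {
      for (int i = 0; i < op->getNumOperands(); ++i)
        layouts.insert({i, output_layouts.begin()->second});
    }
    return layouts;
  }
};
// Registration by full op name string ("tf.MyOp" is hypothetical); the
// InitOnStartupMarker runs the registration at process startup.
REGISTER_SPMD_BY_OP_NAME(MyOpExpanderReg, "tf.MyOp", MyOpExpander);
}  // namespace dtensor
}  // namespace tensorflow
At runtime, RunSPMDExpansion would then reach this expander through SPMDExpanderRegistry::Global()->GetPropagateFnForOp, as in the implementation above.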
#include "tensorflow/dtensor/mlir/spmd_expander.h" #include <memory> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "llvm/ADT/DenseMap.h" #include "mlir/IR/Operation.h" #include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/dtensor/cc/dstatus.h" #include "tensorflow/dtensor/cc/tensor_layout.h" namespace tensorflow { namespace dtensor { namespace { using ::testing::IsNull; using ::testing::NotNull; class DummyExpander : public SPMDExpanderBase { StatusOr<mlir::Operation*> ExpandOp(mlir::Operation* op) override { return errors::Unimplemented(""); } StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutForward( mlir::Operation* op, const llvm::DenseMap<int, Layout>& input_layouts) override { return errors::Unimplemented(""); } StatusOr<llvm::DenseMap<int, Layout>> ComputeLayoutBackward( mlir::Operation* op, const llvm::DenseMap<int, Layout>& output_layouts) override { return errors::Unimplemented(""); } }; class SPMDExpanderRegistryTest : public ::testing::Test { public: SPMDExpanderRegistryTest() { registry_.RegisterPropagateFn(mlir::TF::AddOp::getOperationName().str(), std::make_unique<DummyExpander>()); } protected: SPMDExpanderRegistry registry_; }; TEST_F(SPMDExpanderRegistryTest, LookupFromOpName) { EXPECT_THAT(registry_.GetPropagateFnForFullOpName("tf.Add"), NotNull()); EXPECT_THAT(registry_.GetPropagateFnForFullOpName("Unknown"), IsNull()); } } } }
1,048
cpp
tensorflow/tensorflow
constant_folding
tensorflow/core/grappler/optimizers/constant_folding.cc
tensorflow/core/grappler/optimizers/constant_folding_test.cc
#ifndef TENSORFLOW_CORE_COMMON_RUNTIME_CONSTANT_FOLDING_H_ #define TENSORFLOW_CORE_COMMON_RUNTIME_CONSTANT_FOLDING_H_ #include "tensorflow/core/common_runtime/device.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/graph/graph.h" #include "tensorflow/core/platform/env.h" namespace tensorflow { using ConstantFoldNameGenerator = std::function<string(Graph* graph, string old_name)>; struct ConstantFoldingOptions { std::function<bool(const Node*)> consider = nullptr; const std::unordered_map<string, std::vector<PartialTensorShape>>* shape_map = nullptr; int64_t max_constant_size_in_bytes = 10 * 1024 * 1024; ConstantFoldNameGenerator generate_new_name = nullptr; }; Status ConstantFold(const ConstantFoldingOptions& opts, FunctionLibraryRuntime* function_library, Env* env, const Device* partition_device, Graph* graph, bool* was_mutated); } #endif #include "tensorflow/core/common_runtime/constant_folding.h" #include <algorithm> #include <atomic> #include <set> #include <string> #include <unordered_map> #include <vector> #include "absl/container/flat_hash_set.h" #include "tensorflow/core/common_runtime/device_factory.h" #include "tensorflow/core/common_runtime/executor.h" #include "tensorflow/core/common_runtime/function_utils.h" #include "tensorflow/core/common_runtime/graph_runner.h" #include "tensorflow/core/common_runtime/memory_types.h" #include "tensorflow/core/common_runtime/rendezvous_mgr.h" #include "tensorflow/core/framework/log_memory.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/graph/algorithm.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/graph/subgraph.h" #include "tensorflow/core/lib/core/threadpool.h" #include "tensorflow/core/lib/gtl/cleanup.h" #include "tensorflow/core/lib/gtl/flatset.h" #include "tensorflow/core/lib/strings/strcat.h" #include "tensorflow/core/platform/denormal.h" #include "tensorflow/core/platform/setround.h" #include "tensorflow/core/public/session_options.h" namespace tensorflow { namespace { const char kScopedAllocatorAttrName[] = "_scoped_allocator"; static absl::flat_hash_set<std::string>* kBlockList = new absl::flat_hash_set<std::string>({ "StatelessRandomGetKeyCounter", }); static absl::flat_hash_set<std::string>* kAllowList = new absl::flat_hash_set<std::string>({ "Cast", "Const", "Identity", "IdentityN", "Less", "NoOp", "StopGradient", }); bool IsShapeOp(const Node* n) { const auto& ts = n->type_string(); return ts == "Shape" || ts == "ShapeN" || ts == "Rank" || ts == "Size"; } bool ReadPartialShapesFromShapeMap( const Node* n, const std::unordered_map<string, std::vector<PartialTensorShape>>* shape_map, std::vector<PartialTensorShape>* input_shapes) { CHECK(shape_map != nullptr); input_shapes->resize(n->num_inputs()); for (const Edge* in : n->in_edges()) { if (in->IsControlEdge()) continue; const auto known_shape_iter = shape_map->find(in->src()->name()); if (known_shape_iter == shape_map->end()) { return false; } const auto& known_shape = known_shape_iter->second; CHECK_GT(known_shape.size(), in->src_output()) << known_shape_iter->first; DCHECK_GE(in->dst_input(), 0); DCHECK_LT(in->dst_input(), input_shapes->size()); (*input_shapes)[in->dst_input()] = known_shape[in->src_output()]; } return true; } bool MaybeReplaceShapeOrShapeNOp( const Node* n, const std::vector<PartialTensorShape>& input_shapes, std::unordered_map<const 
Node*, std::vector<Tensor>>* shape_replacement_map) { std::vector<Tensor> defined_shape; for (const auto& shape : input_shapes) { if (!shape.IsFullyDefined()) { return false; } const int rank = shape.dims(); DataType op_type = n->output_type(0); Tensor t(op_type, TensorShape({rank})); if (op_type == DT_INT64) { auto vec = t.vec<int64_t>(); for (int i = 0; i < rank; ++i) { vec(i) = shape.dim_size(i); } } else { CHECK(op_type == DT_INT32); auto vec = t.vec<int32>(); for (int i = 0; i < rank; ++i) { if (shape.dim_size(i) > INT_MAX) { VLOG(1) << "Node " << n->name() << " has input shape dimension " << i << " of " << shape.dim_size(i) << " but type INT32 " << " so not replacing as constant: this will trigger a " "runtime error later."; return false; } vec(i) = static_cast<int32>(shape.dim_size(i)); } } defined_shape.push_back(t); } shape_replacement_map->insert({n, defined_shape}); return true; } bool MaybeReplaceRankOp(const Node* n, const std::vector<PartialTensorShape>& input_shapes, std::unordered_map<const Node*, std::vector<Tensor>>* shape_replacement_map) { CHECK_EQ(input_shapes.size(), 1); if (input_shapes[0].unknown_rank()) { return false; } Tensor t(DT_INT32, TensorShape({})); t.scalar<int32>()() = input_shapes[0].dims(); shape_replacement_map->insert({n, {t}}); return true; } bool MaybeReplaceSizeOp(const Node* n, const std::vector<PartialTensorShape>& input_shapes, std::unordered_map<const Node*, std::vector<Tensor>>* shape_replacement_map) { CHECK_EQ(input_shapes.size(), 1); if (!input_shapes[0].IsFullyDefined()) { return false; } DataType op_type = n->output_type(0); Tensor t(op_type, TensorShape({})); int64_t size = input_shapes[0].num_elements(); if (op_type == DT_INT64) { t.scalar<int64_t>()() = size; } else { CHECK(op_type == DT_INT32); if (size > INT_MAX) { VLOG(1) << "Node " << n->name() << " has input shape size " << size << " but type INT32 " << " so not replacing as constant: this will trigger a runtime " "error later."; return false; } t.scalar<int32>()() = static_cast<int32>(size); } shape_replacement_map->insert({n, {t}}); return true; } bool MaybeReplaceShapeOp( const Node* n, const std::unordered_map<string, std::vector<PartialTensorShape>>* shape_map, std::unordered_map<const Node*, std::vector<Tensor>>* shape_replacement_map) { if (shape_map == nullptr || !IsShapeOp(n)) { return false; } std::vector<PartialTensorShape> input_shapes; if (!ReadPartialShapesFromShapeMap(n, shape_map, &input_shapes)) { return false; } const auto& ts = n->type_string(); if (ts == "Shape" || ts == "ShapeN") { if (!MaybeReplaceShapeOrShapeNOp(n, input_shapes, shape_replacement_map)) { return false; } } else if (ts == "Rank") { if (!MaybeReplaceRankOp(n, input_shapes, shape_replacement_map)) { return false; } } else { CHECK_EQ(ts, "Size"); if (!MaybeReplaceSizeOp(n, input_shapes, shape_replacement_map)) { return false; } } return true; } bool IsConstantFoldable( const Node* n, const std::unordered_map<string, std::vector<PartialTensorShape>>* shape_map, const std::function<bool(const Node*)>& consider, int64_t max_constant_size_in_bytes, std::unordered_map<const Node*, std::vector<Tensor>>* shape_replacement_map) { if (n->IsConstant()) { return n->output_type(0) != DT_RESOURCE; } if (MaybeReplaceShapeOp(n, shape_map, shape_replacement_map)) { return true; } if (n->op_def().is_stateful()) { return false; } if (consider && !consider(n)) { return false; } if (shape_map != nullptr) { auto shape_it = shape_map->find(n->name()); if (shape_it != shape_map->end()) { for (int64_t i = 0; i < 
shape_it->second.size(); ++i) { const auto& out_shape = shape_it->second[i]; if (!out_shape.IsFullyDefined() && !kAllowList->contains(n->type_string())) { return false; } if (out_shape.num_elements() * DataTypeSize(n->output_type(i)) > max_constant_size_in_bytes) { return false; } } } } if (n->IsControlFlow() || n->IsSend() || n->IsRecv()) { return false; } if (n->IsGetSessionHandle() || n->IsGetSessionTensor() || n->IsDeleteSessionTensor()) { return false; } if (n->IsSource()) { return false; } if (n->IsSink()) { return false; } if (n->IsFakeParam()) { return false; } if (!KernelDefAvailable(DeviceType(DEVICE_CPU), n->def())) { return false; } if (n->attrs().Find(kScopedAllocatorAttrName) != nullptr) { VLOG(2) << "Skip node [" << n->DebugString() << "] for constant folding due to scoped allocator"; return false; } if (kBlockList->contains(n->type_string())) { VLOG(2) << "Skip node [" << n->DebugString() << "] for constant folding, it is in constant folding block list"; return false; } return true; } void ConsiderConstantFoldableNode( Node* n, const ConstantFoldingOptions& opts, std::vector<Node*>* nodes, std::unordered_map<const Node*, gtl::FlatSet<Node*>>* constant_control_deps, std::unordered_map<const Node*, std::vector<Tensor>>* shape_replacement_map, bool* internal_node_inserted) { if (!IsConstantFoldable(n, opts.shape_map, opts.consider, opts.max_constant_size_in_bytes, shape_replacement_map)) { return; } bool all_parents_constant = true; for (const Edge* in : n->in_edges()) { if (!in->IsControlEdge() && constant_control_deps->count(in->src()) == 0) { all_parents_constant = false; break; } } if (all_parents_constant || shape_replacement_map->count(n) != 0) { gtl::FlatSet<Node*>& control_deps = (*constant_control_deps)[n]; for (const Edge* e : n->in_edges()) { if (constant_control_deps->count(e->src()) == 0) { if (!e->src()->IsSource()) { control_deps.insert(e->src()); } } else { const gtl::FlatSet<Node*>& parent_deps = (*constant_control_deps)[e->src()]; control_deps.insert(parent_deps.begin(), parent_deps.end()); } } nodes->push_back(n); if (!n->IsConstant()) { *internal_node_inserted = true; } } } void FindConstantFoldableNodes( const Graph* graph, const ConstantFoldingOptions& opts, std::vector<Node*>* nodes, std::unordered_map<const Node*, gtl::FlatSet<Node*>>* constant_control_deps, std::unordered_map<const Node*, std::vector<Tensor>>* shape_replacement_map) { bool internal_node_inserted = false; ReverseDFS( *graph, nullptr, [nodes, constant_control_deps, shape_replacement_map, &internal_node_inserted, &opts](Node* n) { ConsiderConstantFoldableNode(n, opts, nodes, constant_control_deps, shape_replacement_map, &internal_node_inserted); }, NodeComparatorName()); if (!internal_node_inserted) { nodes->clear(); constant_control_deps->clear(); } } typedef std::pair<Node*, int> NodeAndOutput; void AddNodeToConstantGraph( Node* n, std::unordered_map<Node*, std::vector<Node*>>* node_map, Graph* constant_graph) { std::vector<Node*>& added = (*node_map)[n]; added.push_back(constant_graph->CopyNode(n)); for (const Edge* in_edge : n->in_edges()) { if (!in_edge->IsControlEdge()) { Node* in = in_edge->src(); auto it = node_map->find(in); CHECK(it != node_map->end()) << n->DebugString() << " <-" << in->DebugString(); if (it->second.size() == 1) { constant_graph->AddEdge(it->second[0], in_edge->src_output(), added[0], in_edge->dst_input()); } else { constant_graph->AddEdge(it->second[in_edge->src_output()], 0, added[0], in_edge->dst_input()); } } } } void AddShapeNodeToConstantGraph( Node* n, 
const std::unordered_map<const Node*, std::vector<Tensor>>& shape_replacement_map, std::unordered_map<Node*, std::vector<Node*>>* node_map, const ConstantFoldNameGenerator& generate_new_name, Graph* constant_graph) { std::vector<Node*>& added = (*node_map)[n]; const string& node_name = n->name(); for (const Tensor& t : shape_replacement_map.at(n)) { auto builder = NodeDefBuilder(generate_new_name(constant_graph, node_name), "Const") .Attr("dtype", t.dtype()) .Attr("value", t); NodeDef def; CHECK(builder.Finalize(&def).ok()); Node* constant_node; CHECK(NodeBuilder(builder).Finalize(constant_graph, &constant_node).ok()); added.push_back(constant_node); } } Graph* GetConstantGraph( const Graph* orig_graph, const std::vector<Node*>& nodes, const std::unordered_map<const Node*, std::vector<Tensor>>& shape_replacement_map, std::map<NodeAndOutput, NodeAndOutput>* tensors_to_fetch, const ConstantFoldNameGenerator& generate_new_name) { Graph* constant_graph = new Graph(orig_graph->op_registry()); std::unordered_map<Node*, std::vector<Node*>> node_map; node_map[orig_graph->source_node()] = {constant_graph->source_node()}; node_map[orig_graph->sink_node()] = {constant_graph->sink_node()}; for (Node* n : nodes) { if (shape_replacement_map.count(n) == 0) { AddNodeToConstantGraph(n, &node_map, constant_graph); } else { AddShapeNodeToConstantGraph(n, shape_replacement_map, &node_map, generate_new_name, constant_graph); } } for (auto const& added_nodes : node_map) { for (const Edge* out_edge : added_nodes.first->out_edges()) { if (node_map.count(out_edge->dst()) == 0) { if (out_edge->IsControlEdge()) continue; if (added_nodes.second.size() == 1) { tensors_to_fetch->insert( {{added_nodes.second[0], out_edge->src_output()}, {added_nodes.first, out_edge->src_output()}}); } else { tensors_to_fetch->insert( {{added_nodes.second[out_edge->src_output()], 0}, {added_nodes.first, out_edge->src_output()}}); } } } } return constant_graph; } bool ReplaceTensorWithConstant( Graph* graph, const Device* partition_device, NodeAndOutput tensor, const Tensor& constant, const gtl::FlatSet<Node*>& control_deps, int64_t max_constant_size_in_bytes, const ConstantFoldNameGenerator& generate_new_name) { if (tensor.first->IsConstant()) { return false; } DeviceType device_type = partition_device ? 
DeviceType{partition_device->device_type()} : DEVICE_CPU; if (partition_device && device_type != DEVICE_CPU) { MemoryTypeVector input_mvec; MemoryTypeVector output_mvec; if (!MemoryTypesForNode(graph->op_registry(), device_type, tensor.first->def(), &input_mvec, &output_mvec) .ok()) { return false; } for (int i = 0; i < output_mvec.size(); i++) { MemoryType memory_type = output_mvec[i]; bool is_int32 = tensor.first->output_type(i) == DT_INT32; if ((memory_type == HOST_MEMORY && !is_int32) || (memory_type == DEVICE_MEMORY && is_int32)) { return false; } } } if (constant.TotalBytes() > max_constant_size_in_bytes) { return false; } Node* n = tensor.first; std::vector<const Edge*> edges_to_remove; for (const Edge* out_edge : n->out_edges()) { if (out_edge->src_output() == tensor.second) { edges_to_remove.push_back(out_edge); } } const string& node_name = n->name(); Node* constant_node; auto builder = NodeDefBuilder(generate_new_name(graph, node_name), "Const") .Attr("dtype", constant.dtype()) .Attr("value", constant); if (partition_device) { builder.Device(partition_device->name()); } NodeDef def; if (!builder.Finalize(&def).ok()) { return false; } const KernelDef* kdef; if (!FindKernelDef(device_type, def, &kdef, nullptr).ok()) { return false; } VLOG(1) << "Replacing " << tensor.first->name() << " :: " << tensor.second << " with a constant"; if (!NodeBuilder(builder).Finalize(graph, &constant_node).ok()) { return false; } for (auto edge : edges_to_remove) { graph->AddEdge(constant_node, 0, edge->dst(), edge->dst_input()); graph->RemoveEdge(edge); } if (control_deps.empty()) { graph->AddControlEdge(graph->source_node(), constant_node); } else { for (Node* node : control_deps) { graph->AddControlEdge(node, constant_node); } } if (partition_device) { constant_node->set_assigned_device_name(partition_device->name()); } return true; } } Status ConstantFold(const ConstantFoldingOptions& opts, FunctionLibraryRuntime* function_library, Env* env, const Device* partition_device, Graph* graph, bool* was_mutated) { port::ScopedFlushDenormal flush; port::ScopedSetRound round(FE_TONEAREST); DumpGraph("Before", graph); ConstantFoldNameGenerator generate_new_name = opts.generate_new_name; std::atomic_int_fast64_t constant_unique_id{0}; if (generate_new_name == nullptr) { generate_new_name = [&constant_unique_id](Graph* graph, string old_name) { return strings::StrCat(graph->NewName(old_name), "__cf__", constant_unique_id.fetch_add(1)); }; } std::vector<Node*> constant_foldable_nodes; std::unordered_map<const Node*, gtl::FlatSet<Node*>> constant_control_deps; std::unordered_map<const Node*, std::vector<Tensor>> shape_replacement_map; FindConstantFoldableNodes(graph, opts, &constant_foldable_nodes, &constant_control_deps, &shape_replacement_map); if (constant_foldable_nodes.empty()) { VLOG(1) << "No constant foldable nodes found"; *was_mutated = false; return absl::OkStatus(); } std::map<NodeAndOutput, NodeAndOutput> tensors_to_fetch; std::unique_ptr<Graph> constant_graph( GetConstantGraph(graph, constant_foldable_nodes, shape_replacement_map, &tensors_to_fetch, generate_new_name)); DumpGraph("Constant graph", constant_graph.get()); if (tensors_to_fetch.empty()) { VLOG(1) << "No constant nodes found that feed into the original graph."; *was_mutated = false; return absl::OkStatus(); } VLOG(1) << "Constant foldable " << constant_graph->num_node_ids() << " : " << graph->num_node_ids(); std::vector<string> tensors_to_fetch_names; std::vector<NodeAndOutput> tensors_to_replace; std::vector<std::pair<NodeAndOutput, 
NodeAndOutput>> tensors_to_fetch_sorted( tensors_to_fetch.begin(), tensors_to_fetch.end()); std::sort(tensors_to_fetch_sorted.begin(), tensors_to_fetch_sorted.end(), [](const std::pair<NodeAndOutput, NodeAndOutput>& n1, const std::pair<NodeAndOutput, NodeAndOutput>& n2) { return std::tie(n1.first.first->name(), n1.first.second) < std::tie(n2.first.first->name(), n2.first.second); }); for (auto n : tensors_to_fetch_sorted) { tensors_to_fetch_names.push_back( strings::StrCat(n.first.first->name(), ":", n.first.second)); tensors_to_replace.push_back(n.second); } auto graph_runner = std::unique_ptr<GraphRunner>(new GraphRunner(env)); std::vector<Tensor> outputs; auto delete_tensors = gtl::MakeCleanup([&graph_runner, &outputs] { outputs.clear(); graph_runner.reset(nullptr); }); Status s = graph_runner->Run(constant_graph.get(), function_library, {} , tensors_to_fetch_names, &outputs); if (!s.ok()) { VLOG(1) << "Could not fetch constants: " << s; *was_mutated = false; return s; } int32_t num_nodes_replaced = 0; for (size_t c = 0; c < outputs.size(); ++c) { const gtl::FlatSet<Node*>& control_deps = constant_control_deps[tensors_to_replace[c].first]; if (ReplaceTensorWithConstant( graph, partition_device, tensors_to_replace[c], outputs[c], control_deps, opts.max_constant_size_in_bytes, generate_new_name)) { ++num_nodes_replaced; } } DumpGraph("After", graph); *was_mutated = (num_nodes_replaced > 0); return absl::OkStatus(); } }
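A minimal call-pattern sketch for the function above, assuming an already-built Graph g; the excluded node name "keep_me" and the 1 MiB size cap are illustrative values, not defaults from the header (the header's default cap is 10 MiB).
#include "tensorflow/core/common_runtime/constant_folding.h"
#include "tensorflow/core/graph/graph.h"
#include "tensorflow/core/platform/env.h"
// Sketch only: fold constants in-place on an existing graph, skipping a
// hypothetical node named "keep_me" and capping folded tensors at 1 MiB.
tensorflow::Status FoldGraphConstants(tensorflow::Graph* g) {
  tensorflow::ConstantFoldingOptions opts;
  opts.consider = [](const tensorflow::Node* n) {
    return n->name() != "keep_me";  // hypothetical node to exclude
  };
  opts.max_constant_size_in_bytes = 1 << 20;
  bool was_mutated = false;
  TF_RETURN_IF_ERROR(tensorflow::ConstantFold(
      opts, /*function_library=*/nullptr, tensorflow::Env::Default(),
      /*partition_device=*/nullptr, g, &was_mutated));
  if (!was_mutated) {
    VLOG(1) << "Constant folding made no changes";
  }
  return tensorflow::OkStatus();
}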
#include "tensorflow/core/common_runtime/constant_folding.h" #include <map> #include <string> #include <unordered_map> #include <vector> #include "tensorflow/cc/ops/array_ops_internal.h" #include "tensorflow/cc/ops/nn_ops.h" #include "tensorflow/cc/ops/sendrecv_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/core/common_runtime/device.h" #include "tensorflow/core/common_runtime/device_factory.h" #include "tensorflow/core/common_runtime/device_mgr.h" #include "tensorflow/core/framework/device_attributes.pb.h" #include "tensorflow/core/framework/function_testlib.h" #include "tensorflow/core/framework/node_def_util.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/core/threadpool.h" #include "tensorflow/core/lib/strings/strcat.h" #include "tensorflow/core/platform/null_file_system.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/public/session_options.h" namespace tensorflow { namespace { class ConstantFoldingTest : public ::testing::Test { protected: template <typename T> void ExpectNodeClose(const Node* n, gtl::ArraySlice<T> values, TensorShape shape) { EXPECT_TRUE(n->IsConstant()); const TensorProto* tensor_proto; TF_EXPECT_OK(GetNodeAttr(n->attrs(), "value", &tensor_proto)); DataType dtype; TF_EXPECT_OK(GetNodeAttr(n->attrs(), "dtype", &dtype)); Tensor t(dtype); EXPECT_TRUE(t.FromProto(*tensor_proto)); test::ExpectClose(t, test::AsTensor(values, shape)); } template <typename T> void ExpectNodeEqual(const Node* n, gtl::ArraySlice<T> values, TensorShape shape) { EXPECT_TRUE(n->IsConstant()); const TensorProto* tensor_proto; TF_EXPECT_OK(GetNodeAttr(n->attrs(), "value", &tensor_proto)); DataType dtype; TF_EXPECT_OK(GetNodeAttr(n->attrs(), "dtype", &dtype)); Tensor t(dtype); EXPECT_TRUE(t.FromProto(*tensor_proto)); test::ExpectTensorEqual<T>(t, test::AsTensor(values, shape)); } void BuildSimpleGraph(Scope* scope) { Scope& s = *scope; auto a = ops::Const<float>(s, {1.0, 0.0, 0.0, 1.0}, {2, 2}); auto b = ops::Const<float>(s, {1.0, 2.0, 3.0, 4.0}, {2, 2}); auto c = ops::Const<float>(s, {0.0, 1.0, 1.0, 0.0}, {2, 2}); auto m1 = ops::MatMul(s, a, b); auto s1 = ops::_Send(s.WithOpName("s1"), m1, "m1", "sender", 0, "receiver"); auto m2 = ops::MatMul(s.WithOpName("m2"), b, c); auto s2 = ops::_Send(s.WithOpName("s2"), m2, "m2", "sender", 0, "receiver"); } }; class FakeDevice : public Device { private: explicit FakeDevice(const DeviceAttributes& device_attributes) : Device(nullptr, device_attributes) {} public: Status Sync() override { return errors::Unimplemented("FakeDevice::Sync()"); } Allocator* GetAllocator(AllocatorAttributes attr) override { return nullptr; } static std::unique_ptr<Device> Make(const string& name, const string& type) { DeviceAttributes device_attributes; device_attributes.set_name(name); device_attributes.set_device_type(DeviceType(type).type()); return std::unique_ptr<Device>(new FakeDevice(device_attributes)); } }; TEST_F(ConstantFoldingTest, Basic) { Scope s = Scope::NewRootScope(); BuildSimpleGraph(&s); Graph g(OpRegistry::Global()); TF_ASSERT_OK(s.ToGraph(&g)); bool was_mutated; TF_ASSERT_OK(ConstantFold(ConstantFoldingOptions{}, nullptr, Env::Default(), nullptr, &g, &was_mutated)); EXPECT_TRUE(was_mutated); std::unordered_map<string, Node*> index = 
g.BuildNodeNameIndex(); Node* s1 = index.at("s1"); Node* s2 = index.at("s2"); EXPECT_EQ(1, s1->num_inputs()); ExpectNodeClose<float>(*(s1->in_nodes().begin()), {1.0, 2.0, 3.0, 4.0}, {2, 2}); EXPECT_EQ(1, s2->num_inputs()); ExpectNodeClose<float>(*(s2->in_nodes().begin()), {2.0, 1.0, 4.0, 3.0}, {2, 2}); } TEST_F(ConstantFoldingTest, DeterministicFolding) { auto build_graph_and_constant_folding = [](Graph& g, bool swap) -> Status { Scope s = Scope::NewRootScope(); auto a = ops::Const<float>(s, {1.0}, {}); auto b = ops::Const<float>(s, {2.0}, {}); if (swap) { auto add1 = ops::Add(s.WithOpName("add1"), a, b); auto add2 = ops::Add(s.WithOpName("add2"), a, b); auto s1 = ops::_Send(s.WithOpName("s1"), add1, "add1", "sender", 0, "receiver"); auto s2 = ops::_Send(s.WithOpName("s2"), add2, "add2", "sender", 0, "receiver"); } else { auto add2 = ops::Add(s.WithOpName("add2"), a, b); auto add1 = ops::Add(s.WithOpName("add1"), a, b); auto s1 = ops::_Send(s.WithOpName("s1"), add1, "add1", "sender", 0, "receiver"); auto s2 = ops::_Send(s.WithOpName("s2"), add2, "add2", "sender", 0, "receiver"); } TF_CHECK_OK(s.ToGraph(&g)); bool was_mutated; int64_t unique_id = 0; auto generate_new_name = [&unique_id](Graph* graph, string old_name) { return strings::StrCat(graph->NewName(old_name), "__cf__", unique_id++); }; ConstantFoldingOptions opt{}; opt.generate_new_name = generate_new_name; TF_CHECK_OK( ConstantFold(opt, nullptr, Env::Default(), nullptr, &g, &was_mutated)); return absl::OkStatus(); }; Graph g1(OpRegistry::Global()); TF_ASSERT_OK(build_graph_and_constant_folding(g1, false)); Graph g2(OpRegistry::Global()); TF_ASSERT_OK(build_graph_and_constant_folding(g2, true)); EXPECT_EQ(g1.num_nodes(), g2.num_nodes()); auto index = g2.BuildNodeNameIndex(); for (int64_t i = 0; i < g1.num_nodes(); ++i) { Node* n1 = g1.FindNodeId(i); EXPECT_GT(index.count(n1->name()), 0); } } TEST_F(ConstantFoldingTest, ConsiderFunction) { Scope s = Scope::NewRootScope(); BuildSimpleGraph(&s); Graph g(OpRegistry::Global()); TF_ASSERT_OK(s.ToGraph(&g)); ConstantFoldingOptions opts; opts.consider = [](const Node* n) { return "m2" != n->name(); }; bool was_mutated; TF_ASSERT_OK( ConstantFold(opts, nullptr, Env::Default(), nullptr, &g, &was_mutated)); EXPECT_TRUE(was_mutated); std::unordered_map<string, Node*> index = g.BuildNodeNameIndex(); Node* s1 = index.at("s1"); Node* s2 = index.at("s2"); Node* m2 = index.at("m2"); EXPECT_EQ(1, s1->num_inputs()); ExpectNodeClose<float>(*(s1->in_nodes().begin()), {1.0, 2.0, 3.0, 4.0}, {2, 2}); EXPECT_EQ(1, s2->num_inputs()); EXPECT_EQ(*(s2->in_nodes().begin()), m2); } TEST_F(ConstantFoldingTest, TestNoReplaceAnotherConstant) { Graph g(OpRegistry::Global()); { Scope s = Scope::NewRootScope(); BuildSimpleGraph(&s); auto d = ops::Const<float>(s.WithOpName("d"), {1.0, 0.0, 0.0, 1.0}, {2, 2}); auto s3 = ops::_Send(s.WithOpName("s3"), d, "d", "sender", 0, "receiver"); TF_ASSERT_OK(s.ToGraph(&g)); } bool was_mutated; TF_ASSERT_OK(ConstantFold(ConstantFoldingOptions{}, nullptr, Env::Default(), nullptr, &g, &was_mutated)); EXPECT_TRUE(was_mutated); std::unordered_map<string, Node*> index = g.BuildNodeNameIndex(); Node* d = index.at("d"); Node* s3 = index.at("s3"); EXPECT_EQ(1, s3->num_inputs()); EXPECT_EQ(*(s3->in_nodes().begin()), d); } TEST_F(ConstantFoldingTest, TwoOutputs) { Graph g(OpRegistry::Global()); { Scope s = Scope::NewRootScope(); auto s0 = ops::Const<int>(s, {1}, {1}); auto s1 = ops::Const<int>(s, {2, 2}, {2}); auto b = ops::internal::BroadcastGradientArgs(s, s0, s1); auto b0 = 
ops::_Send(s.WithOpName("b0"), ops::Identity(s, b.r0), "b0", "sender", 0, "receiver"); auto b1 = ops::_Send(s.WithOpName("b1"), ops::Identity(s, b.r1), "b1", "sender", 0, "receiver"); TF_ASSERT_OK(s.ToGraph(&g)); } bool was_mutated; TF_ASSERT_OK(ConstantFold(ConstantFoldingOptions{}, nullptr, Env::Default(), nullptr, &g, &was_mutated)); EXPECT_TRUE(was_mutated); std::unordered_map<string, Node*> index = g.BuildNodeNameIndex(); Node* b0 = index.at("b0"); Node* b1 = index.at("b1"); EXPECT_EQ(1, b0->num_inputs()); ExpectNodeEqual<int>(*(b0->in_nodes().begin()), {0, 1}, {2}); EXPECT_EQ(1, b1->num_inputs()); ExpectNodeEqual<int>(*(b1->in_nodes().begin()), {}, {0}); } TEST_F(ConstantFoldingTest, TwoOutputsFoldOneOutput) { Graph g(OpRegistry::Global()); { Scope s = Scope::NewRootScope(); auto s0 = ops::Const<int>(s, {1}, {1}); auto s1 = ops::Const<int>(s, {2, 2}, {2}); auto b = ops::internal::BroadcastGradientArgs(s, s0, s1); auto b0 = ops::_Send(s.WithOpName("b0"), ops::Identity(s, b.r0), "b0", "sender", 0, "receiver"); auto b1_ident = ops::Identity(s.WithOpName("b1_ident"), b.r1); auto b1 = ops::_Send(s.WithOpName("b1"), b1_ident, "b1", "sender", 0, "receiver"); TF_ASSERT_OK(s.ToGraph(&g)); } ConstantFoldingOptions opts; opts.consider = [](const Node* n) { return "b1_ident" != n->name(); }; bool was_mutated; TF_ASSERT_OK( ConstantFold(opts, nullptr, Env::Default(), nullptr, &g, &was_mutated)); EXPECT_TRUE(was_mutated); std::unordered_map<string, Node*> index = g.BuildNodeNameIndex(); Node* b0 = index.at("b0"); Node* b1 = index.at("b1"); Node* b1_ident = index.at("b1_ident"); ASSERT_EQ(1, b0->num_inputs()); ExpectNodeEqual<int>(*(b0->in_nodes().begin()), {0, 1}, {2}); ASSERT_EQ(1, b1->num_inputs()); EXPECT_EQ(*(b1->in_nodes().begin()), b1_ident); ASSERT_EQ(1, b1_ident->num_inputs()); ExpectNodeEqual<int>(*(b1_ident->in_nodes().begin()), {}, {0}); } TEST_F(ConstantFoldingTest, TestNoReplaceLargeConstant) { Graph g(OpRegistry::Global()); { Scope s = Scope::NewRootScope(); auto s0 = ops::Const<int>(s, 0, {5 * 1024 * 256}); auto s1 = ops::Const<int>(s, 0, {5 * 1024 * 256 + 1}); auto concat_dim = ops::Const<int>(s, 0); auto concat = ops::Concat(s, {s0, s1}, concat_dim); auto concat_send = ops::_Send(s.WithOpName("concat_send"), concat, "concat_send", "sender", 0, "receiver"); TF_ASSERT_OK(s.ToGraph(&g)); } bool was_mutated; TF_EXPECT_OK(ConstantFold(ConstantFoldingOptions{}, nullptr, Env::Default(), nullptr, &g, &was_mutated)); EXPECT_FALSE(was_mutated); ConstantFoldingOptions opt; opt.max_constant_size_in_bytes = 10 * 1024 * 1024 + 4; TF_EXPECT_OK( ConstantFold(opt, nullptr, Env::Default(), nullptr, &g, &was_mutated)); EXPECT_TRUE(was_mutated); } TEST_F(ConstantFoldingTest, TestNoReplaceFunctionCall) { FunctionDefLibrary flib; *flib.add_function() = test::function::XTimesTwo(); FunctionLibraryDefinition flib_def(OpRegistry::Global(), flib); Graph g(flib_def); { Scope s = Scope::NewRootScope(); auto c = ops::Const<int32>(s.WithOpName("c"), {1}, {1}); TF_EXPECT_OK(s.graph()->AddFunctionLibrary(flib)); NodeDef def; TF_ASSERT_OK( NodeDefBuilder("times_two", "XTimesTwo", s.graph()->op_registry()) .Input(c.name(), 0, DT_INT32) .Finalize(&def)); Status status; Node* times_two = s.graph()->AddNode(def, &status); TF_ASSERT_OK(status); TF_ASSERT_OK(s.DoShapeInference(times_two)); s.graph()->AddEdge(c.node(), 0, times_two, 0); auto times_two_send = ops::_Send(s.WithOpName("times_two_send"), Output(times_two), "times_two_send", "sender", 0, "receiver"); TF_ASSERT_OK(s.ToGraph(&g)); } bool was_mutated; 
TF_EXPECT_OK(ConstantFold(ConstantFoldingOptions{}, nullptr, Env::Default(), nullptr, &g, &was_mutated)); EXPECT_FALSE(was_mutated); } REGISTER_OP("ConstantFoldingTestOp") .Input("a: int64") .Output("b: int64") .SetShapeFn(shape_inference::UnknownShape); TEST_F(ConstantFoldingTest, TestNoReplaceNonCPUOp) { Graph g(OpRegistry::Global()); { Scope s = Scope::NewRootScope(); auto aconst = ops::Const<int64_t>(s, 0, {5}); NodeDef def; TF_ASSERT_OK(NodeDefBuilder("testop", "ConstantFoldingTestOp") .Input(aconst.name(), 0, DT_INT64) .Finalize(&def)); Status status; Node* non_cpu = s.graph()->AddNode(def, &status); TF_ASSERT_OK(status); TF_ASSERT_OK(s.DoShapeInference(non_cpu)); auto non_cpu_send = ops::_Send(s.WithOpName("non_cpu_send"), Output(non_cpu), "non_cpu_send", "sender", 0, "receiver"); TF_ASSERT_OK(s.ToGraph(&g)); } bool was_mutated; TF_EXPECT_OK(ConstantFold(ConstantFoldingOptions{}, nullptr, Env::Default(), nullptr, &g, &was_mutated)); EXPECT_FALSE(was_mutated); } TEST_F(ConstantFoldingTest, ControlDependencies) { Graph g(OpRegistry::Global()); { Scope s = Scope::NewRootScope(); auto c0 = ops::Const<int>(s, 1); auto recv1 = ops::_Recv(s.WithOpName("recv1"), DT_FLOAT, "recv1", "sender", 0, "receiver"); auto c1 = ops::Const<int>(s.WithControlDependencies(recv1), 2); auto recv2 = ops::_Recv(s.WithOpName("recv2"), DT_FLOAT, "recv2", "sender", 0, "receiver"); auto c2 = ops::Const<int>(s.WithControlDependencies(recv2), 3); auto add = ops::Add(s.WithControlDependencies(c2), c0, c1); auto send = ops::_Send(s.WithOpName("send"), add, "send", "sender", 0, "receiver"); TF_ASSERT_OK(s.ToGraph(&g)); } bool was_mutated; TF_EXPECT_OK(ConstantFold(ConstantFoldingOptions{}, nullptr, Env::Default(), nullptr, &g, &was_mutated)); EXPECT_TRUE(was_mutated); std::unordered_map<string, Node*> index = g.BuildNodeNameIndex(); Node* recv1 = index.at("recv1"); Node* recv2 = index.at("recv2"); Node* send = index.at("send"); ASSERT_EQ(1, send->num_inputs()); Node* p = *(send->in_nodes().begin()); ExpectNodeEqual<int>(p, {3}, {}); ASSERT_EQ(2, p->in_edges().size()); for (const Edge* e : p->in_edges()) { EXPECT_TRUE(e->IsControlEdge()); EXPECT_TRUE(e->src() == recv1 || e->src() == recv2) << e->src()->name(); } } TEST_F(ConstantFoldingTest, SimpleShapeKnown) { Graph g(OpRegistry::Global()); { Scope s = Scope::NewRootScope(); Output recv0 = ops::_Recv(s.WithOpName("recv0"), DT_FLOAT, "recv0", "sender", 0, "receiver"); auto shape = ops::Shape(s.WithOpName("shape"), recv0); Output recv1 = ops::_Recv(s.WithOpName("recv1"), DT_FLOAT, "recv1", "sender", 0, "receiver"); auto shape_n = ops::ShapeN(s.WithOpName("shape_n"), {recv0, recv1}); auto rank = ops::Rank(s.WithOpName("rank"), recv0); auto size = ops::Size(s.WithOpName("size"), recv1); auto recv2 = ops::_Recv(s.WithOpName("recv2"), DT_FLOAT, "recv2", "sender", 0, "receiver"); auto c = ops::Const<int>(s.WithControlDependencies(recv2), 3); auto add0 = ops::Add(s.WithControlDependencies(c), rank, size); auto add1 = ops::Add(s, shape, shape_n[0]); auto add2 = ops::Add(s, shape_n[1], shape_n[1]); auto send0 = ops::_Send(s.WithOpName("send0"), add0, "send0", "sender", 0, "receiver"); auto send1 = ops::_Send(s.WithOpName("send1"), add1, "send1", "sender", 0, "receiver"); auto send2 = ops::_Send(s.WithOpName("send2"), add2, "send2", "sender", 0, "receiver"); TF_ASSERT_OK(s.ToGraph(&g)); } std::unordered_map<string, Node*> orig_index = g.BuildNodeNameIndex(); Node* recv0 = orig_index.at("recv0"); Node* recv1 = orig_index.at("recv1"); PartialTensorShape ps0; int r0_dims[] = {1, 
2}; TF_EXPECT_OK(PartialTensorShape::MakePartialShape(r0_dims, 2, &ps0)); PartialTensorShape ps1; int r1_dims[] = {2, 3, 4}; TF_EXPECT_OK(PartialTensorShape::MakePartialShape<int>(r1_dims, 3, &ps1)); std::unordered_map<string, std::vector<PartialTensorShape>> map; map[recv0->name()].push_back(ps0); map[recv1->name()].push_back(ps1); ConstantFoldingOptions opts; opts.shape_map = &map; bool was_mutated; TF_EXPECT_OK( ConstantFold(opts, nullptr, Env::Default(), nullptr, &g, &was_mutated)); EXPECT_TRUE(was_mutated); std::unordered_map<string, Node*> index = g.BuildNodeNameIndex(); Node* recv2 = index.at("recv2"); Node* send0 = index.at("send0"); Node* send1 = index.at("send1"); Node* send2 = index.at("send2"); ASSERT_EQ(1, send0->num_inputs()); Node* cf0 = *(send0->in_nodes().begin()); ExpectNodeEqual<int>(cf0, {26}, {}); ASSERT_EQ(1, send1->num_inputs()); Node* cf1 = *(send1->in_nodes().begin()); ExpectNodeEqual<int>(cf1, {2, 4}, {2}); ASSERT_EQ(1, send2->num_inputs()); Node* cf2 = *(send2->in_nodes().begin()); ExpectNodeEqual<int>(cf2, {4, 6, 8}, {3}); ASSERT_EQ(3, cf0->in_edges().size()); for (const Edge* e : cf0->in_edges()) { EXPECT_TRUE(e->IsControlEdge()); EXPECT_TRUE(e->src() == recv0 || e->src() == recv1 || e->src() == recv2) << e->src()->name(); } ASSERT_EQ(2, cf1->in_edges().size()); for (const Edge* e : cf1->in_edges()) { EXPECT_TRUE(e->IsControlEdge()); EXPECT_TRUE(e->src() == recv0 || e->src() == recv1) << e->src()->name(); } ASSERT_EQ(2, cf2->in_edges().size()); for (const Edge* e : cf2->in_edges()) { EXPECT_TRUE(e->IsControlEdge()); EXPECT_TRUE(e->src() == recv0 || e->src() == recv1) << e->src()->name(); } } TEST_F(ConstantFoldingTest, PartialShape) { Graph g(OpRegistry::Global()); { Scope s = Scope::NewRootScope(); Output recv0 = ops::_Recv(s.WithOpName("recv0"), DT_FLOAT, "recv0", "sender", 0, "receiver"); Output recv1 = ops::_Recv(s.WithOpName("recv1"), DT_FLOAT, "recv1", "sender", 0, "receiver"); auto shape = ops::Shape(s.WithOpName("shape"), recv0); auto rank0 = ops::Rank(s.WithOpName("rank0"), recv0); auto rank1 = ops::Rank(s.WithOpName("rank1"), recv1); auto size = ops::Size(s.WithOpName("size"), recv0); auto send0 = ops::_Send(s.WithOpName("send0"), rank0, "send0", "sender", 0, "receiver"); auto send1 = ops::_Send(s.WithOpName("send1"), shape, "send1", "sender", 0, "receiver"); auto send2 = ops::_Send(s.WithOpName("send2"), size, "send2", "sender", 0, "receiver"); auto send3 = ops::_Send(s.WithOpName("send3"), rank1, "send3", "sender", 0, "receiver"); TF_ASSERT_OK(s.ToGraph(&g)); } std::unordered_map<string, Node*> orig_index = g.BuildNodeNameIndex(); Node* recv0 = orig_index.at("recv0"); Node* recv1 = orig_index.at("recv1"); PartialTensorShape ps0; int r0_dims[] = {-1, -1}; TF_EXPECT_OK(PartialTensorShape::MakePartialShape(r0_dims, 2, &ps0)); PartialTensorShape ps1; std::unordered_map<string, std::vector<PartialTensorShape>> map; map[recv0->name()].push_back(ps0); map[recv1->name()].push_back(ps1); ConstantFoldingOptions opts; opts.shape_map = &map; bool was_mutated; TF_EXPECT_OK( ConstantFold(opts, nullptr, Env::Default(), nullptr, &g, &was_mutated)); EXPECT_TRUE(was_mutated); std::unordered_map<string, Node*> index = g.BuildNodeNameIndex(); Node* shape = index.at("shape"); Node* size = index.at("size"); Node* rank1 = index.at("rank1"); Node* send0 = index.at("send0"); Node* send1 = index.at("send1"); Node* send2 = index.at("send2"); Node* send3 = index.at("send3"); ASSERT_EQ(1, send0->num_inputs()); Node* cf0 = *(send0->in_nodes().begin()); ExpectNodeEqual<int>(cf0, 
{2}, {}); ASSERT_EQ(1, send1->num_inputs()); Node* ncf1 = *(send1->in_nodes().begin()); EXPECT_EQ(ncf1, shape); ASSERT_EQ(1, send2->num_inputs()); Node* ncf2 = *(send2->in_nodes().begin()); EXPECT_EQ(ncf2, size); ASSERT_EQ(1, send3->num_inputs()); Node* ncf3 = *(send3->in_nodes().begin()); EXPECT_EQ(ncf3, rank1); } TEST_F(ConstantFoldingTest, ConstShapeKnown) { Graph g(OpRegistry::Global()); { Scope s = Scope::NewRootScope(); auto recv0 = ops::_Recv(s.WithOpName("recv0"), DT_FLOAT, "recv0", "sender", 0, "receiver"); auto c0 = ops::Const<int>(s.WithOpName("c0").WithControlDependencies(recv0), 1); auto rank = ops::Rank(s.WithOpName("rank"), c0); auto add0 = ops::Add(s, rank, rank); auto send0 = ops::_Send(s.WithOpName("send0"), add0, "send0", "sender", 0, "receiver"); TF_ASSERT_OK(s.ToGraph(&g)); } std::unordered_map<string, Node*> orig_index = g.BuildNodeNameIndex(); Node* c0 = orig_index.at("c0"); PartialTensorShape ps0; int c0_dims[] = {}; TF_EXPECT_OK(PartialTensorShape::MakePartialShape(c0_dims, 0, &ps0)); std::unordered_map<string, std::vector<PartialTensorShape>> map; map[c0->name()].push_back(ps0); ConstantFoldingOptions opts; opts.shape_map = &map; bool was_mutated; TF_EXPECT_OK( ConstantFold(opts, nullptr, Env::Default(), nullptr, &g, &was_mutated)); EXPECT_TRUE(was_mutated); std::unordered_map<string, Node*> index = g.BuildNodeNameIndex(); Node* recv0 = index.at("recv0"); Node* send0 = index.at("send0"); ASSERT_EQ(1, send0->num_inputs()); Node* cf0 = *(send0->in_nodes().begin()); ExpectNodeEqual<int>(cf0, {0}, {}); ASSERT_EQ(1, cf0->in_edges().size()); for (const Edge* e : cf0->in_edges()) { EXPECT_TRUE(e->IsControlEdge()); EXPECT_TRUE(e->src() == recv0) << e->src()->name(); } } TEST_F(ConstantFoldingTest, NoReplacePartialOutput) { Graph g(OpRegistry::Global()); { Scope s = Scope::NewRootScope().ExitOnError().WithAssignedDevice("/gpu:0"); auto c0 = ops::Const<float>(s.WithOpName("c0"), {5.0, 2.0, 8.0, 1.0}, {4}); auto k = ops::Const<int>(s.WithOpName("k"), 3); auto topK = ops::TopK(s.WithOpName("topK"), c0, k, ops::TopK::Sorted(false)); auto send_values = ops::_Send(s.WithOpName("send_values"), topK.values, "send_values", "sender", 0, "receiver"); auto send_indices = ops::_Send(s.WithOpName("send_indices"), topK.indices, "send_indices", "sender", 0, "receiver"); TF_ASSERT_OK(s.ToGraph(&g)); } bool was_mutated; TF_EXPECT_OK(ConstantFold( ConstantFoldingOptions{}, nullptr, Env::Default(), FakeDevice::Make("/job:tpu_worker/replica:0/task:0/device:GPU:0", DEVICE_GPU) .get(), &g, &was_mutated)); EXPECT_FALSE(was_mutated); } namespace { const char kTestMemRegionName[] = "test: class TestReadOnlyMemoryRegion : public ::tensorflow::ReadOnlyMemoryRegion { public: ~TestReadOnlyMemoryRegion() override = default; TestReadOnlyMemoryRegion(const void* data, uint64 length) : data_(data), length_(length) {} const void* data() override { return data_; } uint64 length() override { return length_; } protected: const void* data_; uint64 length_; }; class TestTFFileSystem : public ::tensorflow::NullFileSystem { public: TestTFFileSystem() : ::tensorflow::NullFileSystem(), data_tensor_(test::AsTensor<double>({1., 2., 3., 4.}, {2, 2})) {} using ::tensorflow::NullFileSystem::NewReadOnlyMemoryRegionFromFile; ::tensorflow::Status NewReadOnlyMemoryRegionFromFile( const string& fname, ::tensorflow::TransactionToken* token, std::unique_ptr<::tensorflow::ReadOnlyMemoryRegion>* result) override { if (fname != kTestMemRegionName) { return ::tensorflow::errors::Unimplemented( "NewReadOnlyMemoryRegionFromFile 
unimplemented"); } const ::tensorflow::StringPiece sp = data_tensor_.tensor_data(); *result = std::unique_ptr<::tensorflow::ReadOnlyMemoryRegion>( new TestReadOnlyMemoryRegion(sp.data(), sp.size())); return absl::OkStatus(); } protected: ::tensorflow::Tensor data_tensor_; }; class TestTFEnvironment : public ::tensorflow::EnvWrapper { public: using tf_base = ::tensorflow::EnvWrapper; TestTFEnvironment() : ::tensorflow::EnvWrapper(Default()) {} ::tensorflow::Status GetFileSystemForFile( const string& fname, ::tensorflow::FileSystem** result) override { was_used_ = true; if (fname == "test: *result = &test_filesystem_; return absl::OkStatus(); } return tf_base::GetFileSystemForFile(fname, result); } bool was_used() const { return was_used_; } protected: TestTFFileSystem test_filesystem_; bool was_used_ = false; }; } TEST_F(ConstantFoldingTest, TestImmutableConst) { Graph g(OpRegistry::Global()); Scope root = Scope::NewRootScope(); auto a = ops::ImmutableConst(root, DT_DOUBLE, {2, 2}, kTestMemRegionName); auto b = ops::Const<double>(root, {1.0, 2.0, 3.0, 4.0}, {2, 2}); auto c = ops::RandomGamma(root, {2, 2}, 2.0); auto result1 = ops::MatMul(root, a, b); auto result2 = ops::MatMul(root, result1, c); TF_ASSERT_OK(root.ToGraph(&g)); TestTFEnvironment test_env; bool was_mutated; Status status = ConstantFold(ConstantFoldingOptions{}, nullptr, Env::Default(), nullptr, &g, &was_mutated); EXPECT_FALSE(was_mutated); EXPECT_FALSE(status.ok()); TF_EXPECT_OK(ConstantFold(ConstantFoldingOptions{}, nullptr, &test_env, nullptr, &g, &was_mutated)); EXPECT_TRUE(was_mutated); } } }
1,049
cpp
tensorflow/tensorflow
ops
third_party/xla/xla/python/ops.cc
tensorflow/core/ir/ops_test.cc
#ifndef XLA_PYTHON_OPS_H_ #define XLA_PYTHON_OPS_H_ #include "third_party/nanobind/include/nanobind/nanobind.h" namespace xla { void BuildOpsSubmodule(nanobind::module_& m); } #endif #include "xla/python/ops.h" #include <algorithm> #include <cstdint> #include <optional> #include <string> #include <tuple> #include <utility> #include <vector> #include "absl/types/span.h" #include "third_party/nanobind/include/nanobind/nanobind.h" #include "third_party/nanobind/include/nanobind/stl/optional.h" #include "third_party/nanobind/include/nanobind/stl/pair.h" #include "third_party/nanobind/include/nanobind/stl/shared_ptr.h" #include "third_party/nanobind/include/nanobind/stl/string.h" #include "third_party/nanobind/include/nanobind/stl/tuple.h" #include "third_party/nanobind/include/nanobind/stl/vector.h" #include "xla/client/lib/approx_topk.h" #include "xla/client/lib/approx_topk_shape.h" #include "xla/client/lib/comparators.h" #include "xla/client/lib/lu_decomposition.h" #include "xla/client/lib/math.h" #include "xla/client/lib/qr.h" #include "xla/client/lib/self_adjoint_eig.h" #include "xla/client/lib/sorting.h" #include "xla/client/lib/svd.h" #include "xla/client/xla_builder.h" #include "xla/client/xla_computation.h" #include "xla/pjrt/status_casters.h" #include "xla/python/nb_absl_span.h" #include "xla/python/nb_helpers.h" #include "xla/python/types.h" #include "xla/xla_data.pb.h" namespace nb = nanobind; namespace nanobind { namespace detail { template <> struct type_caster<xla::ConvolutionDimensionNumbers> { public: NB_TYPE_CASTER_FROM_PYTHON_ONLY( xla::ConvolutionDimensionNumbers, const_name("xla::ConvolutionDimensionNumbers")); bool from_python(handle handle, uint8_t, cleanup_list*) { try { value.set_input_batch_dimension( cast<int64_t>(getattr(handle, "input_batch_dimension"))); value.set_input_feature_dimension( cast<int64_t>(getattr(handle, "input_feature_dimension"))); value.set_output_batch_dimension( cast<int64_t>(getattr(handle, "output_batch_dimension"))); value.set_output_feature_dimension( cast<int64_t>(getattr(handle, "output_feature_dimension"))); value.set_kernel_input_feature_dimension( cast<int64_t>(getattr(handle, "kernel_input_feature_dimension"))); value.set_kernel_output_feature_dimension( cast<int64_t>(getattr(handle, "kernel_output_feature_dimension"))); std::vector<int64_t> dims; dims = cast<std::vector<int64_t>>( getattr(handle, "input_spatial_dimensions")); std::copy(dims.begin(), dims.end(), tsl::protobuf::RepeatedFieldBackInserter( value.mutable_input_spatial_dimensions())); dims = cast<std::vector<int64_t>>( getattr(handle, "kernel_spatial_dimensions")); std::copy(dims.begin(), dims.end(), tsl::protobuf::RepeatedFieldBackInserter( value.mutable_kernel_spatial_dimensions())); dims = cast<std::vector<int64_t>>( getattr(handle, "output_spatial_dimensions")); std::copy(dims.begin(), dims.end(), tsl::protobuf::RepeatedFieldBackInserter( value.mutable_output_spatial_dimensions())); return true; } catch (...) 
{ return false; } } }; template <> struct type_caster<xla::DotDimensionNumbers> { public: NB_TYPE_CASTER_FROM_PYTHON_ONLY(xla::DotDimensionNumbers, const_name("xla::DotDimensionNumbers")); bool from_python(handle handle, uint8_t flags, cleanup_list*) noexcept { try { std::vector<int64_t> dims = cast<std::vector<int64_t>>( getattr(handle, "lhs_contracting_dimensions")); std::copy(dims.begin(), dims.end(), tsl::protobuf::RepeatedFieldBackInserter( value.mutable_lhs_contracting_dimensions())); dims = cast<std::vector<int64_t>>( getattr(handle, "rhs_contracting_dimensions")); std::copy(dims.begin(), dims.end(), tsl::protobuf::RepeatedFieldBackInserter( value.mutable_rhs_contracting_dimensions())); dims = cast<std::vector<int64_t>>(getattr(handle, "lhs_batch_dimensions")); std::copy(dims.begin(), dims.end(), tsl::protobuf::RepeatedFieldBackInserter( value.mutable_lhs_batch_dimensions())); dims = cast<std::vector<int64_t>>(getattr(handle, "rhs_batch_dimensions")); std::copy(dims.begin(), dims.end(), tsl::protobuf::RepeatedFieldBackInserter( value.mutable_rhs_batch_dimensions())); return true; } catch (...) { return false; } } }; template <> struct type_caster<xla::GatherDimensionNumbers> { public: NB_TYPE_CASTER_FROM_PYTHON_ONLY(xla::GatherDimensionNumbers, const_name("xla::GatherDimensionNumbers")); bool from_python(handle handle, uint8_t, cleanup_list*) { try { std::vector<int64_t> dims; dims = cast<std::vector<int64_t>>(getattr(handle, "offset_dims")); std::copy(dims.begin(), dims.end(), tsl::protobuf::RepeatedFieldBackInserter( value.mutable_offset_dims())); dims = cast<std::vector<int64_t>>(getattr(handle, "collapsed_slice_dims")); std::copy(dims.begin(), dims.end(), tsl::protobuf::RepeatedFieldBackInserter( value.mutable_collapsed_slice_dims())); dims = cast<std::vector<int64_t>>(getattr(handle, "start_index_map")); std::copy(dims.begin(), dims.end(), tsl::protobuf::RepeatedFieldBackInserter( value.mutable_start_index_map())); value.set_index_vector_dim( cast<int64_t>(getattr(handle, "index_vector_dim"))); return true; } catch (...) { return false; } } }; template <> struct type_caster<xla::ScatterDimensionNumbers> { public: NB_TYPE_CASTER_FROM_PYTHON_ONLY(xla::ScatterDimensionNumbers, const_name("xla::ScatterDimensionNumbers")); bool from_python(handle handle, uint8_t, cleanup_list*) { try { std::vector<int64_t> dims; dims = cast<std::vector<int64_t>>(getattr(handle, "update_window_dims")); std::copy(dims.begin(), dims.end(), tsl::protobuf::RepeatedFieldBackInserter( value.mutable_update_window_dims())); dims = cast<std::vector<int64_t>>(getattr(handle, "inserted_window_dims")); std::copy(dims.begin(), dims.end(), tsl::protobuf::RepeatedFieldBackInserter( value.mutable_inserted_window_dims())); dims = cast<std::vector<int64_t>>( getattr(handle, "scatter_dims_to_operand_dims")); std::copy(dims.begin(), dims.end(), tsl::protobuf::RepeatedFieldBackInserter( value.mutable_scatter_dims_to_operand_dims())); value.set_index_vector_dim( cast<int64_t>(getattr(handle, "index_vector_dim"))); return true; } catch (...) { return false; } } }; template <> struct type_caster<xla::ReplicaGroup> { public: NB_TYPE_CASTER_FROM_PYTHON_ONLY(xla::ReplicaGroup, const_name("xla::ReplicaGroup")); bool from_python(handle handle, uint8_t, cleanup_list*) { try { auto dims = cast<std::vector<int64_t>>(getattr(handle, "replica_ids")); std::copy(dims.begin(), dims.end(), tsl::protobuf::RepeatedFieldBackInserter( value.mutable_replica_ids())); return true; } catch (...) 
{ return false; } } }; template <> struct type_caster<xla::PaddingConfig> { public: NB_TYPE_CASTER_FROM_PYTHON_ONLY(xla::PaddingConfig, const_name("xla::PaddingConfig")); bool from_python(handle handle, uint8_t, cleanup_list*) { try { sequence dimensions = borrow<sequence>(getattr(handle, "dimensions")); for (const auto& dimension : dimensions) { xla::PaddingConfig::PaddingConfigDimension* config_dim = value.add_dimensions(); config_dim->set_edge_padding_low( cast<int64_t>(getattr(dimension, "edge_padding_low"))); config_dim->set_edge_padding_high( cast<int64_t>(getattr(dimension, "edge_padding_high"))); config_dim->set_interior_padding( cast<int64_t>(getattr(dimension, "interior_padding"))); } return true; } catch (...) { return false; } } }; template <> struct type_caster<xla::PrecisionConfig> { public: NB_TYPE_CASTER_FROM_PYTHON_ONLY(xla::PrecisionConfig, const_name("xla::PrecisionConfig")); bool from_python(handle handle, uint8_t, cleanup_list*) { try { if (handle.is_none()) { return true; } sequence operand_precisions = borrow<sequence>(getattr(handle, "operand_precision")); for (const auto& operand_precision : operand_precisions) { value.add_operand_precision( cast<xla::PrecisionConfig::Precision>(operand_precision)); } return true; } catch (...) { return false; } } }; } } namespace xla { void BuildOpsSubmodule(nb::module_& m) { nb::module_ ops = m.def_submodule("ops", "XLA operations"); nb::enum_<TriangularSolveOptions::Transpose>( ops, "TriangularSolveOptions_Transpose") .value("TRANSPOSE_INVALID", TriangularSolveOptions::TRANSPOSE_INVALID) .value("NO_TRANSPOSE", TriangularSolveOptions::NO_TRANSPOSE) .value("TRANSPOSE", TriangularSolveOptions::TRANSPOSE) .value("ADJOINT", TriangularSolveOptions::ADJOINT); nb::enum_<RandomAlgorithm>(ops, "RandomAlgorithm") .value("RNG_DEFAULT", RandomAlgorithm::RNG_DEFAULT) .value("RNG_THREE_FRY", RandomAlgorithm::RNG_THREE_FRY) .value("RNG_PHILOX", RandomAlgorithm::RNG_PHILOX); nb::enum_<CustomCallSchedule>(ops, "CustomCallSchedule") .value("SCHEDULE_NONE", CustomCallSchedule::SCHEDULE_NONE) .value("SCHEDULE_LATEST", CustomCallSchedule::SCHEDULE_LATEST) .value("SCHEDULE_EARLIEST", CustomCallSchedule::SCHEDULE_EARLIEST); nb::enum_<CustomCallApiVersion>(ops, "CustomCallApiVersion") .value("API_VERSION_ORIGINAL", CustomCallApiVersion::API_VERSION_ORIGINAL) .value("API_VERSION_STATUS_RETURNING", CustomCallApiVersion::API_VERSION_STATUS_RETURNING) .value("API_VERSION_STATUS_RETURNING_UNIFIED", CustomCallApiVersion::API_VERSION_STATUS_RETURNING_UNIFIED) .value("API_VERSION_TYPED_FFI", CustomCallApiVersion::API_VERSION_TYPED_FFI); ops.def("AfterAll", &AfterAll, nb::arg("builder"), nb::arg("tokens")); ops.def("AllGather", &AllGather, nb::arg("operand"), nb::arg("all_gather_dimension"), nb::arg("shard_count"), nb::arg("replica_groups") = nb::list(), nb::arg("channel_id") = std::nullopt, nb::arg("shape_with_layout") = std::nullopt, nb::arg("use_global_device_ids") = std::nullopt); ops.def("AllReduce", static_cast<XlaOp (*)( XlaOp, const XlaComputation&, absl::Span<const ReplicaGroup>, const std::optional<ChannelHandle>&, const std::optional<Shape>&, const std::optional<bool>)>(&AllReduce), nb::arg("operand"), nb::arg("computation"), nb::arg("replica_groups") = nb::list(), nb::arg("channel_id") = std::nullopt, nb::arg("shape_with_layout") = std::nullopt, nb::arg("use_global_device_ids") = std::nullopt); ops.def("ReduceScatter", &ReduceScatter, nb::arg("operand"), nb::arg("computation"), nb::arg("scatter_dimension"), nb::arg("shard_count"), 
nb::arg("replica_groups") = nb::list(), nb::arg("channel_id") = std::nullopt, nb::arg("layout") = std::nullopt, nb::arg("use_global_device_ids") = std::nullopt); ops.def("AllToAll", &AllToAll, nb::arg("operand"), nb::arg("split_dimension"), nb::arg("concat_dimension"), nb::arg("split_count"), nb::arg("replica_groups") = nb::list(), nb::arg("layout") = std::nullopt, nb::arg("channel_id") = std::nullopt); ops.def("ApproxTopK", &ApproxTopK, nb::arg("builder"), nb::arg("operands"), nb::arg("init_values"), nb::arg("top_k"), nb::arg("reduction_dim"), nb::arg("comparator"), nb::arg("recall_target") = 0.9, nb::arg("aggregate_to_topk") = true, nb::arg("reduction_input_size_override") = -1); ops.def("ApproxTopKFallback", &ApproxTopKFallback, nb::arg("builder"), nb::arg("operands"), nb::arg("init_values"), nb::arg("top_k"), nb::arg("reduction_dim"), nb::arg("comparator"), nb::arg("recall_target") = 0.9, nb::arg("aggregate_to_topk") = true, nb::arg("reduction_input_size_override") = -1); ops.def("ApproxTopKReductionOutputSize", xla::ValueOrThrowWrapper(ApproxTopKReductionOutputSize), nb::arg("input_size"), nb::arg("rank"), nb::arg("top_k"), nb::arg("recall_target"), nb::arg("aggregate_to_topk") = true, nb::arg("input_size_override") = -1); ops.def("BitcastConvertType", &BitcastConvertType, nb::arg("operand"), nb::arg("new_element_type")); ops.def("Broadcast", &Broadcast, nb::arg("operand"), nb::arg("sizes")); ops.def("BroadcastInDim", &BroadcastInDim, nb::arg("operand"), nb::arg("shape"), nb::arg("broadcast_dimensions")); ops.def("Call", &Call, nb::arg("builder"), nb::arg("computation"), nb::arg("operands")); ops.def("Cholesky", &Cholesky, nb::arg("a"), nb::arg("lower") = true); ops.def("Clamp", &Clamp, nb::arg("min"), nb::arg("operand"), nb::arg("max")); ops.def("Collapse", &Collapse, nb::arg("operand"), nb::arg("dimensions")); ops.def("CollectivePermute", &CollectivePermute, nb::arg("operand"), nb::arg("source_target_pairs"), nb::arg("channel_id") = std::nullopt); ops.def("ConcatInDim", &ConcatInDim, nb::arg("builder"), nb::arg("operands"), nb::arg("dimension")); ops.def("Conditional", static_cast<XlaOp (*)(XlaOp, absl::Span<const XlaComputation* const>, absl::Span<const XlaOp>)>(&Conditional), nb::arg("branch_index"), nb::arg("branch_computations"), nb::arg("branch_operands")); ops.def("Conditional", static_cast<XlaOp (*)(XlaOp, XlaOp, const XlaComputation&, XlaOp, const XlaComputation&)>(&Conditional), nb::arg("predicate"), nb::arg("true_operand"), nb::arg("true_computation"), nb::arg("false_operand"), nb::arg("false_computation")); ops.def("Constant", &ConstantLiteral, nb::arg("builder"), nb::arg("literal")); ops.def("ConstantLiteral", &ConstantLiteral, nb::arg("builder"), nb::arg("literal")); ops.def("ConvGeneralDilated", &ConvGeneralDilated, nb::arg("lhs"), nb::arg("rhs"), nb::arg("window_strides"), nb::arg("padding"), nb::arg("lhs_dilation"), nb::arg("rhs_dilation"), nb::arg("dimension_numbers"), nb::arg("feature_group_count") = 1, nb::arg("batch_group_count") = 1, nb::arg("precision_config") = nullptr, nb::arg("preferred_element_type") = std::nullopt, nb::arg("window_reversal") = std::nullopt); ops.def("ConvertElementType", &ConvertElementType, nb::arg("operand"), nb::arg("new_element_type")); ops.def("CreateToken", &CreateToken, nb::arg("builder")); ops.def("CrossReplicaSum", static_cast<XlaOp (*)(XlaOp, absl::Span<const ReplicaGroup>)>( &CrossReplicaSum), nb::arg("operand"), nb::arg("replica_groups") = nb::list()); ops.def( "CustomCall", [](XlaBuilder* builder, const nb::bytes& 
call_target_name, absl::Span<const XlaOp> operands, const Shape& shape, const nb::bytes& opaque, bool has_side_effect, CustomCallSchedule schedule, CustomCallApiVersion api_version) -> XlaOp { std::string call_target_name_str(call_target_name.c_str(), call_target_name.size()); std::string opaque_str(opaque.c_str(), opaque.size()); return CustomCall(builder, call_target_name_str, operands, shape, opaque_str, has_side_effect, {}, nullptr, schedule, api_version); }, nb::arg("builder"), nb::arg("call_target_name"), nb::arg("operands"), nb::arg("shape"), nb::arg("opaque") = nb::bytes(""), nb::arg("has_side_effect") = false, nb::arg("schedule") = CustomCallSchedule::SCHEDULE_NONE, nb::arg("api_version") = CustomCallApiVersion::API_VERSION_ORIGINAL); ops.def( "CustomCallWithLayout", [](XlaBuilder* builder, const nb::bytes& call_target_name, absl::Span<const XlaOp> operands, const Shape& shape_with_layout, absl::Span<const Shape> operand_shapes_with_layout, const nb::bytes& opaque, bool has_side_effect, CustomCallSchedule schedule, CustomCallApiVersion api_version) -> XlaOp { std::string call_target_name_str(call_target_name.c_str(), call_target_name.size()); std::string opaque_str(opaque.c_str(), opaque.size()); return CustomCallWithLayout( builder, call_target_name_str, operands, shape_with_layout, operand_shapes_with_layout, opaque_str, has_side_effect, {}, nullptr, schedule, api_version); }, nb::arg("builder"), nb::arg("call_target_name"), nb::arg("operands"), nb::arg("shape_with_layout"), nb::arg("operand_shapes_with_layout"), nb::arg("opaque") = nb::bytes(""), nb::arg("has_side_effect") = false, nb::arg("schedule") = CustomCallSchedule::SCHEDULE_NONE, nb::arg("api_version") = CustomCallApiVersion::API_VERSION_ORIGINAL); ops.def( "CustomCallWithAliasing", [](XlaBuilder* builder, const nb::bytes& call_target_name, absl::Span<const XlaOp> operands, const Shape& shape_with_layout, absl::Span<const Shape> operand_shapes_with_layout, const nb::bytes& opaque, bool has_side_effect, absl::Span<const std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>> output_operand_aliasing, const Literal* literal, CustomCallSchedule schedule, CustomCallApiVersion api_version) -> XlaOp { std::string call_target_name_str(call_target_name.c_str(), call_target_name.size()); std::string opaque_str(opaque.c_str(), opaque.size()); return CustomCallWithLayout( builder, call_target_name_str, operands, shape_with_layout, operand_shapes_with_layout, opaque_str, has_side_effect, output_operand_aliasing, literal, schedule, api_version); }, nb::arg("builder"), nb::arg("call_target_name"), nb::arg("operands"), nb::arg("shape_with_layout"), nb::arg("operand_shapes_with_layout"), nb::arg("opaque") = nb::bytes(""), nb::arg("has_side_effect") = false, nb::arg("output_operand_aliasing"), nb::arg("literal") = nullptr, nb::arg("schedule") = CustomCallSchedule::SCHEDULE_NONE, nb::arg("api_version") = CustomCallApiVersion::API_VERSION_ORIGINAL); ops.def( "CustomCallWithComputation", [](XlaBuilder* builder, const nb::bytes& call_target_name, absl::Span<const XlaOp> operands, const XlaComputation& computation, const Shape& shape, const nb::bytes& opaque, bool has_side_effect, absl::Span<const std::pair<ShapeIndex, std::pair<int64_t, ShapeIndex>>> output_operand_aliasing, const Literal* literal, CustomCallSchedule schedule, CustomCallApiVersion api_version) -> XlaOp { std::string call_target_name_str(call_target_name.c_str(), call_target_name.size()); std::string opaque_str(opaque.c_str(), opaque.size()); return CustomCallWithComputation( 
builder, call_target_name_str, operands, computation, shape, opaque_str, has_side_effect, output_operand_aliasing, literal, schedule, api_version); }, nb::arg("builder"), nb::arg("call_target_name"), nb::arg("operands"), nb::arg("computation"), nb::arg("shape"), nb::arg("opaque") = nb::bytes(""), nb::arg("has_side_effect") = false, nb::arg("output_operand_aliasing"), nb::arg("literal") = nullptr, nb::arg("schedule") = CustomCallSchedule::SCHEDULE_NONE, nb::arg("api_version") = CustomCallApiVersion::API_VERSION_ORIGINAL); ops.def("Dot", &Dot, nb::arg("lhs"), nb::arg("rhs"), nb::arg("precision_config") = nullptr, nb::arg("preferred_element_type") = std::nullopt); ops.def("DotGeneral", &DotGeneral, nb::arg("lhs"), nb::arg("rhs"), nb::arg("dimension_numbers"), nb::arg("precision_config") = nullptr, nb::arg("preferred_element_type") = std::nullopt); ops.def("DynamicReshape", static_cast<XlaOp (*)(XlaOp, absl::Span<const XlaOp>, absl::Span<const int64_t>, const std::vector<bool>&)>(&DynamicReshape), nb::arg("operand"), nb::arg("dim_sizes"), nb::arg("new_size_bounds"), nb::arg("dims_are_dynamic")); ops.def("DynamicSlice", static_cast<XlaOp (*)(XlaOp, absl::Span<const XlaOp>, absl::Span<const int64_t>)>(&DynamicSlice), nb::arg("operand"), nb::arg("start_indices"), nb::arg("slice_sizes")); ops.def("DynamicUpdateSlice", static_cast<XlaOp (*)(XlaOp, XlaOp, absl::Span<const XlaOp>)>( &DynamicUpdateSlice), nb::arg("operand"), nb::arg("update"), nb::arg("start_indices")); ops.def( "Eigh", [](XlaOp a, bool lower, int64_t max_iter, float epsilon, bool sort_eigenvalues) -> std::pair<XlaOp, XlaOp> { auto eigh = SelfAdjointEig(a, lower, max_iter, epsilon, sort_eigenvalues); return std::make_pair(eigh.v, eigh.w); }, nb::arg("a"), nb::arg("lower") = true, nb::arg("max_iter") = 15, nb::arg("epsilon") = 1e-5, nb::arg("sort_eigenvalues") = true); ops.def("Fft", &Fft, nb::arg("operand"), nb::arg("fft_type"), nb::arg("fft_length")); ops.def("Gather", &Gather, nb::arg("a"), nb::arg("start_indices"), nb::arg("dimension_numbers"), nb::arg("slice_sizes"), nb::arg("indices_are_sorted") = false); ops.def("GetDimensionSize", &GetDimensionSize, nb::arg("operand"), nb::arg("dimension")); ops.def("GetTupleElement", &GetTupleElement, nb::arg("tuple_data"), nb::arg("index")); ops.def("InfeedWithToken", &InfeedWithToken, nb::arg("token"), nb::arg("shape"), nb::arg("config") = ""); ops.def("Iota", static_cast<XlaOp (*)(XlaBuilder*, const Shape&, int64_t)>(&Iota), nb::arg("builder"), nb::arg("shape"), nb::arg("iota_dimension")); ops.def("Iota", static_cast<XlaOp (*)(XlaBuilder*, PrimitiveType, int64_t)>(&Iota), nb::arg("builder"), nb::arg("type"), nb::arg("size")); ops.def( "LU", [](XlaOp a) -> std::tuple<XlaOp, XlaOp, XlaOp> { LuDecompositionResult lu = LuDecomposition(a); return std::make_tuple(lu.lu, lu.pivots, lu.permutation); }, nb::arg("operand")); ops.def("Map", &Map, nb::arg("builder"), nb::arg("operands"), nb::arg("computation"), nb::arg("dimensions"), nb::arg("static_operands") = nb::list()); ops.def("NextAfter", &NextAfter, nb::arg("from"), nb::arg("to")); ops.def("OutfeedWithToken", &OutfeedWithToken, nb::arg("operand"), nb::arg("token"), nb::arg("shape_with_layout"), nb::arg("outfeed_config") = ""); ops.def("Pad", &Pad, nb::arg("operand"), nb::arg("padding_value"), nb::arg("padding_config")); ops.def("Parameter", static_cast<XlaOp (*)(XlaBuilder*, int64_t, const Shape&, const std::string&, const std::vector<bool>&)>( &Parameter), nb::arg("builder"), nb::arg("parameter_number"), nb::arg("shape"), nb::arg("name") = "", 
nb::arg("replicated_at_leaf_buffers") = std::vector<bool>()); ops.def("ProductOfElementaryHouseholderReflectors", &ProductOfElementaryHouseholderReflectors, nb::arg("a"), nb::arg("taus")); ops.def( "QR", [](XlaOp a, bool full_matrices) -> std::pair<XlaOp, XlaOp> { XlaOp q, r; QrExplicit(a, full_matrices, q, r); return std::make_pair(q, r); }, nb::arg("operand"), nb::arg("full_matrices")); ops.def( "QrDecomposition", [](XlaOp a) -> std::pair<XlaOp, XlaOp> { QrDecomposition d = Qr(a); return std::make_pair(d.q_and_r, d.taus); }, nb::arg("operand")); ops.def("RecvFromHost", &RecvFromHost, nb::arg("token"), nb::arg("shape"), nb::arg("handle")); ops.def("Reduce", static_cast<XlaOp (*)(XlaBuilder*, absl::Span<const XlaOp>, absl::Span<const XlaOp>, const XlaComputation&, absl::Span<const int64_t>)>(&Reduce), nb::arg("builder"), nb::arg("operands"), nb::arg("init_values"), nb::arg("computation"), nb::arg("dimensions_to_reduce")); ops.def("ReducePrecision", &ReducePrecision, nb::arg("operand"), nb::arg("exponent_bits"), nb::arg("mantissa_bits")); ops.def("ReduceWindowWithGeneralPadding", static_cast<XlaOp (*)( XlaOp, XlaOp, const XlaComputation&, absl::Span<const int64_t>, absl::Span<const int64_t>, absl::Span<const int64_t>, absl::Span<const int64_t>, absl::Span<const std::pair<int64_t, int64_t>>)>( &ReduceWindowWithGeneralPadding), nb::arg("operand"), nb::arg("init_value"), nb::arg("computation"), nb::arg("window_dimensions"), nb::arg("window_strides"), nb::arg("base_dilations"), nb::arg("window_dilations"), nb::arg("padding")); ops.def("ReduceWindowWithGeneralPadding", static_cast<XlaOp (*)( absl::Span<const XlaOp>, absl::Span<const XlaOp>, const XlaComputation&, absl::Span<const int64_t>, absl::Span<const int64_t>, absl::Span<const int64_t>, absl::Span<const int64_t>, absl::Span<const std::pair<int64_t, int64_t>>)>( &ReduceWindowWithGeneralPadding), nb::arg("operands"), nb::arg("init_values"), nb::arg("computation"), nb::arg("window_dimensions"), nb::arg("window_strides"), nb::arg("base_dilations"), nb::arg("window_dilations"), nb::arg("padding")); ops.def("RemoveDynamicDimension", &RemoveDynamicDimension, nb::arg("operand"), nb::arg("dimension")); ops.def("ReplicaId", &ReplicaId, nb::arg("builder")); ops.def("Reshape", static_cast<XlaOp (*)(XlaOp, absl::Span<const int64_t>, absl::Span<const int64_t>)>(&Reshape), nb::arg("operand"), nb::arg("dimensions"), nb::arg("new_sizes")); ops.def("Reshape", static_cast<XlaOp (*)(XlaOp, absl::Span<const int64_t>)>(&Reshape), nb::arg("operand"), nb::arg("new_sizes")); ops.def("Rev", &Rev, nb::arg("operand"), nb::arg("dimensions")); ops.def("RngBitGenerator", &RngBitGenerator, nb::arg("algorithm"), nb::arg("initial_state"), nb::arg("shape")); ops.def("RngNormal", &RngNormal, nb::arg("mu"), nb::arg("sigma"), nb::arg("shape")); ops.def("RngUniform", &RngUniform, nb::arg("a"), nb::arg("b"), nb::arg("shape")); ops.def("Scatter", static_cast<XlaOp (*)(XlaOp, XlaOp, XlaOp, const XlaComputation&, const ScatterDimensionNumbers&, bool, bool)>( &Scatter), nb::arg("input"), nb::arg("scatter_indices"), nb::arg("updates"), nb::arg("update_computation"), nb::arg("dimension_numbers"), nb::arg("indices_are_sorted") = false, nb::arg("unique_indices") = false); ops.def("Scatter", static_cast<XlaOp (*)(absl::Span<const XlaOp>, XlaOp, absl::Span<const XlaOp>, const XlaComputation&, const ScatterDimensionNumbers&, bool, bool)>( &Scatter), nb::arg("inputs"), nb::arg("scatter_indices"), nb::arg("updates"), nb::arg("update_computation"), nb::arg("dimension_numbers"), 
nb::arg("indices_are_sorted") = false, nb::arg("unique_indices") = false); ops.def("Select", &Select, nb::arg("pred"), nb::arg("on_true"), nb::arg("on_false")); ops.def("SelectAndScatterWithGeneralPadding", &SelectAndScatterWithGeneralPadding, nb::arg("operand"), nb::arg("select"), nb::arg("window_dimensions"), nb::arg("window_strides"), nb::arg("padding"), nb::arg("source"), nb::arg("init_value"), nb::arg("scatter")); ops.def("SendToHost", &SendToHost, nb::arg("operand"), nb::arg("token"), nb::arg("shape_with_layout"), nb::arg("handle")); ops.def("SetDimensionSize", &SetDimensionSize, nb::arg("operand"), nb::arg("val"), nb::arg("dimension")); ops.def("Slice", &Slice, nb::arg("operand"), nb::arg("start_indices"), nb::arg("limit_indices"), nb::arg("strides")); ops.def("SliceInDim", &SliceInDim, nb::arg("operand"), nb::arg("start_index"), nb::arg("limit_index"), nb::arg("stride"), nb::arg("dimno")); ops.def( "Sort", [](XlaBuilder* builder, absl::Span<const XlaOp> operands, std::optional<const XlaComputation*> comparator, int64_t dimension, bool is_stable) -> XlaOp { return builder->ReportErrorOrReturn([&]() -> XlaOp { std::vector<PrimitiveType> operand_types; operand_types.reserve(operands.size()); for (const auto& operand : operands) { auto operand_shape = xla::ValueOrThrow(builder->GetShape(operand)); operand_types.push_bac
#include "tensorflow/core/ir/ops.h" #include <optional> #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/MLIRContext.h" #include "mlir/Interfaces/ControlFlowInterfaces.h" #include "mlir/Parser/Parser.h" #include "tensorflow/core/ir/dialect.h" #include "tensorflow/core/platform/test.h" namespace mlir { namespace tfg { namespace { template <typename OpT> OpT findOp(ModuleOp module) { OpT result; module.walk([&](OpT op) { result = op; return WalkResult::interrupt(); }); assert(result); return result; } TEST(TestTFGRegionOps, TestIfLikeRegionOpSuccessorRegions) { const char *const code = R"mlir( tfg.func @test(%arg0: tensor<i1>, %arg1: tensor<f32>) -> (tensor<f32>) { %IfRegion, %ctl = IfRegion %arg0 then { yield(%arg1) : tensor<f32> } else { yield(%arg1) : tensor<f32> } : (tensor<i1>) -> (tensor<f32>) return(%IfRegion) : tensor<f32> } )mlir"; MLIRContext context; context.getOrLoadDialect<TFGraphDialect>(); OwningOpRef<ModuleOp> module = mlir::parseSourceString<mlir::ModuleOp>(code, &context); ASSERT_TRUE(module); auto op = findOp<IfRegionOp>(*module); SmallVector<RegionSuccessor> regions; for (unsigned index = 0; index <= 1; ++index, regions.clear()) { op.getSuccessorRegions(op->getRegion(index), regions); ASSERT_EQ(regions.size(), 1u); EXPECT_TRUE(regions.front().isParent()); } op.getSuccessorRegions(RegionBranchPoint::parent(), regions); EXPECT_EQ(regions.size(), 2u); regions.clear(); Builder b(&context); ShapedType tensor_type = RankedTensorType::get({}, b.getI1Type()); Attribute cond = DenseElementsAttr::get(tensor_type, true); op.getEntrySuccessorRegions({cond}, regions); ASSERT_EQ(regions.size(), 1u); EXPECT_EQ(regions.front().getSuccessor(), &op.getThenRegion()); } TEST(TestTFGRegionOps, TestCaseLikeRegionOpSuccessorRegions) { const char *const code = R"mlir( tfg.func @test(%arg0: tensor<i32>, %arg1: tensor<f32>) -> (tensor<f32>) { %CaseRegion, %ctl = CaseRegion %arg0 { yield(%arg1) : tensor<f32> }, { yield(%arg1) : tensor<f32> } : (tensor<i32>) -> (tensor<f32>) return(%CaseRegion) : tensor<f32> } )mlir"; MLIRContext context; context.getOrLoadDialect<TFGraphDialect>(); OwningOpRef<ModuleOp> module = mlir::parseSourceString<mlir::ModuleOp>(code, &context); ASSERT_TRUE(module); auto op = findOp<CaseRegionOp>(*module); SmallVector<RegionSuccessor> regions; for (unsigned index = 0; index < op.getNumRegions(); ++index, regions.clear()) { op.getSuccessorRegions(op->getRegion(index), regions); ASSERT_EQ(regions.size(), 1u); EXPECT_TRUE(regions.front().isParent()); } op.getSuccessorRegions(RegionBranchPoint::parent(), regions); EXPECT_EQ(regions.size(), 2u); regions.clear(); Builder b(&context); ShapedType tensor_type = RankedTensorType::get({}, b.getI32Type()); Attribute branch = DenseElementsAttr::get(tensor_type, 1); op.getEntrySuccessorRegions({branch}, regions); ASSERT_EQ(regions.size(), 1u); EXPECT_EQ(regions.front().getSuccessor(), &op.getBranches()[1]); } TEST(TestTFGRegionOps, TestWhileLikeRegionOpSuccessorRegions) { const char *const code = R"mlir( tfg.func @test(%arg0: tensor<f32>) -> (tensor<f32>) { %WhileRegion, %ctl = WhileRegion(%arg0) { ^bb0(%arg1: tensor<f32>, %arg2: !tf_type.control): %Cond, %ctl = Cond : () -> (tensor<i1>) condition %Cond : tensor<i1> (%arg1) : tensor<f32> } do { ^bb0(%arg1: tensor<f32>, %arg2: !tf_type.control): yield(%arg1) : tensor<f32> } {parallel_iterations = 10 : i64} : (tensor<f32>) -> (tensor<f32>) return(%WhileRegion) : tensor<f32> } )mlir"; MLIRContext context; context.getOrLoadDialect<TFGraphDialect>(); 
OwningOpRef<ModuleOp> module = mlir::parseSourceString<mlir::ModuleOp>(code, &context); ASSERT_TRUE(module); auto op = findOp<WhileRegionOp>(*module); SmallVector<RegionSuccessor> regions; op.getSuccessorRegions(RegionBranchPoint::parent(), regions); ASSERT_EQ(regions.size(), 1u); EXPECT_EQ(regions.front().getSuccessor(), &op.getCondRegion()); regions.clear(); op.getSuccessorRegions(op.getRegion(0), regions); ASSERT_EQ(regions.size(), 2u); EXPECT_TRUE(regions.front().isParent() ^ regions.back().isParent()); regions.clear(); op.getSuccessorRegions(op.getRegion(1), regions); ASSERT_EQ(regions.size(), 1u); EXPECT_EQ(regions.front().getSuccessor(), &op.getCondRegion()); regions.clear(); } TEST(TestTFGRegionOps, TestForLikeRegionOpSuccessorRegions) { const char *const code = R"mlir( tfg.func @test(%arg0: tensor<i32>, %arg1: tensor<f32>) -> (tensor<f32>) { %ForRegion, %ctl = ForRegion(%arg1) from %arg0 to %arg0 by %arg0 { ^bb0(%arg2: tensor<i32>, %arg3: tensor<f32>, %arg4: !tf_type.control, %arg5: !tf_type.control): yield(%arg3) : tensor<f32> } : (tensor<i32>, tensor<i32>, tensor<i32>, tensor<f32>) -> (tensor<f32>) return(%ForRegion) : tensor<f32> } )mlir"; MLIRContext context; context.getOrLoadDialect<TFGraphDialect>(); OwningOpRef<ModuleOp> module = mlir::parseSourceString<mlir::ModuleOp>(code, &context); ASSERT_TRUE(module); auto op = findOp<ForRegionOp>(*module); SmallVector<RegionSuccessor> regions; op.getSuccessorRegions(RegionBranchPoint::parent(), regions); EXPECT_EQ(regions.size(), 1u); regions.clear(); op.getSuccessorRegions(op.getRegion(), regions); ASSERT_EQ(regions.size(), 2u); EXPECT_TRUE(regions.front().isParent() ^ regions.back().isParent()); } } } }
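// --- Illustrative aside, not part of the dataset records above or below ---
// A minimal, self-contained sketch of the idiom the nanobind type casters in the
// Code field above rely on: cast a Python attribute to std::vector<int64_t>, then
// std::copy it through RepeatedFieldBackInserter into a proto's repeated field.
// The function name and the use of a bare RepeatedField<int64_t>* below are
// hypothetical placeholders; only the copy idiom itself mirrors the source.
#include <algorithm>
#include <cstdint>
#include <vector>
#include "google/protobuf/repeated_field.h"

// Appends every element of `dims` to the repeated int64 field `out`, the same way
// the casters fill the *_dimensions fields of the XLA dimension-number protos.
void AppendDims(const std::vector<int64_t>& dims,
                google::protobuf::RepeatedField<int64_t>* out) {
  std::copy(dims.begin(), dims.end(),
            google::protobuf::RepeatedFieldBackInserter(out));
}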
1,050
cpp
tensorflow/tensorflow
tensor_layout
tensorflow/dtensor/cc/tensor_layout.cc
tensorflow/dtensor/tests/tensor_layout_test.cc
#ifndef TENSORFLOW_DTENSOR_CC_TENSOR_LAYOUT_H_ #define TENSORFLOW_DTENSOR_CC_TENSOR_LAYOUT_H_ #include <algorithm> #include <cstdint> #include <iostream> #include <string> #include <utility> #include <vector> #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/container/inlined_vector.h" #include "absl/strings/string_view.h" #include "tensorflow/core/common_runtime/device_mgr.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/statusor.h" #include "tensorflow/dtensor/cc/dstatus.h" #include "tensorflow/dtensor/proto/layout.pb.h" namespace tensorflow { namespace dtensor { bool IsDynamicSize(int64_t size); bool IsDynamicShape(absl::Span<const int64_t> shape); using DeviceLocation = absl::InlinedVector<int64, 4>; using Shard = std::vector<int>; struct ShardVector { bool operator==(const ShardVector& other) const; bool operator!=(const ShardVector& other) const { return !(*this == other); } std::string ToString() const; bool ContainsShard(const Shard& shard) const; std::vector<Shard> shards; std::vector<int> num_shards_per_dim; }; struct MeshDimension { MeshDimension(const std::string& name, int64 size) : name(std::move(name)), size(size) {} MeshDimension() = default; std::string name; int64 size; }; class Mesh { public: static constexpr const char* kEmptyMeshString = "empty_mesh"; static constexpr const char* kUseXLASPMDString = "use_xla_spmd"; static constexpr bool kUseXLASPMD = false; enum class MeshType { kTile, kSingleDevice, }; static Mesh Empty(); bool IsEmpty() const; Mesh() { mesh_type_ = MeshType::kTile; } inline bool IsTile() const { return mesh_type_ == MeshType::kTile; } inline bool IsSingleDevice() const { return mesh_type_ == MeshType::kSingleDevice; } static Mesh CreateMesh(const std::string& mesh_name, const std::vector<std::string>& dim_names, const std::vector<std::int64_t>& mesh_shape, const std::vector<std::int64_t>& global_device_ids, const std::vector<std::string>& global_devices_str, const std::vector<std::int64_t>& local_device_ids, const std::vector<std::string>& local_devices_str, bool use_xla_spmd = Mesh::kUseXLASPMD); static StatusOr<Mesh> ParseFromProto(const MeshProto& proto); static StatusOr<Mesh> FromString(absl::string_view str); std::string ToString() const; StatusOr<MeshProto> ToProto() const; static StatusOr<Mesh> GetAbstractMesh( const std::string& name, const std::vector<MeshDimension>& mesh_dims); static StatusOr<Mesh> GetMesh( const std::string& name, const std::vector<MeshDimension>& mesh_dims, const std::vector<std::int64_t>& global_device_ids, const std::vector<std::int64_t>& local_device_ids, const std::vector<std::string>& local_devices, const std::vector<std::string>& global_devices, bool use_xla_spmd = Mesh::kUseXLASPMD); static StatusOr<Mesh> GetSingleDeviceMesh(absl::string_view single_device); bool is_cpu_mesh() const { return device_type() == "CPU"; } bool is_epu_mesh() const { return device_type() == "EPU"; } bool is_gpu_mesh() const { return device_type() == "GPU"; } bool is_tpu_mesh() const { return device_type() == "TPU"; } bool is_remote() const { return local_device_ids_.empty() && !global_device_ids_.empty(); } StatusOr<Mesh> host_mesh() const { return ToDeviceType("CPU"); } std::string device_type() const; StatusOr<const DeviceLocation> device_location(int offset) const; int64 num_devices() const; absl::Span<const std::string> local_devices() const { return local_devices_; } absl::Span<const int64_t> 
local_device_ids() const { return local_device_ids_; } StatusOr<const std::vector<DeviceNameUtils::ParsedName>> ParsedDevices() const; StatusOr<Mesh> ToDeviceType(const std::string& device_type) const; std::vector<std::string> hosts() const; int64 GetFlattenedCoordinate(const DeviceLocation& loc) const; const MeshDimension& dim(int64 index) const { return mesh_dims_[index]; } std::vector<MeshDimension> dims() const { return mesh_dims_; } StatusOr<int64> dim_size(absl::string_view name) const; std::vector<int64> dim_sizes() const; const std::string& dim_name(int64 index) const { return mesh_dims_[index].name; } int64_t min_global_device_id() const { DCHECK(!global_device_ids_.empty()); return *std::min_element(global_device_ids_.begin(), global_device_ids_.end()); } int64_t num_local_devices() const { return local_devices_.size(); } absl::Span<const int64_t> global_device_ids() const { return global_device_ids_; } const std::vector<std::string>& global_devices() const { return global_devices_; } StatusOr<int32> idx_for_dim(absl::string_view dim_name) const; int GetMeshDimIndexWithName(const std::string& mesh_name) const; bool IsMeshDim(const std::string& dim_name) const; std::vector<std::string> MeshDimNames() const; int64 rank() const; int64 size() const; bool use_xla_spmd() const { return use_xla_spmd_; } const std::string& name() const { return name_; } absl::string_view single_device() const { return single_device_; } uint64 GlobalFingerprint() const; bool operator==(const Mesh& b) const; bool operator!=(const Mesh& b) const { return !((*this) == b); } bool operator<(const Mesh& b) const { return this->ToString() < b.ToString(); } template <typename H> friend H AbslHashValue(H h, const Mesh& m) { return H::combine(std::move(h), m.ToString()); } static std::map<std::string, std::vector<int>>& tpu_core_ids(); static std::string& tpu_host_mesh(); private: MeshType mesh_type_; std::string name_; std::vector<MeshDimension> mesh_dims_; std::vector<std::string> local_devices_; std::vector<int64_t> local_device_ids_; std::vector<int64_t> global_device_ids_; std::vector<std::string> global_devices_; bool use_xla_spmd_ = Mesh::kUseXLASPMD; std::string single_device_; }; std::vector<DeviceLocation> ComputeDeviceLocations(const Mesh& mesh); class Layout { public: enum class LayoutType { kEmpty, kStatic, kSingleDevice, kParted, }; static constexpr const char* kPartedPrefix = "parted:"; static constexpr const char* kStaticPrefix = "sharding_specs:"; static constexpr const char* kSingleDevicePrefix = "maximal:"; static constexpr const char* kUnshardedDim = "unsharded"; static constexpr const char* kAny = "any"; static constexpr const char* kEmptyLayoutString = "empty_layout"; static constexpr const char* kMatch = "match"; Layout() = default; Layout(const Layout& other) = default; inline bool IsSingleDevice() const { return mesh_.IsSingleDevice(); } static Layout Empty(); static StatusOr<Layout> FromProto(const LayoutProto& proto); static StatusOr<Layout> FromString(absl::string_view layout_str); std::string ToString() const; StatusOr<LayoutProto> ToProto() const; LayoutType type() const { return type_; } const Mesh& mesh() const { return mesh_; } static Layout ReplicatedOnMesh(const Mesh& mesh, int rank); static Layout BatchShardedOnMesh(const Mesh& mesh, int rank, const string& mesh_dim, int axis = 0); static Layout ReplicatedLike(const Layout& layout); static Layout BatchShardedLike(const Layout& layout, const string& mesh_dim, int axis = 0); static Layout AnyOnMesh(const Mesh& mesh, int rank); Mesh 
ReducedMesh() const; void set_mesh(Mesh mesh) { mesh_ = mesh; } static StatusOr<Layout> Transposed2D(const Layout& layout); static bool IsUnshardedDimension(const absl::string_view name) { return name == kUnshardedDim; } static bool IsShardedDimension(const absl::string_view name) { return !IsUnshardedDimension(name); } static StatusOr<Layout> GetLayout( LayoutType type, const std::vector<std::string>& sharding_spec_strs, const Mesh& mesh); static StatusOr<Layout> GetLayout( const std::vector<std::string>& sharding_spec_strs, const Mesh& mesh) { return GetLayout(LayoutType::kStatic, sharding_spec_strs, mesh); } static StatusOr<Layout> GetSingleDeviceLayout(const Mesh& mesh) { return GetLayout(LayoutType::kSingleDevice, {}, mesh); } StatusOr<Layout> GetLayoutWithReducedDims( const absl::flat_hash_set<int>& reduced_dims, bool keep_dims) const; StatusOr<Layout> ToParted() const { return GetLayout(LayoutType::kParted, sharding_specs_, mesh_); } Layout Truncate(int64 split_point, bool end = false) const; Layout LeftPad(int64 rank) const; StatusOr<Layout> EnsureRank(int64_t rank) const; bool IsFullyReplicated() const; bool IsLastDimReplicated() const; bool IsBatchParallel() const; bool IsBatchParallel(int non_batch_rank) const; bool IsEmpty() const; std::vector<int64_t> GlobalShapeFromLocalShape( absl::Span<const int64_t> local_shape, const std::vector<std::vector<int64_t>>* local_shapes = nullptr) const; std::vector<int64_t> LocalShapeFromGlobalShape( absl::Span<const int64_t> global_shape) const; PartialTensorShape LocalShapeFromGlobalShape( const PartialTensorShape& global_shape) const; int64 rank() const { return sharding_specs_.size(); } size_t num_shards_for_dim(int) const; std::vector<int32> num_shards() const; ShardVector GetShardVector() const; std::vector<std::string> sharding_spec_strs() const; int64 num_devices() const { return mesh_.num_devices(); } std::map<std::string, ShardVector> HostShardMap() const; const std::string& sharding_spec(int idx) const; bool IsEquivalent(const Layout& b) const; bool IsEquivalentIgnoringType(const Layout& b) const; bool operator==(const Layout& b) const; bool operator!=(const Layout& b) const { return !((*this) == b); } bool operator<(const Layout& b) const { return this->ToString() < b.ToString(); } private: std::vector<std::string> sharding_specs_; LayoutType type_; Mesh mesh_; }; StatusOr<Layout> ConcatenateLayouts(const Layout& layout_a, const Layout& layout_b); StatusOr<Layout> GetMostShardedLayout(const std::vector<Layout>& layouts); StatusOr<Layout> GetLeastShardedLayout(const std::vector<Layout>& layouts); } } #endif #include "tensorflow/dtensor/cc/tensor_layout.h" #include <algorithm> #include <cstddef> #include <cstdint> #include <map> #include <memory> #include <numeric> #include <set> #include <string> #include <utility> #include <vector> #include "absl/container/inlined_vector.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_join.h" #include "absl/strings/str_split.h" #include "absl/strings/string_view.h" #include "absl/types/optional.h" #include "mlir/IR/BuiltinTypes.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/lib/math/math_util.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/fingerprint.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/statusor.h" #include "tensorflow/core/util/device_name_utils.h" #include "tensorflow/dtensor/cc/dstatus.h" #include "tensorflow/dtensor/proto/layout.pb.h" namespace tensorflow { 
namespace dtensor { constexpr const char* Layout::kUnshardedDim; constexpr const char* Layout::kAny; constexpr const char* Layout::kEmptyLayoutString; constexpr const char* Layout::kMatch; constexpr const char* Mesh::kEmptyMeshString; constexpr const char* Mesh::kUseXLASPMDString; constexpr bool Mesh::kUseXLASPMD; namespace { ShardVector ExpandShardVector(const ShardVector& shard_vec, const std::vector<int>& new_num_shards_per_dim) { if (shard_vec.shards.empty()) return shard_vec; auto ExpandShard = [shard_vec, new_num_shards_per_dim]( const Shard& shard, int dim_ind) -> std::vector<Shard> { int original_dim_size = shard_vec.num_shards_per_dim[dim_ind]; int new_dim_size = new_num_shards_per_dim[dim_ind]; int size_ratio = new_dim_size / original_dim_size; std::vector<Shard> expanded_shards; expanded_shards.reserve(size_ratio); for (int i = 0; i < size_ratio; ++i) { int original_coord = shard[dim_ind]; int shifted_coord = (original_coord - 1) * size_ratio + 1 + i; Shard new_shard = shard; new_shard[dim_ind] = shifted_coord; expanded_shards.push_back(new_shard); } return expanded_shards; }; std::vector<Shard> total_expanded_shards = shard_vec.shards; for (int dim_ind = 0; dim_ind < new_num_shards_per_dim.size(); ++dim_ind) { std::vector<Shard> dim_expanded_shards; for (const auto& shard : total_expanded_shards) { std::vector<Shard> expanded_shards = ExpandShard(shard, dim_ind); dim_expanded_shards.insert(dim_expanded_shards.end(), expanded_shards.begin(), expanded_shards.end()); } total_expanded_shards = dim_expanded_shards; } std::sort(total_expanded_shards.begin(), total_expanded_shards.end()); ShardVector expanded_shard_vec; expanded_shard_vec.shards = total_expanded_shards; expanded_shard_vec.num_shards_per_dim = new_num_shards_per_dim; return expanded_shard_vec; } } std::vector<DeviceLocation> ComputeDeviceLocations(const Mesh& mesh) { std::vector<DeviceLocation> mesh_locs(mesh.size()); for (size_t i = 0; i < mesh.size(); ++i) mesh_locs[i] = *(mesh.device_location(i)); return mesh_locs; } bool ShardVector::operator==(const ShardVector& other) const { if (this->shards.empty() && other.shards.empty()) return true; if (this->shards.empty() || other.shards.empty()) return false; if (this->num_shards_per_dim.size() != other.num_shards_per_dim.size()) return false; Shard first_shard_this = this->shards[0]; Shard first_shard_other = other.shards[0]; std::vector<int> new_sizes; for (size_t i = 0; i < first_shard_this.size(); ++i) { int lcm = this->num_shards_per_dim[i] * other.num_shards_per_dim[i] / MathUtil::GCD(static_cast<unsigned>(this->num_shards_per_dim[i]), static_cast<unsigned>(other.num_shards_per_dim[i])); new_sizes.push_back(lcm); } return ExpandShardVector(*this, new_sizes).shards == ExpandShardVector(other, new_sizes).shards; } std::string ShardVector::ToString() const { std::string string = "shards:["; std::vector<std::string> shard_strs; shard_strs.reserve(shards.size()); for (const Shard& shard : shards) shard_strs.push_back("(" + absl::StrJoin(shard, ",") + ")"); absl::StrAppend(&string, absl::StrJoin(shard_strs, ",")); absl::StrAppend(&string, "] num_shards_per_dim:("); absl::StrAppend(&string, absl::StrJoin(num_shards_per_dim, ",") + ")"); return string; } bool ShardVector::ContainsShard(const Shard& shard) const { for (const auto& shard_in_vec : shards) if (shard_in_vec == shard) return true; return false; } bool IsDynamicSize(int64_t size) { return mlir::ShapedType::isDynamic(size) || size == -1; } bool IsDynamicShape(absl::Span<const int64_t> shape) { for (int64_t size : 
shape) { if (IsDynamicSize(size)) return true; } return false; } std::map<std::string, std::vector<int>>& Mesh::tpu_core_ids() { static auto tpu_core_ids = new std::map<std::string, std::vector<int>>(); return *tpu_core_ids; } std::string& Mesh::tpu_host_mesh() { static auto tpu_host_mesh = new std::string; return *tpu_host_mesh; } StatusOr<Mesh> Mesh::ParseFromProto(const MeshProto& proto) { Mesh mesh; mesh.name_ = proto.name(); mesh.use_xla_spmd_ = proto.use_xla_spmd(); if (proto.single_device().empty()) { mesh.mesh_type_ = MeshType::kTile; for (const auto& device : proto.local_devices()) { mesh.local_devices_.push_back(device); } for (const auto& device_id : proto.local_device_ids()) { mesh.local_device_ids_.push_back(device_id); } for (const auto& device_id : proto.global_device_ids()) { mesh.global_device_ids_.push_back(device_id); } for (const auto& device : proto.global_devices()) { mesh.global_devices_.push_back(device); } mesh.mesh_dims_.resize(proto.mesh_dimensions_size()); for (int i = 0; i < proto.mesh_dimensions_size(); ++i) { const MeshDimensionProto& dim = proto.mesh_dimensions(i); mesh.mesh_dims_[i].name = dim.name(); mesh.mesh_dims_[i].size = dim.size(); } int64 mesh_size = mesh.size(); int num_devices = proto.global_device_ids_size(); if (mesh_size > 0 && mesh_size != num_devices) { TF_RETURN_WITH_CONTEXT(absl::InvalidArgumentError( absl::StrCat("Number of devices ", num_devices, " not matching mesh size ", mesh_size))); } } else { mesh.mesh_type_ = MeshType::kSingleDevice; mesh.single_device_ = proto.single_device(); } return mesh; } StatusOr<Mesh> Mesh::GetAbstractMesh( const std::string& name, const std::vector<MeshDimension>& mesh_dims) { Mesh mesh; mesh.mesh_type_ = MeshType::kTile; mesh.name_ = name; mesh.mesh_dims_ = mesh_dims; std::set<std::string> dims_set; for (const MeshDimension& dim : mesh.dims()) { if (dims_set.find(dim.name) != dims_set.end()) TF_RETURN_WITH_CONTEXT( absl::InvalidArgumentError("repeated mesh dimension")); if (dim.name == Layout::kAny || dim.name == Layout::kMatch || dim.name == Layout::kUnshardedDim) TF_RETURN_WITH_CONTEXT(absl::InvalidArgumentError( absl::StrCat("mesh dimension name ", dim.name, " is reserved"))); dims_set.insert(dim.name); } return mesh; } StatusOr<Mesh> Mesh::GetMesh(const std::string& name, const std::vector<MeshDimension>& mesh_dims, const std::vector<std::int64_t>& global_device_ids, const std::vector<std::int64_t>& local_device_ids, const std::vector<std::string>& local_devices, const std::vector<std::string>& global_devices, bool use_xla_spmd) { TF_ASSIGN_OR_RETURN(Mesh mesh, GetAbstractMesh(name, mesh_dims)); mesh.global_device_ids_ = global_device_ids; mesh.local_device_ids_ = local_device_ids; mesh.local_devices_ = local_devices; mesh.global_devices_ = global_devices; mesh.use_xla_spmd_ = use_xla_spmd; size_t global_n = mesh.global_device_ids_.size(); size_t local_n = mesh.local_device_ids_.size(); size_t dev_n = mesh.local_devices_.size(); if (!(global_n >= local_n && dev_n == local_n)) TF_RETURN_WITH_CONTEXT(absl::InvalidArgumentError(absl::StrCat( "number of global_device_ids ", std::to_string(global_n), " local_devices ids ", std::to_string(local_n), " and local devices ", std::to_string(dev_n), "not meeting requirements"))); if (global_n == 0) return Mesh::Empty(); if (local_n && !(global_n % local_n == 0)) TF_RETURN_WITH_CONTEXT(absl::InvalidArgumentError(absl::StrCat( "Uneven local clusters with global_ids ", std::to_string(global_n), " and local_devices ids ", std::to_string(local_n)))); if (mesh.size() 
!= global_n) TF_RETURN_WITH_CONTEXT(absl::InvalidArgumentError( "mesh size doesn't match number of devices")); TF_ASSIGN_OR_RETURN(const auto& parsed_devs, mesh.ParsedDevices()); std::set<std::string> types_set; for (const DeviceNameUtils::ParsedName& dev : parsed_devs) { if (!dev.has_job || !dev.has_task || !dev.has_type) return absl::InvalidArgumentError( "Failed to either identify host or device type"); types_set.insert(dev.type); if (types_set.size() > 1) return absl::InvalidArgumentError(absl::StrCat( "More than one device type per mesh not supported. Found ", types_set.size())); } return mesh; } StatusOr<Mesh> Mesh::GetSingleDeviceMesh(absl::string_view single_device) { if (single_device.empty()) { return absl::InvalidArgumentError("Single device is empty."); } Mesh mesh; mesh.mesh_type_ = MeshType::kSingleDevice; mesh.single_device_ = single_device; return mesh; } StatusOr<int64_t> Mesh::dim_size(absl::string_view name) const { for (const auto& mesh_dim : dims()) { if (name == mesh_dim.name) { return mesh_dim.size; } } std::vector<std::string> dim_names; for (const auto& mesh_dim : dims()) dim_names.push_back(mesh_dim.name); return absl::NotFoundError( absl::StrCat("Dimension ", name, " does not exist in mesh.", "Available dimensions: ", absl::StrJoin(dim_names, ","))); } std::vector<int64_t> Mesh::dim_sizes() const { std::vector<int64_t> dim_sizes; if (mesh_dims_.empty()) return dim_sizes; for (const auto& mesh_dim : mesh_dims_) dim_sizes.push_back(mesh_dim.size); return dim_sizes; } bool Mesh::operator==(const Mesh& b) const { StatusOr<MeshProto> this_proto = ToProto(); StatusOr<MeshProto> b_proto = b.ToProto(); if (!this_proto.ok() || !b_proto.ok()) { return false; } return protobuf::util::MessageDifferencer::Equals(*this_proto, *b_proto); } bool Mesh::IsEmpty() const { return mesh_type_ == MeshType::kTile && global_device_ids_.empty(); } StatusOr<const std::vector<DeviceNameUtils::ParsedName>> Mesh::ParsedDevices() const { std::vector<DeviceNameUtils::ParsedName> parsed_devices( local_devices_.size()); for (std::size_t i = 0; i < local_devices_.size(); ++i) if (!DeviceNameUtils::ParseFullOrLocalName( absl::string_view(local_devices_[i]), &parsed_devices[i])) return absl::InvalidArgumentError("Failed to parse local_devices"); return parsed_devices; } StatusOr<Mesh> Mesh::ToDeviceType(const std::string& device_type) const { std::vector<std::string> to_local_devices; DeviceNameUtils::ParsedName parsed_dev; for (const std::string& local_dev : local_devices_) { if (!DeviceNameUtils::ParseFullOrLocalName(absl::string_view(local_dev), &parsed_dev)) { return absl::InvalidArgumentError("Failed to parse local devices"); } to_local_devices.push_back( DeviceNameUtils::FullName(parsed_dev.job, parsed_dev.replica,
#include "tensorflow/dtensor/cc/tensor_layout.h" #include <map> #include <memory> #include <ostream> #include <string> #include <vector> #include <gmock/gmock.h> #include "absl/container/inlined_vector.h" #include "absl/strings/match.h" #include "mlir/IR/BuiltinTypeInterfaces.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/dtensor/cc/dstatus.h" #include "tensorflow/dtensor/proto/layout.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/status_matchers.h" #include "tsl/platform/statusor.h" namespace tensorflow { namespace dtensor { namespace { using ::testing::ContainsRegex; using ::testing::ElementsAre; using ::testing::IsEmpty; using ::testing::SizeIs; using ::tsl::testing::IsOkAndHolds; using ::tsl::testing::StatusIs; class ProtoStringMatcher { public: explicit ProtoStringMatcher(const tensorflow::protobuf::Message& expected) : expected_(expected.SerializeAsString()) {} template <typename Message> bool MatchAndExplain(const Message& p, ::testing::MatchResultListener*) const { return p.SerializeAsString() == expected_; } void DescribeTo(::std::ostream* os) const { *os << expected_; } void DescribeNegationTo(::std::ostream* os) const { *os << "not equal to expected message: " << expected_; } private: const std::string expected_; }; inline ::testing::PolymorphicMatcher<ProtoStringMatcher> EqualsProto( const tensorflow::protobuf::Message& x) { return ::testing::MakePolymorphicMatcher(ProtoStringMatcher(x)); } class LayoutTest : public ::testing::Test { protected: Layout BatchLayout() { return Layout::FromString("sharding_specs:x,batch, mesh:|x=4,batch=8|*TPU") .value(); } }; TEST(MeshTest, FromStringEmptyMesh) { Mesh mesh = Mesh::Empty(); std::string mesh_str = mesh.ToString(); EXPECT_EQ(mesh_str, Mesh::kEmptyMeshString); } TEST(MeshTest, FromStringMeshWithGlobalDevices) { StatusOr<Mesh> mesh = Mesh::FromString( "mesh:|x=2|0,1|0|/job:localhost/task:0/device:CPU:0|/job:localhost/" "task:0/device:CPU:0,/job:localhost/task:0/device:CPU:1"); EXPECT_THAT(mesh->global_devices(), ElementsAre("/job:localhost/task:0/device:CPU:0", "/job:localhost/task:0/device:CPU:1")); } TEST(MeshTest, FromStringMeshWithXLASPMDAndGlobalDevices) { StatusOr<Mesh> mesh = Mesh::FromString( "mesh:|x=2|0,1|0|/job:localhost/task:0/device:CPU:0|/job:localhost/" "task:0/device:CPU:1|use_xla_spmd"); EXPECT_TRUE(mesh->use_xla_spmd()); } TEST(MeshTest, FromStringMeshWithXLASPMD) { StatusOr<Mesh> mesh = Mesh::FromString( "mesh:|x=1|0|0|/job:localhost/task:0/device:CPU:0|use_xla_spmd"); EXPECT_TRUE(mesh->use_xla_spmd()); } TEST(MeshTest, FromStringMeshWithoutXLASPMD) { StatusOr<Mesh> mesh = Mesh::FromString("mesh:|x=1|0|0|/job:localhost/task:0/device:CPU:0"); EXPECT_FALSE(mesh->use_xla_spmd()); } TEST(MeshTest, ToStringMeshWithoutXLASPMD) { Mesh mesh = Mesh::CreateMesh("MyMesh", {"x"}, {2}, {0, 1}, {"/job:localhost/task:0/device:CPU:0", "/job:localhost/task:0/device:CPU:1"}, {0, 1}, {"/job:localhost/task:0/device:CPU:0", "/job:localhost/task:0/device:CPU:1"}, false); EXPECT_TRUE(!absl::StrContains(mesh.ToString(), Mesh::kUseXLASPMDString)); } TEST(MeshTest, ToStringMeshWithXLASPMD) { Mesh mesh = Mesh::CreateMesh("MyMesh", {"x"}, {2}, {0, 1}, {"/job:localhost/task:0/device:CPU:0", "/job:localhost/task:0/device:CPU:1"}, {0, 1}, {"/job:localhost/task:0/device:CPU:0", "/job:localhost/task:0/device:CPU:1"}, true); EXPECT_THAT(mesh.ToString(), ContainsRegex(Mesh::kUseXLASPMDString)); } TEST(MeshTest, FromStringInvalidSingleDeviceMesh) { 
EXPECT_THAT(Mesh::FromString("/job:localhost/device:CPU:0"), StatusIs(tsl::error::INVALID_ARGUMENT)); } TEST(MeshTest, FromStringSingleDeviceMesh) { TF_ASSERT_OK_AND_ASSIGN( Mesh mesh, Mesh::FromString("/job:localhost/task:0/device:CPU:0")); EXPECT_EQ(mesh.ToString(), "/job:localhost/task:0/device:CPU:0"); } TEST_F(LayoutTest, FromStringEmptyLayout) { Layout layout = Layout::Empty(); std::string layout_str = layout.ToString(); TF_ASSERT_OK_AND_ASSIGN(Layout layout_from_str, Layout::FromString(layout_str)); TF_ASSERT_OK_AND_ASSIGN(LayoutProto layout_from_str_proto, layout_from_str.ToProto()); EXPECT_THAT(layout.ToProto(), IsOkAndHolds(EqualsProto(layout_from_str_proto))); } TEST_F(LayoutTest, LayoutToFromString) { Layout layout = BatchLayout(); std::string layout_str = layout.ToString(); TF_ASSERT_OK_AND_ASSIGN(Layout layout_from_str, Layout::FromString(layout_str)); TF_ASSERT_OK_AND_ASSIGN(LayoutProto layout_from_str_proto, layout_from_str.ToProto()); EXPECT_THAT(layout.ToProto(), IsOkAndHolds(EqualsProto(layout_from_str_proto))); } TEST_F(LayoutTest, LayoutToFromStringNotSharded) { std::string layout_str = "sharding_specs:x," + string(Layout::kUnshardedDim) + ", mesh:|x=1|0|0|/job:localhost/task:0/device:CPU:0"; EXPECT_EQ(layout_str, Layout::FromString(layout_str)->ToString()); } TEST_F(LayoutTest, LayoutToFromStringAny) { std::string layout_str = "sharding_specs:any, mesh:|x=1|0|0|/job:localhost/task:0/device:CPU:0"; EXPECT_EQ(layout_str, Layout::FromString(layout_str)->ToString()); } TEST_F(LayoutTest, LayoutToFromStringSingleDevice) { std::string layout_str = "maximal:true, mesh:/job:localhost/task:0/device:CPU:0"; EXPECT_EQ(layout_str, Layout::FromString(layout_str)->ToString()); } TEST_F(LayoutTest, AutoGenerateLayout) { std::string layout_str = "sharding_specs:x, mesh:|x=2,y=2|*CPU"; std::string exp_layout_str = "sharding_specs:x, " "mesh:|x=2,y=2|0,1,2,3|0,1,2,3|/job:localhost/task:0/device:CPU:0,/" "job:localhost/task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2,/" "job:localhost/task:0/device:CPU:3"; EXPECT_EQ(exp_layout_str, Layout::FromString(layout_str)->ToString()); } TEST_F(LayoutTest, MeshToFromString) { Mesh mesh = BatchLayout().mesh(); std::string mesh_str = mesh.ToString(); TF_ASSERT_OK_AND_ASSIGN(Mesh mesh_from_str, Mesh::FromString(mesh_str)); TF_ASSERT_OK_AND_ASSIGN(MeshProto mesh_from_str_proto, mesh_from_str.ToProto()); EXPECT_THAT(mesh.ToProto(), IsOkAndHolds(EqualsProto(mesh_from_str_proto))); } TEST_F(LayoutTest, GetType) { Mesh mesh = BatchLayout().mesh(); EXPECT_TRUE(mesh.is_tpu_mesh()); } TEST_F(LayoutTest, OnTPUMesh) { Layout layout = BatchLayout(); EXPECT_TRUE(layout.mesh().is_tpu_mesh()); } TEST_F(LayoutTest, NumShardsAsVector) { std::vector<int32> shards = {4, 8}; EXPECT_EQ(BatchLayout().num_shards(), shards); } TEST_F(LayoutTest, IsReplicated) { EXPECT_FALSE(BatchLayout().IsFullyReplicated()); } TEST_F(LayoutTest, MeshDeviceLocations) { Layout layout = BatchLayout(); absl::InlinedVector<int64, 4> offset = {1, 2}; EXPECT_THAT(layout.mesh().device_location(10), IsOkAndHolds(offset)); offset = {2, 2}; EXPECT_THAT(layout.mesh().device_location(18), IsOkAndHolds(offset)); offset = {3, 7}; EXPECT_THAT(layout.mesh().device_location(31), IsOkAndHolds(offset)); EXPECT_FALSE(layout.mesh().device_location(32).ok()); EXPECT_FALSE(layout.mesh().device_location(-1).ok()); } TEST_F(LayoutTest, ScalarLayout) { TF_ASSERT_OK_AND_ASSIGN( Layout layout, Layout::FromString("sharding_specs:scalar, mesh:|x=4,y=4|*TPU")); EXPECT_EQ(layout.num_devices(), 16); 
EXPECT_TRUE(layout.mesh().is_tpu_mesh()); TF_ASSERT_OK_AND_ASSIGN(LayoutProto layout_proto, layout.ToProto()); EXPECT_EQ(layout_proto.mesh_config().mesh_dimensions(0).size(), 4); EXPECT_EQ(layout.rank(), 0); } TEST_F(LayoutTest, ParseSimpleTpuMesh) { TF_ASSERT_OK_AND_ASSIGN( Layout layout, Layout::FromString("sharding_specs:x, mesh:|x=4,y=4|*TPU")); EXPECT_EQ(layout.num_devices(), 16); EXPECT_TRUE(layout.mesh().is_tpu_mesh()); TF_ASSERT_OK_AND_ASSIGN(LayoutProto layout_proto, layout.ToProto()); EXPECT_EQ(layout_proto.mesh_config().mesh_dimensions(0).size(), 4); } TEST_F(LayoutTest, ParseSimpleCpuMesh) { TF_ASSERT_OK_AND_ASSIGN( Layout layout, Layout::FromString("sharding_specs:x,unsharded, mesh:|x=4,y=4|*CPU")); EXPECT_EQ(layout.num_devices(), 16); EXPECT_FALSE(layout.mesh().is_tpu_mesh()); TF_ASSERT_OK_AND_ASSIGN(LayoutProto layout_proto, layout.ToProto()); EXPECT_EQ(layout_proto.mesh_config().mesh_dimensions(0).size(), 4); } TEST_F(LayoutTest, ParseFailsOnRepeatedShardingSpec) { StatusOr<Layout> maybe_layout = Layout::FromString("sharding_specs:x,x, mesh:|x=1,y=2|*CPU"); EXPECT_FALSE(maybe_layout.ok()); } TEST_F(LayoutTest, ParseFailsOnInvalidScalarShardingSpec) { StatusOr<Layout> maybe_layout = Layout::FromString("sharding_specs:x,scalar, mesh:|x=1,y=2|*CPU"); EXPECT_FALSE(maybe_layout.ok()); } TEST_F(LayoutTest, ParseFailsOnShardingSpecOverNonExistentMeshDim) { StatusOr<Layout> maybe_layout = Layout::FromString("sharding_specs:x,z, mesh:|x=1,y=2|*CPU"); EXPECT_FALSE(maybe_layout.ok()); } TEST_F(LayoutTest, ParseFailsOnBadDeviceString) { auto layout = Layout::FromString("sharding_specs:x,unsharded, d:TPU mesh:x=4,y=4"); EXPECT_FALSE(layout.ok()) << layout.status(); } TEST_F(LayoutTest, ParseReplicatedLayout) { TF_ASSERT_OK_AND_ASSIGN( auto layout, Layout::FromString( "sharding_specs:unsharded,unsharded, mesh:|x=4,y=4|*CPU")); EXPECT_EQ(layout.num_devices(), 16); EXPECT_FALSE(layout.mesh().is_tpu_mesh()); EXPECT_TRUE(layout.IsFullyReplicated()); TF_ASSERT_OK_AND_ASSIGN(LayoutProto layout_proto, layout.ToProto()); EXPECT_EQ(layout_proto.mesh_config().mesh_dimensions(0).size(), 4); } TEST_F(LayoutTest, SingleHostFullyReplicatedReducedMesh) { TF_ASSERT_OK_AND_ASSIGN( Layout replicated_layout, Layout::FromString( "sharding_specs:unsharded,unsharded, mesh:|x=2,y=2|*CPU")); Mesh reduced_mesh = replicated_layout.ReducedMesh(); EXPECT_EQ(reduced_mesh.size(), 1); EXPECT_THAT(reduced_mesh.hosts(), SizeIs(1)); } TEST_F(LayoutTest, SingleHostFullShardedReducedMesh) { Layout layout = BatchLayout(); Mesh original_mesh = layout.mesh(); Mesh reduced_mesh = layout.ReducedMesh(); EXPECT_EQ(original_mesh.ToString(), reduced_mesh.ToString()); EXPECT_EQ(reduced_mesh.size(), 32); EXPECT_THAT(reduced_mesh.hosts(), SizeIs(1)); } TEST_F(LayoutTest, MultiHostReplicatedReducedMesh) { StatusOr<Layout> layout = Layout::FromString( "sharding_specs:unsharded,unsharded, " "mesh:|x=4,y=2|0,1,2,3,4,5,6,7|4,5,6,7|" "/job:localhost/task:1/device:CPU:0,/job:localhost/task:1/device:CPU:1," "/job:localhost/task:1/device:CPU:2,/job:localhost/task:1/device:CPU:3"); Mesh reduced_mesh = layout->ReducedMesh(); EXPECT_EQ(reduced_mesh.size(), 1); EXPECT_THAT(reduced_mesh.global_device_ids(), ElementsAre(0)); EXPECT_THAT(reduced_mesh.local_device_ids(), IsEmpty()); EXPECT_THAT(reduced_mesh.local_devices(), IsEmpty()); EXPECT_THAT(reduced_mesh.hosts(), IsEmpty()); } TEST_F(LayoutTest, MultiHostPartiallyShardedReducedMesh) { StatusOr<Layout> layout = Layout::FromString( "sharding_specs:x,unsharded, " 
"mesh:|x=4,y=2|0,1,2,3,4,5,6,7|4,5,6,7|" "/job:localhost/task:1/device:CPU:0,/job:localhost/task:1/device:CPU:1," "/job:localhost/task:1/device:CPU:2,/job:localhost/task:1/device:CPU:3"); Mesh reduced_mesh = layout->ReducedMesh(); EXPECT_EQ(reduced_mesh.size(), 4); EXPECT_THAT(reduced_mesh.global_device_ids(), ElementsAre(0, 2, 4, 6)); EXPECT_THAT(reduced_mesh.local_device_ids(), ElementsAre(4, 6)); EXPECT_THAT(reduced_mesh.local_devices(), ElementsAre("/job:localhost/task:1/device:CPU:0", "/job:localhost/task:1/device:CPU:2")); EXPECT_THAT(reduced_mesh.hosts(), SizeIs(1)); } TEST_F(LayoutTest, MultiHostFullyShardedReducedMesh) { StatusOr<Layout> layout = Layout::FromString( "sharding_specs:x,y, " "mesh:|x=4,y=2|0,1,2,3,4,5,6,7|4,5,6,7|" "/job:localhost/task:1/device:CPU:0,/job:localhost/task:1/device:CPU:1," "/job:localhost/task:1/device:CPU:2,/job:localhost/task:1/device:CPU:3"); Mesh reduced_mesh = layout->ReducedMesh(); EXPECT_EQ(reduced_mesh.size(), 8); EXPECT_THAT(reduced_mesh.global_device_ids(), ElementsAre(0, 1, 2, 3, 4, 5, 6, 7)); EXPECT_THAT(reduced_mesh.local_device_ids(), ElementsAre(4, 5, 6, 7)); EXPECT_THAT(reduced_mesh.local_devices(), ElementsAre("/job:localhost/task:1/device:CPU:0", "/job:localhost/task:1/device:CPU:1", "/job:localhost/task:1/device:CPU:2", "/job:localhost/task:1/device:CPU:3")); EXPECT_EQ(reduced_mesh.hosts().size(), 1); } TEST_F(LayoutTest, FlippedShardedMultiHostMeshes) { StatusOr<Layout> multi_host_layout_1 = Layout::FromString( "sharding_specs:x,y, " "mesh:|x=4,y=2|0,1,2,3,4,5,6,7|4,5,6,7|" "/job:localhost/task:1/device:CPU:0,/job:localhost/task:1/device:CPU:1," "/job:localhost/task:1/device:CPU:2,/job:localhost/task:1/device:CPU:3"); StatusOr<Layout> multi_host_layout_2 = Layout::FromString( "sharding_specs:x,y, " "mesh:|x=4,y=2|0,1,2,3,4,5,6,7|6,7,4,5|" "/job:localhost/task:1/device:CPU:2,/job:localhost/task:1/device:CPU:3," "/job:localhost/task:1/device:CPU:0,/job:localhost/task:1/device:CPU:1"); Mesh reduced_mesh_1 = multi_host_layout_1->ReducedMesh(); Mesh reduced_mesh_2 = multi_host_layout_2->ReducedMesh(); EXPECT_FALSE(reduced_mesh_1 == reduced_mesh_2); } TEST_F(LayoutTest, ShardEqualityOneDim) { ShardVector shard_vec1; Shard shard1{1}; shard_vec1.shards.push_back(shard1); shard_vec1.num_shards_per_dim.push_back(1); ShardVector shard_vec2; Shard shard2{2}; Shard shard3{3}; shard_vec2.shards.push_back(shard1); shard_vec2.shards.push_back(shard2); shard_vec2.shards.push_back(shard3); shard_vec2.num_shards_per_dim.push_back(3); EXPECT_EQ(shard_vec1, shard_vec2); } TEST_F(LayoutTest, ShardEqualityOneDimOffset) { ShardVector shard_vec1; Shard shard1{3}; shard_vec1.shards.push_back(shard1); shard_vec1.num_shards_per_dim.push_back(3); ShardVector shard_vec2; Shard shard2{7}; Shard shard3{8}; Shard shard4{9}; shard_vec2.shards.push_back(shard2); shard_vec2.shards.push_back(shard3); shard_vec2.shards.push_back(shard4); shard_vec2.num_shards_per_dim.push_back(9); EXPECT_EQ(shard_vec1, shard_vec2); } TEST_F(LayoutTest, ShardEqualityTwoDims) { auto GenFullVector = [](std::vector<int> num_shards_per_dim) -> ShardVector { ShardVector shard_vec; shard_vec.num_shards_per_dim = num_shards_per_dim; for (int i = 1; i <= num_shards_per_dim[0]; ++i) for (int j = 1; j <= num_shards_per_dim[1]; ++j) { Shard shard{i, j}; shard_vec.shards.push_back(shard); } return shard_vec; }; std::vector<int> num_shards_per_dim_1{2, 4}; ShardVector shard_vec1 = GenFullVector(num_shards_per_dim_1); std::vector<int> num_shards_per_dim_2{3, 3}; ShardVector shard_vec2 = 
GenFullVector(num_shards_per_dim_2); EXPECT_EQ(shard_vec1, shard_vec2); } TEST_F(LayoutTest, Shards) { TF_ASSERT_OK_AND_ASSIGN( Layout layout, Layout::FromString("sharding_specs:x,y, mesh:|x=2,y=3|*CPU")); ShardVector shard_vec = layout.GetShardVector(); std::string expected_shard_vec_str = "shards:[(1,1),(1,2),(1,3),(2,1),(2,2),(2,3)] num_shards_per_dim:(2,3)"; EXPECT_EQ(shard_vec.ToString(), expected_shard_vec_str); } TEST_F(LayoutTest, ShardsInverted) { TF_ASSERT_OK_AND_ASSIGN( Layout layout, Layout::FromString("sharding_specs:y,x, mesh:|x=2,y=3|*CPU")); ShardVector shards = layout.GetShardVector(); std::string expected_shards = "shards:[(1,1),(2,1),(3,1),(1,2),(2,2),(3,2)] num_shards_per_dim:(3,2)"; EXPECT_EQ(shards.ToString(), expected_shards); } TEST_F(LayoutTest, HostShardMap) { TF_ASSERT_OK_AND_ASSIGN( Layout layout, Layout::FromString("sharding_specs:x,y, mesh:TPU|x=2,y=2|*TPU")); std::string host_name = layout.mesh().hosts()[0]; auto host_map = layout.HostShardMap(); std::string expected_shards = "shards:[(1,1),(1,2),(2,1),(2,2)] num_shards_per_dim:(2,2)"; EXPECT_EQ(host_map.find(host_name)->second.ToString(), expected_shards); } TEST_F(LayoutTest, MultiHostMultiDeviceShards) { std::string host1 = "/job:localhost/task:0"; std::string host2 = "/job:localhost/task:1"; std::string device1 = "/device:TPU:0"; std::string device2 = "/device:TPU:1"; TF_ASSERT_OK_AND_ASSIGN( Layout layout, Layout::FromString( "sharding_specs:x,unsharded, mesh:TPU|x=4,y=1|0,1,2,3|0,1,2,3|" + host1 + device1 + "," + host1 + device2 + "," + host2 + device1 + "," + host2 + device2)); std::string expected_shard_vec = "shards:[(1,1),(2,1),(3,1),(4,1)] num_shards_per_dim:(4,1)"; EXPECT_EQ(layout.GetShardVector().ToString(), expected_shard_vec); std::map<std::string, ShardVector> host_shard_map = layout.HostShardMap(); std::string expected_shards_host1 = "shards:[(1,1),(2,1)] num_shards_per_dim:(4,1)"; ShardVector host1_shard_vec = host_shard_map.find(host1)->second; EXPECT_EQ(host1_shard_vec.ToString(), expected_shards_host1); std::string expected_shards_host2 = "shards:[(3,1),(4,1)] num_shards_per_dim:(4,1)"; ShardVector host2_shard_vec = host_shard_map.find(host2)->second; EXPECT_EQ(host2_shard_vec.ToString(), expected_shards_host2); } TEST_F(LayoutTest, MultiHostCommXYSharded) { std::string host_0 = "/job:localhost/task:0/"; std::string host_1 = "/job:localhost/task:1/"; StatusOr<Layout> send_layout = Layout::FromString("sharding_specs:y,x, mesh:|x=2,y=2|0,1,2,3|0,1,2,3|" + host_0 + "device:CPU:0," + host_0 + "device:CPU:1," + host_1 + "device:CPU:0," + host_1 + "device:CPU:1"); StatusOr<Layout> recv_layout = Layout::FromString("sharding_specs:x,y, mesh:|x=2,y=2|0,1,2,3|0,1,2,3|" + host_0 + "device:TPU:0," + host_0 + "device:TPU:1," + host_1 + "device:TPU:0," + host_1 + "device:TPU:1"); std::vector<std::string> send_hosts = send_layout->ReducedMesh().hosts(); std::vector<std::string> recv_hosts = recv_layout->ReducedMesh().hosts(); EXPECT_TRUE(send_hosts == recv_hosts); } TEST_F(LayoutTest, MultiHostCommXSharded) { std::vector<std::string> hosts{"/job:localhost/task:0", "/job:localhost/task:1"}; StatusOr<Layout> send_layout = Layout::FromString( "sharding_specs:x, mesh:|x=2,y=2|0,1,2,3|0,1,2,3|" + hosts[0] + "/device:CPU:0," + hosts[0] + "/device:CPU:1," + hosts[1] + "/device:CPU:0," + hosts[1] + "/device:CPU:1"); StatusOr<Layout> recv_layout = Layout::FromString( "sharding_specs:x, mesh:|x=2,y=2|0,1,2,3|0,1,2,3|" + hosts[0] + "/device:TPU:0," + hosts[0] + "/device:TPU:1," + hosts[1] + "/device:TPU:0," + 
hosts[1] + "/device:TPU:1"); std::vector<std::string> send_hosts = send_layout->ReducedMesh().hosts(); std::vector<std::string> recv_hosts = recv_layout->ReducedMesh().hosts(); EXPECT_TRUE(send_hosts == recv_hosts); std::map<std::string, ShardVector> send_host_shard_map = send_layout->HostShardMap(); std::map<std::string, ShardVector> recv_host_shard_map = recv_layout->HostShardMap(); for (const std::string& host : hosts) { ShardVector shard_vec_in_send_host = send_host_shard_map.find(host)->second; ShardVector shard_vec_in_recv_host = recv_host_shard_map.find(host)->second; EXPECT_EQ(shard_vec_in_send_host, shard_vec_in_recv_host); } } TEST_F(LayoutTest, Transposed2DLayout) { TF_ASSERT_OK_AND_ASSIGN( auto layout, Layout::FromString("sharding_specs:x,y, mesh:|x=2,y=2|*CPU")); TF_ASSERT_OK_AND_ASSIGN( auto expected_layout, Layout::FromString("sharding_specs:y,x, mesh:|x=2,y=2|*CPU")); EXPECT_THAT(Layout::Transposed2D(layout), IsOkAndHolds(expected_layout)); } TEST_F(LayoutTest, Transposed2DLayoutWithBatch) { TF_ASSERT_OK_AND_ASSIGN( auto layout, Layout::FromString( "sharding_specs:b1,b2,x,y, mesh:|x=2,y=2,b1=2,b2=2|*CPU")); TF_ASSERT_OK_AND_ASSIGN( auto expected_layout, Layout::FromString( "sharding_specs:b1,b2,y,x, mesh:|x=2,y=2,b1=2,b2=2|*CPU")); EXPECT_THAT(Layout::Transposed2D(layout), IsOkAndHolds(expected_layout)); } TEST_F(LayoutTest, MeshDimensionIndex) { TF_ASSERT_OK_AND_ASSIGN( auto layout, Layout::FromString("sharding_specs:x,y, mesh:|x=2,y=2|*CPU")); EXPECT_THAT(layout.mesh().idx_for_dim("x"), IsOkAndHolds(0)); EXPECT_THAT(layout.mesh().idx_for_dim("y"), IsOkAndHolds(1)); } TEST_F(LayoutTest, TruncateBeginning) { TF_ASSERT_OK_AND_ASSIGN( auto layout, Layout::FromString("sharding_specs:x,y, mesh:CPU|x=2,y=2|*CPU")); TF_ASSERT_OK_AND_ASSIGN( auto expected_layout, Layout::FromString("sharding_specs:x, mesh:CPU|x=2,y=2|*CPU")); EXPECT_EQ(layout.Truncate(1), expected_layout); } TEST_F(LayoutTest, TruncateEnd) { TF_ASSERT_OK_AND_ASSIGN( auto layout, Layout::FromString("sharding_specs:x,y, mesh:CPU|x=2,y=2|*CPU")); TF_ASSERT_OK_AND_ASSIGN( auto expected_layout, Layout::FromString("sharding_specs:y, mesh:CPU|x=2,y=2|*CPU")); EXPECT_EQ(layout.Truncate(1, true), expected_layout); } TEST_F(LayoutTest, Concatenate) { TF_ASSERT_OK_AND_ASSIGN( auto layout_1, Layout::FromString("sharding_specs:x, mesh:CPU|x=2,y=2|*CPU")); TF_ASSERT_OK_AND_ASSIGN( auto layout_2, Layout::FromString("sharding_specs:y, mesh:CPU|x=2,y=2|*CPU")); TF_ASSERT_OK_AND_ASSIGN( auto expected_layout, Layout::FromString("sharding_specs:x,y, mesh:CPU|x=2,y=2|*CPU")); EXPECT_THAT(ConcatenateLayouts(layout_1, layout_2), IsOkAndHolds(expected_layout)); } TEST_F(LayoutTest, ConcatenateDifferentMesh) { TF_ASSERT_OK_AND_ASSIGN( auto layout_1, Layout::FromString("sharding_specs:x, mesh:CPU|x=2|*CPU")); TF_ASSERT_OK_AND_ASSIGN( auto layout_2, Layout::FromString("sharding_specs:y, mesh:CPU|y=2|*CPU")); auto layout = ConcatenateLayouts(layout_1, layout_2); EXPECT_FALSE(layout.ok()) << layout.status(); } TEST_F(LayoutTest, ConcatenateSameDimension) { TF_ASSERT_OK_AND_ASSIGN( auto layout_1, Layout::FromString("sharding_specs:x, mesh:CPU|x=2,y=2|*CPU")); TF_ASSERT_OK_AND_ASSIGN( auto layout_2, Layout::FromString("sharding_specs:x, mesh:CPU|x=2,y=2|*CPU")); auto layout = ConcatenateLayouts(layout_1, layout_2); EXPECT_FALSE(layout.ok()) << layout.status(); } TEST_F(LayoutTest, EmptyMeshDeviceType) { auto mesh = Mesh::Empty(); EXPECT_EQ(mesh.device_type(), std::string()); } TEST_F(LayoutTest, ConvertMeshDeviceType) { 
TF_ASSERT_OK_AND_ASSIGN(Mesh mesh, Mesh::FromString("mesh_name|x=2,batch=1|*TPU")); TF_ASSERT_OK_AND_ASSIGN(Mesh cpu_mesh, mesh.ToDeviceType("CPU")); EXPECT_TRUE(cpu_mesh.is_cpu_mesh()); std::string expected_task_name = "/job:localhost/replica:0/task:0/"; TF_ASSERT_OK_AND_ASSIGN( Mesh expected_mesh, Mesh::FromString("|x=2,batch=1|0,1|0,1|" + expected_task_name + "device:CPU:0," + expected_task_name + "device:CPU:1")); EXPECT_EQ(cpu_mesh, expected_mesh); } TEST_F(LayoutTest, EquivalentLayout) { TF_ASSERT_OK_AND_ASSIGN( Layout fully_sharded, Layout::FromString("sharding_specs:x,y, mesh:|x=2,y=1|*TPU")); TF_ASSERT_OK_AND_ASSIGN( Layout x_sharded, Layout::FromString("sharding_specs:x,unsharded, mesh:|x=2,y=1|*TPU")); TF_ASSERT_OK_AND_ASSIGN( Layout y_sharded, Layout::FromString("sharding_specs:unsharded,y, mesh:|x=2,y=1|*TPU")); EXPECT_TRUE(fully_sharded.IsEquivalent(x_sharded)); EXPECT_TRUE(x_sharded.IsEquivalent(fully_sharded)); EXPECT_FALSE(fully_sharded.IsEquivalent(y_sharded)); EXPECT_FALSE(y_sharded.IsEquivalent(fully_sharded)); } TEST_F(LayoutTest, GetSingleDeviceMeshEmptyDeviceString) { EXPECT_THAT(Mesh::GetSingleDeviceMesh(""), StatusIs(tsl::error::INVALID_ARGUMENT)); } TEST_F(LayoutTest, GetSingleDeviceMeshSuccess) { TF_ASSERT_OK_AND_ASSIGN( auto mesh, Mesh::FromString("/job:localhost/task:1/device:CPU:0")); EXPECT_THAT(Mesh::GetSingleDeviceMesh("/job:localhost/task:1/device:CPU:0"), IsOkAndHolds(mesh)); } TEST_F(LayoutTest, GetSingleDeviceLayoutInvalidMesh) { auto mesh = Mesh::Empty(); EXPECT_THAT(Layout::GetLayout(Layout::LayoutType::kSingleDevice, {}, mesh), StatusIs(tsl::error::INVALID_ARGUMENT)); } TEST_F(LayoutTest, GetSingleDeviceLayoutSuccess) { TF_ASSERT_OK_AND_ASSIGN( auto mesh, Mesh::FromString("/job:localhost/task:1/device:CPU:0")); TF_ASSERT_OK_AND_ASSIGN( auto layout, Layout::FromString( "maximal:true, mesh:/job:localhost/task:1/device:CPU:0")); EXPECT_THAT(Layout::GetLayout(Layout::LayoutType::kSingleDevice, {}, mesh), IsOkAndHolds(layout)); } TEST(DynamicSizeTest, IsDynamicSize) { EXPECT_TRUE(IsDynamicSize(-1)); EXPECT_TRUE(IsDynamicSize(mlir::ShapedType::kDynamic)); EXPECT_FALSE(IsDynamicSize(10)); } TEST_F(LayoutTest, LayoutType) { TF_ASSERT_OK_AND_ASSIGN( auto maximal, Layout::FromString( "maximal:true, mesh:/job:localhost/task:1/device:CPU:0")); EXPECT_EQ(maximal.type(), Layout::LayoutType::kSingleDevice); TF_ASSERT_OK_AND_ASSIGN(auto parted, Layout::FromString("parted:x, mesh:|x=2|*TPU")); EXPECT_EQ(parted.type(), Layout::LayoutType::kParted); TF_ASSERT_OK_AND_ASSIGN( auto static_layout, Layout::FromString("sharding_specs:x, mesh:|x=2|*TPU")); EXPECT_EQ(static_layout.type(), Layout::LayoutType::kStatic); } TEST_F(LayoutTest, PartedLayoutToFromString) { TF_ASSERT_OK_AND_ASSIGN(Layout layout, BatchLayout().ToParted()); std::string layout_str = layout.ToString(); TF_ASSERT_OK_AND_ASSIGN(Layout layout_from_str, Layout::FromString(layout_str)); TF_ASSERT_OK_AND_ASSIGN(LayoutProto layout_from_str_proto, layout_from_str.ToProto()); EXPECT_THAT(layout.ToProto(), IsOkAndHolds(EqualsProto(layout_from_str_proto))); } TEST_F(LayoutTest, RaggedLayoutEqual) { TF_ASSERT_OK_AND_ASSIGN( Layout fully_sharded, Layout::FromString("sharding_specs:x,y, mesh:|x=2,y=1|*TPU")); TF_ASSERT_OK_AND_ASSIGN( Layout x_sharded, Layout::FromString("sharding_specs:x,unsharded, mesh:|x=2,y=1|*TPU")); TF_ASSERT_OK_AND_ASSIGN( Layout x_parted, Layout::FromString("parted:x,unsharded, mesh:|x=2,y=1|*TPU")); TF_ASSERT_OK_AND_ASSIGN(Layout x_y_parted, Layout::FromString("parted:x,y, 
mesh:|x=2,y=1|*TPU")); EXPECT_TRUE(x_parted.IsEquivalent(x_y_parted)); EXPECT_TRUE(x_y_parted.IsEquivalent(x_parted)); EXPECT_FALSE(x_sharded.IsEquivalent(x_parted)); EXPECT_FALSE(fully_sharded.IsEquivalent(x_y_parted)); EXPECT_FALSE(x_sharded == x_parted); EXPECT_FALSE(fully_sharded == x_y_parted); } } } }
1,051
cpp
tensorflow/tensorflow
slice_util
tensorflow/dtensor/cc/slice_util.cc
tensorflow/dtensor/tests/slice_util_test.cc
#ifndef TENSORFLOW_DTENSOR_CC_SLICE_UTIL_H_ #define TENSORFLOW_DTENSOR_CC_SLICE_UTIL_H_ #include <optional> #include <string> #include <vector> #include "mlir/IR/BuiltinTypes.h" #include "tensorflow/core/platform/statusor.h" #include "tensorflow/dtensor/cc/tensor_layout.h" namespace tensorflow { namespace dtensor { namespace slice_util { struct Token { enum TokenType { REGULAR, NEW_AXIS, ELLIPSIS, SHRINK_AXIS } token_type; int64_t begin = 0; int64_t end = 0; int64_t stride = 0; bool dynamic_mask = false; bool begin_mask = false; bool end_mask = false; Token() = default; Token(TokenType token_type, int64_t begin, int64_t end, int64_t stride, bool dynamic_mask = false, bool begin_mask = false, bool end_mask = false) : token_type(token_type), begin(begin), end(end), stride(stride), dynamic_mask(dynamic_mask), begin_mask(begin_mask), end_mask(end_mask) {} Token normalize(int64_t dim_size) const; std::optional<Token> GetLocalToken(int64_t dim_size, int64_t num_shards) const; }; template <typename T, typename... Types> StatusOr<T> CreateAndRun(const std::vector<Token>& tokens, Types... args) { T visitor(args...); TF_RETURN_IF_ERROR(visitor.Run(tokens)); return visitor; } class TokenProcessor { public: explicit TokenProcessor(int64_t input_rank) : input_rank_(input_rank) {} virtual ~TokenProcessor() = default; Status Run(const std::vector<Token>& tokens); protected: bool VisitLoop(int64_t input_rank, int64_t output_rank, int64_t ellipsis_size, int64_t* input_index, int64_t* output_index); virtual void VisitImplicitAxis(int64_t input_index, int64_t output_index) = 0; virtual void VisitEllipsisAxis(const Token& token) = 0; virtual void VisitShrinkAxis(const Token& token, int64_t input_index, int64_t output_index) = 0; virtual void VisitNewAxis(const Token& token, int64_t input_index, int64_t output_index) = 0; virtual void VisitRegularAxis(const Token& token, int64_t input_index, int64_t output_index) = 0; virtual void PrepareResults(int64_t spec_rank, int64_t input_rank, int64_t output_rank) = 0; virtual Status FinalizeResults(int64_t input_rank, int64_t output_rank) = 0; private: const int64_t input_rank_; }; class ForwardLayoutInference : public TokenProcessor { public: ForwardLayoutInference(const Layout& input_layout, const llvm::ArrayRef<int64_t> input_shape) : TokenProcessor(input_shape.size()), input_layout_(input_layout), input_shape_(input_shape), input_sharding_(input_layout.sharding_spec_strs()) {} const Layout& expander_value_layout() const { return expander_value_layout_; } const Layout& expander_input_layout() const { return expander_input_layout_; } const std::vector<Token>& local_tokens() const { return local_tokens_; } protected: void VisitEllipsisAxis(const Token& token) override { local_tokens_.push_back(token); } void VisitImplicitAxis(int64_t input_index, int64_t output_index) override { expander_input_sharding_.push_back(input_sharding_[output_index]); expander_value_sharding_.push_back(input_sharding_[output_index]); } void VisitShrinkAxis(const Token& token, int64_t input_index, int64_t output_index) override { local_tokens_.push_back(token); expander_input_sharding_.push_back(Layout::kUnshardedDim); } void VisitNewAxis(const Token& token, int64_t input_index, int64_t output_index) override { local_tokens_.push_back(token); expander_value_sharding_.push_back(Layout::kUnshardedDim); } void VisitRegularAxis(const Token& token, int64_t input_index, int64_t output_index) override { auto local_token = token.GetLocalToken( input_shape_[input_index], 
input_layout_.num_shards_for_dim(input_index)); std::string sharding = input_sharding_[input_index]; if (local_token.has_value()) { local_tokens_.push_back(*local_token); } else { sharding = Layout::kUnshardedDim; local_tokens_.push_back(token); } expander_value_sharding_.push_back(sharding); expander_input_sharding_.push_back(sharding); } void PrepareResults(int64_t spec_rank, int64_t input_rank, int64_t output_rank) override { local_tokens_.reserve(spec_rank); expander_input_sharding_.reserve(input_rank); expander_value_sharding_.reserve(output_rank); } Status FinalizeResults(int64_t input_rank, int64_t output_rank) override { DCHECK_EQ(expander_input_sharding_.size(), input_rank); DCHECK_EQ(expander_value_sharding_.size(), output_rank); TF_ASSIGN_OR_RETURN( expander_input_layout_, Layout::GetLayout(expander_input_sharding_, input_layout_.mesh())); TF_ASSIGN_OR_RETURN( expander_value_layout_, Layout::GetLayout(expander_value_sharding_, input_layout_.mesh())); return absl::OkStatus(); } private: const Layout& input_layout_; const llvm::ArrayRef<int64_t> input_shape_; std::vector<std::string> input_sharding_; std::vector<std::string> expander_value_sharding_; std::vector<std::string> expander_input_sharding_; Layout expander_value_layout_; Layout expander_input_layout_; std::vector<Token> local_tokens_; }; class BackwardLayoutInference : public TokenProcessor { public: BackwardLayoutInference(const Layout& value_layout, const llvm::ArrayRef<int64_t> input_shape) : TokenProcessor(input_shape.size()), value_layout_(value_layout), input_shape_(input_shape), value_sharding_(value_layout.sharding_spec_strs()) {} const Layout& expander_input_layout() const { return expander_input_layout_; } const Layout& expander_value_layout() const { return expander_value_layout_; } const std::vector<Token>& local_tokens() const { return local_tokens_; } protected: void VisitEllipsisAxis(const Token& token) override { local_tokens_.push_back(token); } void VisitImplicitAxis(int64_t input_index, int64_t output_index) override { expander_input_sharding_.push_back(value_sharding_[output_index]); expander_value_sharding_.push_back(value_sharding_[output_index]); } void VisitShrinkAxis(const Token& token, int64_t input_index, int64_t output_index) override { local_tokens_.push_back(token); expander_input_sharding_.push_back(Layout::kUnshardedDim); } void VisitNewAxis(const Token& token, int64_t input_index, int64_t output_index) override { local_tokens_.push_back(token); expander_value_sharding_.push_back(Layout::kUnshardedDim); } void VisitRegularAxis(const Token& token, int64_t input_index, int64_t output_index) override { auto local_token = token.GetLocalToken( input_shape_[input_index], value_layout_.num_shards_for_dim(output_index)); if (local_token.has_value()) { std::string sharding = value_sharding_[output_index]; local_tokens_.push_back(*local_token); expander_input_sharding_.push_back(sharding); expander_value_sharding_.push_back(sharding); } else { local_tokens_.push_back(token); expander_input_sharding_.push_back(Layout::kUnshardedDim); expander_value_sharding_.push_back(Layout::kUnshardedDim); } } void PrepareResults(int64_t spec_rank, int64_t input_rank, int64_t output_rank) override { local_tokens_.reserve(spec_rank); expander_input_sharding_.reserve(input_rank); expander_value_sharding_.reserve(output_rank); } Status FinalizeResults(int64_t input_rank, int64_t output_rank) override { DCHECK_EQ(expander_input_sharding_.size(), input_rank); DCHECK_EQ(expander_value_sharding_.size(), output_rank); 
TF_ASSIGN_OR_RETURN( expander_input_layout_, Layout::GetLayout(expander_input_sharding_, value_layout_.mesh())); TF_ASSIGN_OR_RETURN( expander_value_layout_, Layout::GetLayout(expander_value_sharding_, value_layout_.mesh())); return absl::OkStatus(); } private: const Layout& value_layout_; const llvm::ArrayRef<int64_t> input_shape_; std::vector<std::string> value_sharding_; std::vector<std::string> expander_input_sharding_; std::vector<std::string> expander_value_sharding_; Layout expander_input_layout_; Layout expander_value_layout_; std::vector<Token> local_tokens_; }; } } } #endif #include "tensorflow/dtensor/cc/slice_util.h" #include <optional> #include <string> #include <vector> #include "mlir/IR/BuiltinTypes.h" #include "tensorflow/core/platform/statusor.h" #include "tensorflow/dtensor/cc/tensor_layout.h" namespace tensorflow { namespace dtensor { namespace slice_util { namespace { StatusOr<int64_t> GetEllipsisSize(int64_t input_rank, const std::vector<Token>& tokens, int64_t* output_rank) { bool found = false; int64_t regular_axis = 0; int64_t new_axis = 0; int64_t shrink_axis = 0; for (const auto& token : tokens) { switch (token.token_type) { case Token::ELLIPSIS: if (found) { return absl::InvalidArgumentError( "More than one ellipsis was found."); } found = true; break; case Token::NEW_AXIS: ++new_axis; break; case Token::SHRINK_AXIS: ++shrink_axis; break; case Token::REGULAR: ++regular_axis; break; } } int64_t ellipsis_size = input_rank - (regular_axis + shrink_axis); if (found && ellipsis_size < 0) { return absl::InvalidArgumentError(absl::StrCat( "Ellipsis was found, but there is no remaining axis for it.", " input_rank=", input_rank, " regular_axis=", regular_axis, " shrink_axis=", shrink_axis)); } *output_rank = regular_axis + ellipsis_size + new_axis; return ellipsis_size; } } Token Token::normalize(int64_t dim_size) const { if (dynamic_mask) { return *this; } int64_t new_begin = begin; int dir = (stride > 0) ? 
1 : -1; if (begin_mask) { if (dir > 0) { new_begin = 0; } else { new_begin = dim_size - 1; } } int64_t new_end = end; if (end_mask) { if (dir > 0) { new_end = dim_size; } else { new_end = -1; } } int64_t shift = (new_begin - new_begin % dim_size); new_begin -= shift; new_end -= shift; int64_t n = dir * (new_end - new_begin + stride - dir) / (dir * stride); if (n < 0) { new_end = new_end + dir * dim_size; } n = dir * (new_end - new_begin + stride - dir) / (dir * stride); new_end = new_begin + n * stride; Token r = *this; r.begin = new_begin; r.end = new_end; return r; } std::optional<Token> Token::GetLocalToken(int64_t dim_size, int64_t num_shards) const { Token token = normalize(dim_size); VLOG(5) << "Compute: " << "dim_size=" << dim_size << " num_shards=" << num_shards << " token.begin=" << token.begin << " token.end=" << token.end << " token.stride=" << token.stride; if (token.begin_mask && token.end_mask) return token; if (token.dynamic_mask) return std::nullopt; if (token.stride < 0) return std::nullopt; int64_t shard_dim_size = dim_size / num_shards; if (shard_dim_size % token.stride == 0) { if (token.begin >= 0 && token.begin < token.stride && token.end >= dim_size && token.end < dim_size + token.stride) { token.end = shard_dim_size + (token.end - dim_size); return token; } } return std::nullopt; } Status TokenProcessor::Run(const std::vector<Token>& tokens) { int64_t input_rank = input_rank_; int64_t output_rank; TF_ASSIGN_OR_RETURN(int64_t ellipsis_size, GetEllipsisSize(input_rank, tokens, &output_rank)); PrepareResults(tokens.size(), input_rank, output_rank); bool out_of_bound = false; int64_t input_index = 0; int64_t output_index = 0; for (const auto& token : tokens) { switch (token.token_type) { case Token::ELLIPSIS: VisitEllipsisAxis(token); out_of_bound = VisitLoop(input_rank, output_rank, ellipsis_size, &input_index, &output_index); ellipsis_size = 0; break; case Token::SHRINK_AXIS: VisitShrinkAxis(token, input_index, output_index); ++input_index; break; case Token::NEW_AXIS: VisitNewAxis(token, input_index, output_index); ++output_index; break; case Token::REGULAR: if (input_index >= input_rank) { out_of_bound = true; break; } VisitRegularAxis(token, input_index, output_index); ++input_index; ++output_index; break; } if (out_of_bound) { break; } } if (ellipsis_size > 0) { out_of_bound = VisitLoop(input_rank, output_rank, ellipsis_size, &input_index, &output_index); } if (out_of_bound) { return absl::InvalidArgumentError( "Reading axis beyond the input tensor's rank. " "The slicing token is incorrect."); } return FinalizeResults(input_rank, output_rank); } bool TokenProcessor::VisitLoop(int64_t input_rank, int64_t output_rank, int64_t ellipsis_size, int64_t* input_index, int64_t* output_index) { for (int64_t k = 0; k < ellipsis_size; ++k) { if (*input_index >= input_rank) { return true; } VisitImplicitAxis(*input_index, *output_index); ++*input_index; ++*output_index; } return false; } } } }
#include "tensorflow/dtensor/cc/slice_util.h" #include <map> #include <memory> #include <ostream> #include <string> #include <vector> #include <gmock/gmock.h> #include "tensorflow/core/platform/test.h" #include "tensorflow/dtensor/cc/tensor_layout.h" #include "tensorflow/dtensor/proto/layout.pb.h" #include "tsl/platform/status_matchers.h" namespace tensorflow { namespace dtensor { namespace slice_util { namespace { using ::testing::SizeIs; using ::tsl::testing::IsOk; TEST(TokenTest, NormalizeDynamic) { auto spec = Token(Token::REGULAR, 0, 0, 1, true, true, true); EXPECT_EQ(spec.normalize(4).begin, 0); EXPECT_EQ(spec.normalize(4).end, 0); EXPECT_EQ(spec.normalize(4).dynamic_mask, true); EXPECT_EQ(spec.normalize(4).begin_mask, true); EXPECT_EQ(spec.normalize(4).end_mask, true); } TEST(TokenTest, NormalizeFullPositiveStride) { auto spec = Token(Token::REGULAR, 0, 4, 1); EXPECT_EQ(spec.normalize(4).begin, 0); EXPECT_EQ(spec.normalize(4).end, 4); spec = Token(Token::REGULAR, 0, 4, 2); EXPECT_EQ(spec.normalize(4).begin, 0); EXPECT_EQ(spec.normalize(4).end, 4); spec = Token(Token::REGULAR, 0, 4, 3); EXPECT_EQ(spec.normalize(4).begin, 0); EXPECT_EQ(spec.normalize(4).end, 6); spec = Token(Token::REGULAR, 0, 4, 5); EXPECT_EQ(spec.normalize(4).begin, 0); EXPECT_EQ(spec.normalize(4).end, 5); } TEST(TokenTest, NormalizeFullNegativeStride) { auto spec = Token(Token::REGULAR, 3, -1, -1); EXPECT_EQ(spec.normalize(4).begin, 3); EXPECT_EQ(spec.normalize(4).end, -1); spec = Token(Token::REGULAR, 3, -1, -2); EXPECT_EQ(spec.normalize(4).begin, 3); EXPECT_EQ(spec.normalize(4).end, -1); spec = Token(Token::REGULAR, 3, -1, -3); EXPECT_EQ(spec.normalize(4).begin, 3); EXPECT_EQ(spec.normalize(4).end, -3); spec = Token(Token::REGULAR, 3, -1, -5); EXPECT_EQ(spec.normalize(4).begin, 3); EXPECT_EQ(spec.normalize(4).end, -2); } TEST(TokenTest, NormalizeZeroPositiveStride) { auto spec = Token(Token::REGULAR, 3, 3, 1); EXPECT_EQ(spec.normalize(7).begin, 3); EXPECT_EQ(spec.normalize(7).end, 3); spec = Token(Token::REGULAR, 0, 0, 1); EXPECT_EQ(spec.normalize(7).begin, 0); EXPECT_EQ(spec.normalize(7).end, 0); } TEST(TokenTest, NormalizeZeroNegativeStride) { auto spec = Token(Token::REGULAR, 3, 3, -1); EXPECT_EQ(spec.normalize(7).begin, 3); EXPECT_EQ(spec.normalize(7).end, 3); spec = Token(Token::REGULAR, 0, 0, -1); EXPECT_EQ(spec.normalize(7).begin, 0); EXPECT_EQ(spec.normalize(7).end, 0); } TEST(TokenTest, NormalizePartialPositiveStride) { auto spec = Token(Token::REGULAR, 1, 5, 1); EXPECT_EQ(spec.normalize(7).begin, 1); EXPECT_EQ(spec.normalize(7).end, 5); spec = Token(Token::REGULAR, 1, 5, 2); EXPECT_EQ(spec.normalize(7).begin, 1); EXPECT_EQ(spec.normalize(7).end, 5); spec = Token(Token::REGULAR, 1, 5, 3); EXPECT_EQ(spec.normalize(7).begin, 1); EXPECT_EQ(spec.normalize(7).end, 7); spec = Token(Token::REGULAR, 1, 5, 5); EXPECT_EQ(spec.normalize(7).begin, 1); EXPECT_EQ(spec.normalize(7).end, 6); spec = Token(Token::REGULAR, 1, -1, 1); EXPECT_EQ(spec.normalize(7).begin, 1); EXPECT_EQ(spec.normalize(7).end, 6); spec = Token(Token::REGULAR, 0, -1, 1); EXPECT_EQ(spec.normalize(7).begin, 0); EXPECT_EQ(spec.normalize(7).end, 6); } TEST(TokenTest, NormalizePartialNegativeStride) { auto spec = Token(Token::REGULAR, 6, 2, -1); EXPECT_EQ(spec.normalize(7).begin, 6); EXPECT_EQ(spec.normalize(7).end, 2); spec = Token(Token::REGULAR, 6, 2, -2); EXPECT_EQ(spec.normalize(7).begin, 6); EXPECT_EQ(spec.normalize(7).end, 2); spec = Token(Token::REGULAR, 6, 2, -3); EXPECT_EQ(spec.normalize(7).begin, 6); EXPECT_EQ(spec.normalize(7).end, 0); 
spec = Token(Token::REGULAR, 6, 2, -5); EXPECT_EQ(spec.normalize(7).begin, 6); EXPECT_EQ(spec.normalize(7).end, 1); } TEST(TokenTest, NormalizeFarFromCenter) { auto spec = Token(Token::REGULAR, 100, 102, 1); EXPECT_EQ(spec.normalize(9).begin, 1); EXPECT_EQ(spec.normalize(9).end, 3); } TEST(TokenTest, NormalizeBeginMask) { auto spec = Token(Token::REGULAR, 3, 2, 1); spec.begin_mask = true; EXPECT_EQ(spec.normalize(7).begin, 0); spec = Token(Token::REGULAR, 3, 2, -1); spec.begin_mask = true; EXPECT_EQ(spec.normalize(7).begin, 6); } TEST(TokenTest, NormalizeEndMask) { auto spec = Token(Token::REGULAR, 3, 2, 1); spec.end_mask = true; EXPECT_EQ(spec.normalize(7).end, 7); spec = Token(Token::REGULAR, 3, 2, -1); spec.end_mask = true; EXPECT_EQ(spec.normalize(7).end, -1); } class InferenceTest : public ::testing::Test { protected: Mesh GetMesh() { return Mesh::CreateMesh("MyMesh", {"x", "y"}, {2, 1}, {0, 1}, {"/job:localhost/task:0/device:CPU:0", "/job:localhost/task:0/device:CPU:1"}, {0, 1}, {"/job:localhost/task:0/device:CPU:0", "/job:localhost/task:0/device:CPU:1"}, false); } }; TEST_F(InferenceTest, FullyReplicatedInputs) { const Layout input_layout = *Layout::GetLayout( std::vector<std::string>{Layout::kUnshardedDim, Layout::kUnshardedDim}, GetMesh()); const Layout output_layout = *Layout::GetLayout( std::vector<std::string>{Layout::kUnshardedDim, Layout::kUnshardedDim}, GetMesh()); const auto specs = std::vector<Token>{ Token(Token::REGULAR, 0, -1, 1, false, false, false), Token(Token::REGULAR, 0, 2, 2, false, false, false)}; auto forward = CreateAndRun<ForwardLayoutInference>( specs, input_layout, std::vector<int64_t>{2, 4}); ASSERT_THAT(forward, IsOk()); EXPECT_EQ( forward->expander_input_layout().sharding_spec_strs(), std::vector<std::string>({Layout::kUnshardedDim, Layout::kUnshardedDim})); EXPECT_EQ(forward->expander_value_layout(), output_layout); EXPECT_THAT(forward->local_tokens(), SizeIs(2)); EXPECT_EQ(forward->local_tokens()[0].end, -1); EXPECT_EQ(forward->local_tokens()[1].end, 2); auto backward = CreateAndRun<BackwardLayoutInference>( specs, output_layout, std::vector<int64_t>{2, 4}); ASSERT_THAT(backward, IsOk()); EXPECT_EQ( backward->expander_value_layout().sharding_spec_strs(), std::vector<std::string>({Layout::kUnshardedDim, Layout::kUnshardedDim})); EXPECT_EQ( backward->expander_input_layout().sharding_spec_strs(), std::vector<std::string>({Layout::kUnshardedDim, Layout::kUnshardedDim})); EXPECT_THAT(backward->local_tokens(), SizeIs(2)); EXPECT_EQ(backward->local_tokens()[0].end, -1); EXPECT_EQ(backward->local_tokens()[1].end, 2); } TEST_F(InferenceTest, NewAxisMask) { const Layout input_layout = *Layout::GetLayout(std::vector<std::string>{"x", "y"}, GetMesh()); const Layout output_layout = *Layout::GetLayout( std::vector<std::string>{Layout::kUnshardedDim, Layout::kUnshardedDim, "x", "y"}, GetMesh()); const auto specs = std::vector<Token>{ Token(Token::NEW_AXIS, 0, 0, 1, false, false, false), Token(Token::NEW_AXIS, 0, 0, 1, false, false, false), Token(Token::REGULAR, 0, 2, 1, false, false, false), Token(Token::REGULAR, 0, 4, 1, false, false, false)}; auto forward = CreateAndRun<ForwardLayoutInference>( specs, input_layout, std::vector<int64_t>{2, 4}); ASSERT_THAT(forward, IsOk()); EXPECT_EQ(forward->expander_input_layout().sharding_spec_strs(), std::vector<std::string>({"x", "y"})); EXPECT_EQ(forward->expander_value_layout(), output_layout); EXPECT_THAT(forward->local_tokens(), SizeIs(4)); EXPECT_EQ(forward->local_tokens()[0].end, 0); 
EXPECT_EQ(forward->local_tokens()[1].end, 0); EXPECT_EQ(forward->local_tokens()[2].end, 1); EXPECT_EQ(forward->local_tokens()[3].end, 4); auto backward = CreateAndRun<BackwardLayoutInference>( specs, output_layout, std::vector<int64_t>{2, 4}); ASSERT_THAT(backward, IsOk()); EXPECT_EQ(backward->expander_value_layout().sharding_spec_strs(), std::vector<std::string>( {Layout::kUnshardedDim, Layout::kUnshardedDim, "x", "y"})); EXPECT_EQ(backward->expander_input_layout(), input_layout); EXPECT_THAT(backward->local_tokens(), SizeIs(4)); EXPECT_EQ(backward->local_tokens()[0].end, 0); EXPECT_EQ(backward->local_tokens()[1].end, 0); EXPECT_EQ(backward->local_tokens()[2].end, 1); EXPECT_EQ(backward->local_tokens()[3].end, 4); } TEST_F(InferenceTest, ShrinkAxisMask) { const Layout input_layout = *Layout::GetLayout( std::vector<std::string>{Layout::kUnshardedDim, Layout::kUnshardedDim}, GetMesh()); const Layout output_layout = *Layout::GetLayout( std::vector<std::string>{Layout::kUnshardedDim}, GetMesh()); const auto specs = std::vector<Token>{ Token(Token::REGULAR, 0, -1, 1, false, false, false), Token(Token::SHRINK_AXIS, 0, 2, 1, false, false, false)}; auto forward = CreateAndRun<ForwardLayoutInference>( specs, input_layout, std::vector<int64_t>{2, 4}); ASSERT_THAT(forward, IsOk()); EXPECT_EQ( forward->expander_input_layout().sharding_spec_strs(), std::vector<std::string>({Layout::kUnshardedDim, Layout::kUnshardedDim})); EXPECT_EQ(forward->expander_value_layout(), output_layout); EXPECT_THAT(forward->local_tokens(), SizeIs(2)); EXPECT_EQ(forward->local_tokens()[0].end, -1); EXPECT_EQ(forward->local_tokens()[1].end, 2); auto backward = CreateAndRun<BackwardLayoutInference>( specs, output_layout, std::vector<int64_t>{2, 4}); ASSERT_THAT(backward, IsOk()); EXPECT_EQ(backward->expander_value_layout().sharding_spec_strs(), std::vector<std::string>({Layout::kUnshardedDim})); EXPECT_THAT(backward->local_tokens(), SizeIs(2)); EXPECT_EQ(backward->expander_input_layout(), input_layout); EXPECT_EQ(backward->local_tokens()[0].end, -1); EXPECT_EQ(backward->local_tokens()[1].end, 2); } TEST_F(InferenceTest, EllipsisMask) { const Layout input_layout = *Layout::GetLayout( std::vector<std::string>{"x", "y", Layout::kUnshardedDim}, GetMesh()); const Layout output_layout = *Layout::GetLayout( std::vector<std::string>{"x", "y", Layout::kUnshardedDim, Layout::kUnshardedDim, Layout::kUnshardedDim}, GetMesh()); const auto specs = std::vector<Token>{Token(Token::ELLIPSIS, 0, 0, 1, false, false, false), Token(Token::NEW_AXIS, 0, 0, 1, false, false, false), Token(Token::NEW_AXIS, 0, 0, 1, false, false, false)}; auto forward = CreateAndRun<ForwardLayoutInference>( specs, input_layout, std::vector<int64_t>{2, 4, 6}); ASSERT_THAT(forward, IsOk()); EXPECT_EQ(forward->expander_input_layout().sharding_spec_strs(), std::vector<std::string>({"x", "y", Layout::kUnshardedDim})); EXPECT_EQ(forward->expander_value_layout(), output_layout); EXPECT_THAT(forward->local_tokens(), SizeIs(3)); EXPECT_EQ(forward->local_tokens()[0].end, 0); EXPECT_EQ(forward->local_tokens()[1].end, 0); EXPECT_EQ(forward->local_tokens()[2].end, 0); auto backward = CreateAndRun<BackwardLayoutInference>( specs, output_layout, std::vector<int64_t>{2, 4, 6}); ASSERT_THAT(backward, IsOk()); EXPECT_EQ( backward->expander_value_layout().sharding_spec_strs(), std::vector<std::string>({"x", "y", Layout::kUnshardedDim, Layout::kUnshardedDim, Layout::kUnshardedDim})); EXPECT_EQ(backward->expander_input_layout(), input_layout); EXPECT_THAT(backward->local_tokens(), 
SizeIs(3)); EXPECT_EQ(backward->local_tokens()[0].end, 0); EXPECT_EQ(backward->local_tokens()[1].end, 0); EXPECT_EQ(backward->local_tokens()[2].end, 0); } TEST_F(InferenceTest, EllipsisNewAxisEndMask) { const Layout input_layout = *Layout::GetLayout( std::vector<std::string>{Layout::kUnshardedDim}, GetMesh()); const Layout output_layout = *Layout::GetLayout( std::vector<std::string>{Layout::kUnshardedDim, Layout::kUnshardedDim}, GetMesh()); const auto specs = std::vector<Token>{ Token(Token::ELLIPSIS, 0, 0, 1, false, false, false), Token(Token::NEW_AXIS, 0, 0, 1, false, false, false), Token(Token::REGULAR, 0, 0, 1, false, true, true), }; auto forward = CreateAndRun<ForwardLayoutInference>(specs, input_layout, std::vector<int64_t>{2}); ASSERT_THAT(forward, IsOk()); EXPECT_EQ(forward->expander_input_layout().sharding_spec_strs(), std::vector<std::string>({Layout::kUnshardedDim})); EXPECT_EQ(forward->expander_value_layout(), output_layout); EXPECT_THAT(forward->local_tokens(), SizeIs(3)); EXPECT_EQ(forward->local_tokens()[0].end, 0); EXPECT_EQ(forward->local_tokens()[1].end, 0); EXPECT_EQ(forward->local_tokens()[2].end, 2); auto backward = CreateAndRun<BackwardLayoutInference>( specs, output_layout, std::vector<int64_t>{2}); ASSERT_THAT(backward, IsOk()); EXPECT_EQ( backward->expander_value_layout().sharding_spec_strs(), std::vector<std::string>({Layout::kUnshardedDim, Layout::kUnshardedDim})); EXPECT_EQ(backward->expander_input_layout().sharding_spec_strs(), std::vector<std::string>({Layout::kUnshardedDim})); EXPECT_THAT(backward->local_tokens(), SizeIs(3)); EXPECT_EQ(backward->local_tokens()[0].end, 0); EXPECT_EQ(backward->local_tokens()[1].end, 0); EXPECT_EQ(backward->local_tokens()[2].end, 2); } TEST_F(InferenceTest, AdditionalAxes) { const Layout input_layout = *Layout::GetLayout(std::vector<std::string>{"x", "y"}, GetMesh()); const Layout output_layout = *Layout::GetLayout(std::vector<std::string>{"x", "y"}, GetMesh()); const auto specs = std::vector<Token>{Token(Token::REGULAR, 0, 0, 1, false, true, true)}; auto forward = CreateAndRun<ForwardLayoutInference>( specs, input_layout, std::vector<int64_t>{2, 4}); ASSERT_THAT(forward, IsOk()); EXPECT_EQ(forward->expander_input_layout().sharding_spec_strs(), std::vector<std::string>({"x", "y"})); EXPECT_EQ(forward->expander_value_layout(), output_layout); EXPECT_THAT(forward->local_tokens(), SizeIs(1)); EXPECT_EQ(forward->local_tokens()[0].begin_mask, true); EXPECT_EQ(forward->local_tokens()[0].end_mask, true); auto backward = CreateAndRun<BackwardLayoutInference>( specs, output_layout, std::vector<int64_t>{2, 4}); ASSERT_THAT(backward, IsOk()); EXPECT_EQ(backward->expander_value_layout().sharding_spec_strs(), std::vector<std::string>({"x", "y"})); EXPECT_EQ(backward->expander_input_layout(), input_layout); EXPECT_THAT(backward->local_tokens(), SizeIs(1)); EXPECT_EQ(forward->local_tokens()[0].begin_mask, true); EXPECT_EQ(forward->local_tokens()[0].end_mask, true); } TEST_F(InferenceTest, ShardingOnNonSlicedDimension) { const Layout input_layout = *Layout::GetLayout( std::vector<std::string>{"x", Layout::kUnshardedDim}, GetMesh()); const Layout output_layout = *Layout::GetLayout( std::vector<std::string>{"x", Layout::kUnshardedDim}, GetMesh()); const auto specs = std::vector<Token>{Token(Token::REGULAR, 0, 2, 1, false, false, false), Token(Token::REGULAR, 0, 2, 2, false, false, false)}; auto forward = CreateAndRun<ForwardLayoutInference>( specs, input_layout, std::vector<int64_t>{2, 4}); ASSERT_THAT(forward, IsOk()); 
EXPECT_EQ(forward->expander_value_layout(), output_layout); EXPECT_EQ(forward->expander_input_layout().sharding_spec_strs(), std::vector<std::string>({"x", Layout::kUnshardedDim})); EXPECT_THAT(forward->local_tokens(), SizeIs(2)); EXPECT_EQ(forward->local_tokens()[0].end, 1); EXPECT_EQ(forward->local_tokens()[1].end, 2); auto backward = CreateAndRun<BackwardLayoutInference>( specs, output_layout, std::vector<int64_t>{2, 4}); ASSERT_THAT(backward, IsOk()); EXPECT_EQ(backward->expander_input_layout(), input_layout); EXPECT_EQ(backward->expander_value_layout().sharding_spec_strs(), std::vector<std::string>({"x", Layout::kUnshardedDim})); EXPECT_THAT(backward->local_tokens(), SizeIs(2)); EXPECT_EQ(backward->local_tokens()[0].end, 1); EXPECT_EQ(backward->local_tokens()[1].end, 2); } TEST_F(InferenceTest, StrideOnShardedDimensionNoRelayout1) { const Layout input_layout = *Layout::GetLayout( std::vector<std::string>{Layout::kUnshardedDim, "x"}, GetMesh()); const Layout output_layout = *Layout::GetLayout( std::vector<std::string>{Layout::kUnshardedDim, "x"}, GetMesh()); const auto specs = std::vector<Token>{Token(Token::REGULAR, 0, 2, 1, false, false, false), Token(Token::REGULAR, 0, 4, 2, false, false, false)}; auto forward = CreateAndRun<ForwardLayoutInference>( specs, input_layout, std::vector<int64_t>{2, 4}); ASSERT_THAT(forward, IsOk()); EXPECT_EQ(forward->expander_input_layout().sharding_spec_strs(), std::vector<std::string>({Layout::kUnshardedDim, "x"})); EXPECT_EQ(forward->expander_value_layout(), output_layout); EXPECT_THAT(forward->local_tokens(), SizeIs(2)); EXPECT_EQ(forward->local_tokens()[0].end, 2); EXPECT_EQ(forward->local_tokens()[1].end, 2); auto backward = CreateAndRun<BackwardLayoutInference>( specs, output_layout, std::vector<int64_t>{2, 4}); ASSERT_THAT(backward, IsOk()); EXPECT_EQ(backward->expander_input_layout(), input_layout); EXPECT_EQ(backward->expander_value_layout().sharding_spec_strs(), std::vector<std::string>({Layout::kUnshardedDim, "x"})); EXPECT_THAT(backward->local_tokens(), SizeIs(2)); EXPECT_EQ(backward->local_tokens()[0].end, 2); EXPECT_EQ(backward->local_tokens()[1].end, 2); } TEST_F(InferenceTest, StrideOnShardedDimensionNoRelayout2) { const Layout input_layout = *Layout::GetLayout( std::vector<std::string>{Layout::kUnshardedDim, "y"}, GetMesh()); const Layout output_layout = *Layout::GetLayout( std::vector<std::string>{Layout::kUnshardedDim, "y"}, GetMesh()); const auto specs = std::vector<Token>{Token(Token::REGULAR, 0, 2, 1, false, false, false), Token(Token::REGULAR, 0, 4, 2, false, false, false)}; auto forward = CreateAndRun<ForwardLayoutInference>( specs, input_layout, std::vector<int64_t>{2, 4}); ASSERT_THAT(forward, IsOk()); EXPECT_EQ(forward->expander_input_layout().sharding_spec_strs(), std::vector<std::string>({Layout::kUnshardedDim, "y"})); EXPECT_EQ(forward->expander_value_layout(), output_layout); EXPECT_THAT(forward->local_tokens(), SizeIs(2)); EXPECT_EQ(forward->local_tokens()[0].end, 2); EXPECT_EQ(forward->local_tokens()[1].end, 4); auto backward = CreateAndRun<BackwardLayoutInference>( specs, output_layout, std::vector<int64_t>{2, 4}); ASSERT_THAT(backward, IsOk()); EXPECT_EQ(backward->expander_input_layout(), input_layout); EXPECT_EQ(backward->expander_value_layout().sharding_spec_strs(), std::vector<std::string>({Layout::kUnshardedDim, "y"})); EXPECT_THAT(backward->local_tokens(), SizeIs(2)); EXPECT_EQ(backward->local_tokens()[0].end, 2); EXPECT_EQ(backward->local_tokens()[1].end, 4); } TEST_F(InferenceTest, 
StrideOnShardedDimensionNoRelayout3) { const Layout input_layout = *Layout::GetLayout( std::vector<std::string>{Layout::kUnshardedDim, "x"}, GetMesh()); const Layout output_layout = *Layout::GetLayout( std::vector<std::string>{Layout::kUnshardedDim, "x"}, GetMesh()); const auto specs = std::vector<Token>{Token(Token::REGULAR, 0, 2, 1, false, false, false), Token(Token::REGULAR, 0, 3, 2, false, false, false)}; auto forward = CreateAndRun<ForwardLayoutInference>( specs, input_layout, std::vector<int64_t>{2, 4}); ASSERT_THAT(forward, IsOk()); EXPECT_EQ(forward->expander_input_layout().sharding_spec_strs(), std::vector<std::string>({Layout::kUnshardedDim, "x"})); EXPECT_EQ(forward->expander_value_layout(), output_layout); EXPECT_THAT(forward->local_tokens(), SizeIs(2)); EXPECT_EQ(forward->local_tokens()[0].end, 2); EXPECT_EQ(forward->local_tokens()[1].end, 2); auto backward = CreateAndRun<BackwardLayoutInference>( specs, output_layout, std::vector<int64_t>{2, 4}); ASSERT_THAT(backward, IsOk()); EXPECT_EQ(backward->expander_input_layout(), input_layout); EXPECT_EQ(backward->expander_value_layout().sharding_spec_strs(), std::vector<std::string>({Layout::kUnshardedDim, "x"})); EXPECT_THAT(backward->local_tokens(), SizeIs(2)); EXPECT_EQ(backward->local_tokens()[0].end, 2); EXPECT_EQ(backward->local_tokens()[1].end, 2); } TEST_F(InferenceTest, StrideOnShardedDimensionNeedRelayout) { const Layout input_layout = *Layout::GetLayout( std::vector<std::string>{"x", Layout::kUnshardedDim}, GetMesh()); const Layout output_layout = *Layout::GetLayout( std::vector<std::string>{Layout::kUnshardedDim, Layout::kUnshardedDim}, GetMesh()); const auto specs = std::vector<Token>{Token(Token::REGULAR, 0, -1, 1, false, false, false), Token(Token::REGULAR, 0, 4, 3, false, false, false)}; auto forward = CreateAndRun<ForwardLayoutInference>( specs, input_layout, std::vector<int64_t>{2, 4}); ASSERT_THAT(forward, IsOk()); EXPECT_EQ( forward->expander_input_layout().sharding_spec_strs(), std::vector<std::string>({Layout::kUnshardedDim, Layout::kUnshardedDim})); EXPECT_EQ(forward->expander_value_layout(), output_layout); EXPECT_THAT(forward->local_tokens(), SizeIs(2)); EXPECT_EQ(forward->local_tokens()[0].end, -1); EXPECT_EQ(forward->local_tokens()[1].end, 4); auto backward = CreateAndRun<BackwardLayoutInference>( specs, output_layout, std::vector<int64_t>{2, 4}); ASSERT_THAT(backward, IsOk()); EXPECT_EQ( backward->expander_input_layout().sharding_spec_strs(), std::vector<std::string>({Layout::kUnshardedDim, Layout::kUnshardedDim})); EXPECT_EQ( backward->expander_value_layout().sharding_spec_strs(), std::vector<std::string>({Layout::kUnshardedDim, Layout::kUnshardedDim})); EXPECT_THAT(backward->local_tokens(), SizeIs(2)); EXPECT_EQ(backward->local_tokens()[0].end, -1); EXPECT_EQ(backward->local_tokens()[1].end, 4); } } } } }
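The slice_util tests above pin down Token::normalize for strided slices. As a quick illustration outside the test harness, the sketch below (assumptions: the dtensor headers are on the include path and the dtensor library is linked; the driver file itself is hypothetical) normalizes the same stride-3 slice of [1, 5) over a dimension of size 7 that TokenTest.NormalizePartialPositiveStride checks, and prints begin=1 end=7, i.e. the end point rounded up to the next stride step.

#include <iostream>

#include "tensorflow/dtensor/cc/slice_util.h"

int main() {
  using tensorflow::dtensor::slice_util::Token;
  // A regular slice token: begin=1, end=5, stride=3, no masks set.
  Token spec(Token::REGULAR, /*begin=*/1, /*end=*/5, /*stride=*/3);
  // Normalizing against a dimension of size 7 rounds the end point up to the
  // next stride boundary, matching the expectations in the unit test above.
  Token norm = spec.normalize(7);
  std::cout << "begin=" << norm.begin << " end=" << norm.end << "\n";  // begin=1 end=7
  return 0;
}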
1,052
cpp
tensorflow/tensorflow
layout_to_xla_sharding
tensorflow/dtensor/cc/xla_spmd/layout_to_xla_sharding.cc
tensorflow/dtensor/tests/layout_to_xla_sharding_test.cc
#ifndef TENSORFLOW_DTENSOR_CC_XLA_SPMD_LAYOUT_TO_XLA_SHARDING_H_ #define TENSORFLOW_DTENSOR_CC_XLA_SPMD_LAYOUT_TO_XLA_SHARDING_H_ #include <cstdint> #include <vector> #include "xla/xla_data.pb.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/dtensor/cc/dstatus.h" #include "tensorflow/dtensor/cc/tensor_layout.h" namespace tensorflow { namespace dtensor { constexpr char kXlaShardingAttr[] = "mhlo.sharding"; struct MeshMajorToMinor { std::vector<int64_t> permutation; std::vector<int64_t> sizes; std::vector<int64_t> ToDeviceList(); }; StatusOr<MeshMajorToMinor> ConvertMeshMajorToMinor(const Layout& layout, const Mesh& mesh); StatusOr<::xla::OpSharding> ConvertLayoutToXlaOpSharding(const Layout& layout); } } #endif #include "tensorflow/dtensor/cc/xla_spmd/layout_to_xla_sharding.h" #include <string> #include <vector> #include "absl/container/flat_hash_map.h" #include "absl/types/span.h" #include "llvm/ADT/STLExtras.h" #include "xla/status_macros.h" #include "xla/xla_data.pb.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/dtensor/cc/dstatus.h" #include "tensorflow/dtensor/cc/tensor_layout.h" namespace tensorflow { namespace dtensor { namespace { void PopulateDevices(absl::Span<const int64_t> permutation, absl::Span<const int64_t> sizes, absl::Span<const int64_t> cum_sizes, std::vector<int64_t>* out_devices, int64_t base = 0) { int expanding_dim = permutation[0]; int expanding_dim_size = sizes[expanding_dim]; int expanding_cum_dim_size = cum_sizes[expanding_dim]; for (int i = 0; i < expanding_dim_size; ++i) { if (permutation.size() == 1) { out_devices->push_back(base + i * expanding_cum_dim_size); } else { PopulateDevices(permutation.subspan(1), sizes, cum_sizes, out_devices, base + i * expanding_cum_dim_size); } } } } std::vector<int64_t> MeshMajorToMinor::ToDeviceList() { std::vector<int64_t> cum_sizes(sizes.size()); int64_t cum_size = 1; for (int i = sizes.size() - 1; i >= 0; --i) { cum_sizes[i] = cum_size; cum_size *= sizes[i]; } std::vector<int64_t> devices; devices.reserve(cum_size * sizes[0]); PopulateDevices(permutation, sizes, cum_sizes, &devices); return devices; } StatusOr<MeshMajorToMinor> ConvertMeshMajorToMinor(const Layout& layout, const Mesh& mesh) { MeshMajorToMinor major_to_minor; major_to_minor.permutation.reserve(mesh.dims().size()); major_to_minor.sizes.reserve(mesh.dims().size()); absl::flat_hash_map<std::string, int64_t> dim_name_to_index_map; for (const auto& [index, mesh_dim] : llvm::enumerate(mesh.dims())) { major_to_minor.sizes.push_back(mesh_dim.size); dim_name_to_index_map[mesh_dim.name] = index; } for (const auto& spec : layout.sharding_spec_strs()) { if (mesh.IsMeshDim(spec)) { const auto it = dim_name_to_index_map.find(spec); TF_RET_CHECK(it != dim_name_to_index_map.end()); const auto& dimension_index = it->second; major_to_minor.permutation.push_back(dimension_index); dim_name_to_index_map.erase(it); } } for (const auto& [name, unused_size] : mesh.dims()) { if (const auto it = dim_name_to_index_map.find(name); it != dim_name_to_index_map.end()) { const auto& dimension_index = it->second; major_to_minor.permutation.push_back(dimension_index); } } TF_RET_CHECK(major_to_minor.permutation.size() == major_to_minor.sizes.size()); return major_to_minor; } StatusOr<::xla::OpSharding> ConvertLayoutToXlaOpSharding(const Layout& layout) { ::xla::OpSharding xla_sharding; if (layout.IsSingleDevice()) { xla_sharding.set_type(::xla::OpSharding::MAXIMAL); return xla_sharding; } else if (layout.IsFullyReplicated()) { 
xla_sharding.set_type(::xla::OpSharding::REPLICATED); return xla_sharding; } xla_sharding.set_type(::xla::OpSharding::OTHER); const Mesh& mesh = layout.mesh(); { int32 product_of_sharded_dimensions = 1; for (int32 dim_size : layout.num_shards()) { product_of_sharded_dimensions *= dim_size; xla_sharding.add_tile_assignment_dimensions(dim_size); } if (product_of_sharded_dimensions != mesh.num_devices()) { xla_sharding.add_tile_assignment_dimensions( mesh.num_devices() / product_of_sharded_dimensions); xla_sharding.set_replicate_on_last_tile_dim(true); } } TF_ASSIGN_OR_RETURN(auto major_to_minor, ConvertMeshMajorToMinor(layout, mesh)); std::vector<int64_t> tile_assignment_devices = major_to_minor.ToDeviceList(); *(xla_sharding.mutable_tile_assignment_devices()) = { tile_assignment_devices.begin(), tile_assignment_devices.end()}; return xla_sharding; } } }
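ConvertMeshMajorToMinor orders the mesh dimensions that appear in the sharding spec first, and MeshMajorToMinor::ToDeviceList then enumerates device ids via PopulateDevices using cumulative dimension sizes. The standalone sketch below re-derives that enumeration by hand for a 2x2 mesh sharded as y,x; it illustrates the ordering rule only and is not the library API (the helper name EnumerateDevices is made up). It prints 0 2 1 3, the tile assignment that the FullyShardedPermutedLayout2D test below expects.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Walks the mesh dimensions from most-major to most-minor, mirroring
// PopulateDevices: one step along dimension d advances the device id by
// cum_sizes[d], the product of all dimension sizes to the right of d.
void EnumerateDevices(const std::vector<int64_t>& permutation,
                      const std::vector<int64_t>& sizes,
                      const std::vector<int64_t>& cum_sizes,
                      std::vector<int64_t>* out, int64_t base = 0,
                      std::size_t depth = 0) {
  const int64_t dim = permutation[depth];
  for (int64_t i = 0; i < sizes[dim]; ++i) {
    if (depth + 1 == permutation.size()) {
      out->push_back(base + i * cum_sizes[dim]);
    } else {
      EnumerateDevices(permutation, sizes, cum_sizes, out,
                       base + i * cum_sizes[dim], depth + 1);
    }
  }
}

int main() {
  // Mesh |x=2,y=2| with sharding spec "y,x": y is the major dimension, so the
  // major-to-minor permutation over (x, y) is {1, 0}.
  const std::vector<int64_t> sizes = {2, 2};
  const std::vector<int64_t> permutation = {1, 0};
  std::vector<int64_t> cum_sizes(sizes.size());
  int64_t cum = 1;
  for (int i = static_cast<int>(sizes.size()) - 1; i >= 0; --i) {
    cum_sizes[i] = cum;
    cum *= sizes[i];
  }
  std::vector<int64_t> devices;
  EnumerateDevices(permutation, sizes, cum_sizes, &devices);
  for (int64_t d : devices) std::cout << d << " ";  // prints: 0 2 1 3
  std::cout << "\n";
  return 0;
}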
#include "tensorflow/dtensor/cc/xla_spmd/layout_to_xla_sharding.h" #include <string> #include <vector> #include "absl/algorithm/container.h" #include "absl/strings/str_cat.h" #include "benchmark/benchmark.h" #include "xla/hlo/ir/hlo_sharding.h" #include "xla/xla_data.pb.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/dtensor/cc/dstatus.h" #include "tensorflow/dtensor/cc/tensor_layout.h" #include "tsl/lib/core/status_test_util.h" #include "tsl/platform/statusor.h" namespace tensorflow { namespace dtensor { namespace { StatusOr<std::string> ConvertLayoutStrToHloShardingStr(std::string layout_str) { TF_ASSIGN_OR_RETURN(const Layout layout, Layout::FromString(layout_str)); TF_ASSIGN_OR_RETURN(const xla::OpSharding op_sharding, ConvertLayoutToXlaOpSharding(layout)); TF_ASSIGN_OR_RETURN(const auto hlo_sharding, xla::HloSharding::FromProto(op_sharding)); return hlo_sharding.ToString(); } TEST(LayoutToXLAShardingTest, ReplicatedLayout1D) { std::string layout_str = "sharding_specs:unsharded, " "mesh:|x=2|0,1|0,1|/job:localhost/task:0/device:CPU:0,/job:localhost/" "task:0/device:CPU:1"; TF_ASSERT_OK_AND_ASSIGN(std::string sharding, ConvertLayoutStrToHloShardingStr(layout_str)); EXPECT_EQ("{replicated}", sharding); } TEST(LayoutToXLAShardingTest, ReplicatedLayout2D) { std::string layout_str = "sharding_specs:unsharded,unsharded " "mesh:|x=2,y=2|0,1,2,3|0,1,2,3|/job:localhost/task:0/device:CPU:0,/" "job:localhost/" "task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2,/job:localhost/" "task:0/device:CPU:3"; TF_ASSERT_OK_AND_ASSIGN(std::string sharding, ConvertLayoutStrToHloShardingStr(layout_str)); EXPECT_EQ("{replicated}", sharding); } TEST(LayoutToXLAShardingTest, ReplicatedLayout3D) { std::string layout_str = "sharding_specs:unsharded,unsharded,unsharded, " "mesh:|x=2,y=2,z=2|0,1,2,3,4,5,6,7|0,1,2,3,4,5,6,7|/job:localhost/task:0/" "device:CPU:0,/" "job:localhost/" "task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2,/job:localhost/" "task:0/device:CPU:3,/job:localhost/task:0/device:CPU:4,/job:localhost/" "task:0/device:CPU:5,/job:localhost/task:0/device:CPU:6,/job:localhost/" "task:0/device:CPU:7"; TF_ASSERT_OK_AND_ASSIGN(std::string sharding, ConvertLayoutStrToHloShardingStr(layout_str)); EXPECT_EQ("{replicated}", sharding); } TEST(LayoutToXLAShardingTest, FullyShardedLayout1D) { std::string layout_str = "sharding_specs:x, " "mesh:|x=3|0,1,2|0,1,2|/job:localhost/task:0/device:CPU:0,/job:localhost/" "task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2"; TF_ASSERT_OK_AND_ASSIGN(std::string sharding, ConvertLayoutStrToHloShardingStr(layout_str)); EXPECT_EQ("{devices=[3]0,1,2}", sharding); } TEST(LayoutToXLAShardingTest, FullyShardedLayout2D) { std::string layout_str = "sharding_specs:x,y, " "mesh:|x=2,y=2|0,1,2,3|0,1,2,3|/job:localhost/task:0/device:CPU:0,/" "job:localhost/" "task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2,/job:localhost/" "task:0/device:CPU:3"; TF_ASSERT_OK_AND_ASSIGN(std::string sharding, ConvertLayoutStrToHloShardingStr(layout_str)); EXPECT_EQ("{devices=[2,2]0,1,2,3}", sharding); } TEST(LayoutToXLAShardingTest, FullyShardedLayout2DAsymmetricMesh) { std::string layout_str = "sharding_specs:y,x, " "mesh:|x=2,y=4|0,1,2,3,4,5,6,7|0,1,2,3,4,5,6,7|/job:localhost/task:0/" "device:CPU:0,/job:localhost/task:0/device:CPU:1,/job:localhost/task:0/" "device:CPU:2,/job:localhost/task:0/device:CPU:3,/job:localhost/task:0/" "device:CPU:4,/job:localhost/task:0/device:CPU:5,/job:localhost/task:0/" "device:CPU:6,/job:localhost/task:0/device:CPU:7"; 
TF_ASSERT_OK_AND_ASSIGN(std::string sharding, ConvertLayoutStrToHloShardingStr(layout_str)); EXPECT_EQ("{devices=[4,2]0,4,1,5,2,6,3,7}", sharding); } TEST(LayoutToXLAShardingTest, FullyShardedPermutedLayout2D) { std::string layout_str = "sharding_specs:y,x, " "mesh:|x=2,y=2|0,1,2,3|0,1,2,3|/job:localhost/task:0/device:CPU:0,/" "job:localhost/" "task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2,/job:localhost/" "task:0/device:CPU:3"; TF_ASSERT_OK_AND_ASSIGN(std::string sharding, ConvertLayoutStrToHloShardingStr(layout_str)); EXPECT_EQ("{devices=[2,2]0,2,1,3}", sharding); } TEST(LayoutToXLAShardingTest, FullyShardedLayout3D) { std::string layout_str = "sharding_specs:x,y,z, " "mesh:|x=2,y=2,z=2|0,1,2,3,4,5,6,7|0,1,2,3,4,5,6,7|/job:localhost/task:0/" "device:CPU:0,/" "job:localhost/" "task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2,/job:localhost/" "task:0/device:CPU:3,/job:localhost/task:0/device:CPU:4,/job:localhost/" "task:0/device:CPU:5,/job:localhost/task:0/device:CPU:6,/job:localhost/" "task:0/device:CPU:7"; TF_ASSERT_OK_AND_ASSIGN(std::string sharding, ConvertLayoutStrToHloShardingStr(layout_str)); EXPECT_EQ("{devices=[2,2,2]0,1,2,3,4,5,6,7}", sharding); } TEST(LayoutToXLAShardingTest, FullyShardedPermutedLayout3D_1) { std::string layout_str = "sharding_specs:z,x,y, " "mesh:|x=2,y=2,z=2|0,1,2,3,4,5,6,7|0,1,2,3,4,5,6,7|/job:localhost/task:0/" "device:CPU:0,/" "job:localhost/" "task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2,/job:localhost/" "task:0/device:CPU:3,/job:localhost/task:0/device:CPU:4,/job:localhost/" "task:0/device:CPU:5,/job:localhost/task:0/device:CPU:6,/job:localhost/" "task:0/device:CPU:7"; TF_ASSERT_OK_AND_ASSIGN(std::string sharding, ConvertLayoutStrToHloShardingStr(layout_str)); EXPECT_EQ("{devices=[2,2,2]0,2,4,6,1,3,5,7}", sharding); } TEST(LayoutToXLAShardingTest, FullyShardedPermutedLayout3D_2) { std::string layout_str = "sharding_specs:z,y,x, " "mesh:|x=2,y=2,z=2|0,1,2,3,4,5,6,7|0,1,2,3,4,5,6,7|/job:localhost/task:0/" "device:CPU:0,/" "job:localhost/" "task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2,/job:localhost/" "task:0/device:CPU:3,/job:localhost/task:0/device:CPU:4,/job:localhost/" "task:0/device:CPU:5,/job:localhost/task:0/device:CPU:6,/job:localhost/" "task:0/device:CPU:7"; TF_ASSERT_OK_AND_ASSIGN(std::string sharding, ConvertLayoutStrToHloShardingStr(layout_str)); EXPECT_EQ("{devices=[2,2,2]0,4,2,6,1,5,3,7}", sharding); } TEST(LayoutToXLAShardingTest, PartiallyShardedLayout2D) { std::string layout_str = "sharding_specs:x,unsharded, " "mesh:|x=2,y=2|0,1,2,3|0,1,2,3|/job:localhost/task:0/device:CPU:0,/" "job:localhost/" "task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2,/job:localhost/" "task:0/device:CPU:3"; TF_ASSERT_OK_AND_ASSIGN(std::string sharding, ConvertLayoutStrToHloShardingStr(layout_str)); EXPECT_EQ("{devices=[2,1,2]0,1,2,3 last_tile_dim_replicate}", sharding); } TEST(LayoutToXLAShardingTest, PartiallyShardedPermutedLayout2D) { std::string layout_str = "sharding_specs:y,unsharded, " "mesh:|x=2,y=2|0,1,2,3|0,1,2,3|/job:localhost/task:0/device:CPU:0,/" "job:localhost/" "task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2,/job:localhost/" "task:0/device:CPU:3"; TF_ASSERT_OK_AND_ASSIGN(std::string sharding, ConvertLayoutStrToHloShardingStr(layout_str)); EXPECT_EQ("{devices=[2,1,2]0,2,1,3 last_tile_dim_replicate}", sharding); } TEST(LayoutToXLAShardingTest, PartiallyShardedLayout3D_1) { std::string layout_str = "sharding_specs:x,y,unsharded, " "mesh:|x=2,y=2,z=2|0,1,2,3,4,5,6,7|0,1,2,3,4,5,6,7|/job:localhost/task:0/" 
"device:CPU:0,/" "job:localhost/" "task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2,/job:localhost/" "task:0/device:CPU:3,/job:localhost/task:0/device:CPU:4,/job:localhost/" "task:0/device:CPU:5,/job:localhost/task:0/device:CPU:6,/job:localhost/" "task:0/device:CPU:7"; TF_ASSERT_OK_AND_ASSIGN(std::string sharding, ConvertLayoutStrToHloShardingStr(layout_str)); EXPECT_EQ("{devices=[2,2,1,2]0,1,2,3,4,5,6,7 last_tile_dim_replicate}", sharding); } TEST(LayoutToXLAShardingTest, PartiallyShardedLayout3D_2) { std::string layout_str = "sharding_specs:x,unsharded,unsharded, " "mesh:|x=2,y=2,z=2|0,1,2,3,4,5,6,7|0,1,2,3,4,5,6,7|/job:localhost/task:0/" "device:CPU:0,/" "job:localhost/" "task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2,/job:localhost/" "task:0/device:CPU:3,/job:localhost/task:0/device:CPU:4,/job:localhost/" "task:0/device:CPU:5,/job:localhost/task:0/device:CPU:6,/job:localhost/" "task:0/device:CPU:7"; TF_ASSERT_OK_AND_ASSIGN(std::string sharding, ConvertLayoutStrToHloShardingStr(layout_str)); EXPECT_EQ("{devices=[2,1,1,4]0,1,2,3,4,5,6,7 last_tile_dim_replicate}", sharding); } TEST(LayoutToXLAShardingTest, PartiallyShardedPermutedLayout3D_1) { std::string layout_str = "sharding_specs:z,y,unsharded, " "mesh:|x=2,y=2,z=2|0,1,2,3,4,5,6,7|0,1,2,3,4,5,6,7|/job:localhost/task:0/" "device:CPU:0,/" "job:localhost/" "task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2,/job:localhost/" "task:0/device:CPU:3,/job:localhost/task:0/device:CPU:4,/job:localhost/" "task:0/device:CPU:5,/job:localhost/task:0/device:CPU:6,/job:localhost/" "task:0/device:CPU:7"; TF_ASSERT_OK_AND_ASSIGN(std::string sharding, ConvertLayoutStrToHloShardingStr(layout_str)); EXPECT_EQ("{devices=[2,2,1,2]0,4,2,6,1,5,3,7 last_tile_dim_replicate}", sharding); } TEST(LayoutToXLAShardingTest, PartiallyShardedPermutedLayout3D_2) { std::string layout_str = "sharding_specs:y,unsharded,z, " "mesh:|x=2,y=2,z=2|0,1,2,3,4,5,6,7|0,1,2,3,4,5,6,7|/job:localhost/task:0/" "device:CPU:0,/" "job:localhost/" "task:0/device:CPU:1,/job:localhost/task:0/device:CPU:2,/job:localhost/" "task:0/device:CPU:3,/job:localhost/task:0/device:CPU:4,/job:localhost/" "task:0/device:CPU:5,/job:localhost/task:0/device:CPU:6,/job:localhost/" "task:0/device:CPU:7"; TF_ASSERT_OK_AND_ASSIGN(std::string sharding, ConvertLayoutStrToHloShardingStr(layout_str)); EXPECT_EQ("{devices=[2,1,2,2]0,4,1,5,2,6,3,7 last_tile_dim_replicate}", sharding); } void BM_65536Devices(benchmark::State& state) { std::vector<int64_t> device_ids(65536); absl::c_iota(device_ids, 0); std::vector<std::string> devices_str(65536); absl::c_generate(devices_str, [n = 0]() mutable { return absl::StrCat("/job:localhost/task:0/device:CPU:", n++); }); auto mesh = Mesh::CreateMesh("", {"x", "y", "z"}, {8, 128, 64}, device_ids, {}, device_ids, devices_str); TF_ASSERT_OK_AND_ASSIGN(auto layout, Layout::GetLayout({"x", "y", "z"}, mesh)); for (auto s : state) { TF_EXPECT_OK(ConvertLayoutToXlaOpSharding(layout).status()); } } BENCHMARK(BM_65536Devices); } } }
1,053
cpp
tensorflow/tensorflow
wav_to_spectrogram
tensorflow/examples/wav_to_spectrogram/wav_to_spectrogram.cc
tensorflow/examples/wav_to_spectrogram/wav_to_spectrogram_test.cc
#ifndef TENSORFLOW_EXAMPLES_WAV_TO_SPECTROGRAM_WAV_TO_SPECTROGRAM_H_ #define TENSORFLOW_EXAMPLES_WAV_TO_SPECTROGRAM_WAV_TO_SPECTROGRAM_H_ #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/platform/types.h" tensorflow::Status WavToSpectrogram(const tensorflow::string& input_wav, int32_t window_size, int32_t stride, float brightness, const tensorflow::string& output_image); #endif #include "tensorflow/examples/wav_to_spectrogram/wav_to_spectrogram.h" #include <vector> #include "tensorflow/cc/ops/audio_ops.h" #include "tensorflow/cc/ops/const_op.h" #include "tensorflow/cc/ops/image_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/graph/default_device.h" #include "tensorflow/core/graph/graph_def_builder.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/core/stringpiece.h" #include "tensorflow/core/lib/core/threadpool.h" #include "tensorflow/core/lib/io/path.h" #include "tensorflow/core/lib/strings/stringprintf.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/public/session.h" using tensorflow::DT_FLOAT; using tensorflow::DT_UINT8; using tensorflow::Output; using tensorflow::TensorShape; tensorflow::Status WavToSpectrogram(const tensorflow::string& input_wav, int32_t window_size, int32_t stride, float brightness, const tensorflow::string& output_image) { auto root = tensorflow::Scope::NewRootScope(); using namespace tensorflow::ops; Output file_reader = tensorflow::ops::ReadFile(root.WithOpName("input_wav"), input_wav); DecodeWav wav_decoder = DecodeWav(root.WithOpName("wav_decoder"), file_reader); Output spectrogram = AudioSpectrogram(root.WithOpName("spectrogram"), wav_decoder.audio, window_size, stride); Output brightness_placeholder = Placeholder(root.WithOpName("brightness_placeholder"), DT_FLOAT, Placeholder::Attrs().Shape(TensorShape({}))); Output mul = Mul(root.WithOpName("mul"), spectrogram, brightness_placeholder); Output min_const = Const(root.WithOpName("min_const"), 255.0f); Output min = Minimum(root.WithOpName("min"), mul, min_const); Output cast = Cast(root.WithOpName("cast"), min, DT_UINT8); Output expand_dims_const = Const(root.WithOpName("expand_dims_const"), -1); Output expand_dims = ExpandDims(root.WithOpName("expand_dims"), cast, expand_dims_const); Output squeeze = Squeeze(root.WithOpName("squeeze"), expand_dims, Squeeze::Attrs().Axis({0})); Output png_encoder = EncodePng(root.WithOpName("png_encoder"), squeeze); tensorflow::ops::WriteFile file_writer = tensorflow::ops::WriteFile( root.WithOpName("output_image"), output_image, png_encoder); tensorflow::GraphDef graph; TF_RETURN_IF_ERROR(root.ToGraphDef(&graph)); std::unique_ptr<tensorflow::Session> session( tensorflow::NewSession(tensorflow::SessionOptions())); TF_RETURN_IF_ERROR(session->Create(graph)); tensorflow::Tensor brightness_tensor(DT_FLOAT, TensorShape({})); brightness_tensor.scalar<float>()() = brightness; TF_RETURN_IF_ERROR( session->Run({{"brightness_placeholder", brightness_tensor}}, {}, {"output_image"}, nullptr)); return absl::OkStatus(); }
#include "tensorflow/examples/wav_to_spectrogram/wav_to_spectrogram.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/io/path.h" #include "tensorflow/core/lib/wav/wav_io.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/test.h" TEST(WavToSpectrogramTest, WavToSpectrogramTest) { const tensorflow::string input_wav = tensorflow::io::JoinPath(tensorflow::testing::TmpDir(), "input_wav.wav"); const tensorflow::string output_image = tensorflow::io::JoinPath( tensorflow::testing::TmpDir(), "output_image.png"); float audio[8] = {-1.0f, 0.0f, 1.0f, 0.0f, -1.0f, 0.0f, 1.0f, 0.0f}; tensorflow::string wav_string; TF_ASSERT_OK( tensorflow::wav::EncodeAudioAsS16LEWav(audio, 44100, 1, 8, &wav_string)); TF_ASSERT_OK(tensorflow::WriteStringToFile(tensorflow::Env::Default(), input_wav, wav_string)); TF_ASSERT_OK(WavToSpectrogram(input_wav, 4, 4, 64.0f, output_image)); TF_EXPECT_OK(tensorflow::Env::Default()->FileExists(output_image)); }
1,054
cpp
tensorflow/tensorflow
recognize_commands
tensorflow/examples/speech_commands/recognize_commands.cc
tensorflow/examples/speech_commands/recognize_commands_test.cc
#ifndef TENSORFLOW_EXAMPLES_SPEECH_COMMANDS_RECOGNIZE_COMMANDS_H_ #define TENSORFLOW_EXAMPLES_SPEECH_COMMANDS_RECOGNIZE_COMMANDS_H_ #include <deque> #include <unordered_set> #include <vector> #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { class RecognizeCommands { public: explicit RecognizeCommands(const std::vector<string>& labels, int32_t average_window_duration_ms = 1000, float detection_threshold = 0.2, int32_t suppression_ms = 500, int32_t minimum_count = 3); Status ProcessLatestResults(const Tensor& latest_results, const int64_t current_time_ms, string* found_command, float* score, bool* is_new_command); private: std::vector<string> labels_; int32 average_window_duration_ms_; float detection_threshold_; int32 suppression_ms_; int32 minimum_count_; std::deque<std::pair<int64_t, Tensor>> previous_results_; string previous_top_label_; int64_t labels_count_; int64_t previous_top_label_time_; }; } #endif #include "tensorflow/examples/speech_commands/recognize_commands.h" #include "absl/status/status.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { RecognizeCommands::RecognizeCommands(const std::vector<string>& labels, int32_t average_window_duration_ms, float detection_threshold, int32_t suppression_ms, int32_t minimum_count) : labels_(labels), average_window_duration_ms_(average_window_duration_ms), detection_threshold_(detection_threshold), suppression_ms_(suppression_ms), minimum_count_(minimum_count) { labels_count_ = labels.size(); previous_top_label_ = "_silence_"; previous_top_label_time_ = std::numeric_limits<int64_t>::min(); } Status RecognizeCommands::ProcessLatestResults(const Tensor& latest_results, const int64_t current_time_ms, string* found_command, float* score, bool* is_new_command) { if (latest_results.NumElements() != labels_count_) { return errors::InvalidArgument( "The results for recognition should contain ", labels_count_, " elements, but there are ", latest_results.NumElements()); } if ((!previous_results_.empty()) && (current_time_ms < previous_results_.front().first)) { return errors::InvalidArgument( "Results must be fed in increasing time order, but received a " "timestamp of ", current_time_ms, " that was earlier than the previous one of ", previous_results_.front().first); } previous_results_.push_back({current_time_ms, latest_results}); const int64_t time_limit = current_time_ms - average_window_duration_ms_; while (previous_results_.front().first < time_limit) { previous_results_.pop_front(); } const int64_t how_many_results = previous_results_.size(); const int64_t earliest_time = previous_results_.front().first; const int64_t samples_duration = current_time_ms - earliest_time; if ((how_many_results < minimum_count_) || (samples_duration < (average_window_duration_ms_ / 4))) { *found_command = previous_top_label_; *score = 0.0f; *is_new_command = false; return absl::OkStatus(); } std::vector<float> average_scores(labels_count_); for (const auto& previous_result : previous_results_) { const Tensor& scores_tensor = previous_result.second; auto scores_flat = scores_tensor.flat<float>(); for (int i = 0; i < scores_flat.size(); ++i) { average_scores[i] += scores_flat(i) / how_many_results; } } std::vector<std::pair<int, float>> sorted_average_scores; sorted_average_scores.reserve(labels_count_); 
for (int i = 0; i < labels_count_; ++i) { sorted_average_scores.push_back( std::pair<int, float>({i, average_scores[i]})); } std::sort(sorted_average_scores.begin(), sorted_average_scores.end(), [](const std::pair<int, float>& left, const std::pair<int, float>& right) { return left.second > right.second; }); const int current_top_index = sorted_average_scores[0].first; const string current_top_label = labels_[current_top_index]; const float current_top_score = sorted_average_scores[0].second; int64_t time_since_last_top; if ((previous_top_label_ == "_silence_") || (previous_top_label_time_ == std::numeric_limits<int64_t>::min())) { time_since_last_top = std::numeric_limits<int64_t>::max(); } else { time_since_last_top = current_time_ms - previous_top_label_time_; } if ((current_top_score > detection_threshold_) && (current_top_label != previous_top_label_) && (time_since_last_top > suppression_ms_)) { previous_top_label_ = current_top_label; previous_top_label_time_ = current_time_ms; *is_new_command = true; } else { *is_new_command = false; } *found_command = current_top_label; *score = current_top_score; return absl::OkStatus(); } }
#include "tensorflow/examples/speech_commands/recognize_commands.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/types.h" #include "tsl/lib/core/status_test_util.h" namespace tensorflow { TEST(RecognizeCommandsTest, Basic) { RecognizeCommands recognize_commands({"_silence_", "a", "b"}); Tensor results(DT_FLOAT, {3}); test::FillValues<float>(&results, {1.0f, 0.0f, 0.0f}); string found_command; float score; bool is_new_command; TF_EXPECT_OK(recognize_commands.ProcessLatestResults( results, 0, &found_command, &score, &is_new_command)); } TEST(RecognizeCommandsTest, FindCommands) { RecognizeCommands recognize_commands({"_silence_", "a", "b"}, 1000, 0.2f); Tensor results(DT_FLOAT, {3}); test::FillValues<float>(&results, {0.0f, 1.0f, 0.0f}); bool has_found_new_command = false; string new_command; for (int i = 0; i < 10; ++i) { string found_command; float score; bool is_new_command; int64_t current_time_ms = 0 + (i * 100); TF_EXPECT_OK(recognize_commands.ProcessLatestResults( results, current_time_ms, &found_command, &score, &is_new_command)); if (is_new_command) { EXPECT_FALSE(has_found_new_command); has_found_new_command = true; new_command = found_command; } } EXPECT_TRUE(has_found_new_command); EXPECT_EQ("a", new_command); test::FillValues<float>(&results, {0.0f, 0.0f, 1.0f}); has_found_new_command = false; new_command = ""; for (int i = 0; i < 10; ++i) { string found_command; float score; bool is_new_command; int64_t current_time_ms = 1000 + (i * 100); TF_EXPECT_OK(recognize_commands.ProcessLatestResults( results, current_time_ms, &found_command, &score, &is_new_command)); if (is_new_command) { EXPECT_FALSE(has_found_new_command); has_found_new_command = true; new_command = found_command; } } EXPECT_TRUE(has_found_new_command); EXPECT_EQ("b", new_command); } TEST(RecognizeCommandsTest, BadInputLength) { RecognizeCommands recognize_commands({"_silence_", "a", "b"}, 1000, 0.2f); Tensor bad_results(DT_FLOAT, {2}); test::FillValues<float>(&bad_results, {1.0f, 0.0f}); string found_command; float score; bool is_new_command; EXPECT_FALSE(recognize_commands .ProcessLatestResults(bad_results, 0, &found_command, &score, &is_new_command) .ok()); } TEST(RecognizeCommandsTest, BadInputTimes) { RecognizeCommands recognize_commands({"_silence_", "a", "b"}, 1000, 0.2f); Tensor results(DT_FLOAT, {3}); test::FillValues<float>(&results, {1.0f, 0.0f, 0.0f}); string found_command; float score; bool is_new_command; TF_EXPECT_OK(recognize_commands.ProcessLatestResults( results, 100, &found_command, &score, &is_new_command)); EXPECT_FALSE(recognize_commands .ProcessLatestResults(results, 0, &found_command, &score, &is_new_command) .ok()); } }
1,055
cpp
tensorflow/tensorflow
accuracy_utils
tensorflow/examples/speech_commands/accuracy_utils.cc
tensorflow/examples/speech_commands/accuracy_utils_test.cc
#ifndef TENSORFLOW_EXAMPLES_SPEECH_COMMANDS_ACCURACY_UTILS_H_ #define TENSORFLOW_EXAMPLES_SPEECH_COMMANDS_ACCURACY_UTILS_H_ #include <vector> #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { struct StreamingAccuracyStats { StreamingAccuracyStats() : how_many_ground_truth_words(0), how_many_ground_truth_matched(0), how_many_false_positives(0), how_many_correct_words(0), how_many_wrong_words(0) {} int32 how_many_ground_truth_words; int32 how_many_ground_truth_matched; int32 how_many_false_positives; int32 how_many_correct_words; int32 how_many_wrong_words; }; Status ReadGroundTruthFile(const string& file_name, std::vector<std::pair<string, int64_t>>* result); void CalculateAccuracyStats( const std::vector<std::pair<string, int64_t>>& ground_truth_list, const std::vector<std::pair<string, int64_t>>& found_words, int64_t up_to_time_ms, int64_t time_tolerance_ms, StreamingAccuracyStats* stats); void PrintAccuracyStats(const StreamingAccuracyStats& stats); } #endif #include "tensorflow/examples/speech_commands/accuracy_utils.h" #include <fstream> #include <iomanip> #include <unordered_set> #include "absl/log/log.h" #include "absl/status/status.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/numbers.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { Status ReadGroundTruthFile(const string& file_name, std::vector<std::pair<string, int64_t>>* result) { std::ifstream file(file_name); if (!file) { return tensorflow::errors::NotFound("Ground truth file '", file_name, "' not found."); } result->clear(); string line; while (std::getline(file, line)) { std::vector<string> pieces = tensorflow::str_util::Split(line, ','); if (pieces.size() != 2) { continue; } float timestamp; if (!tensorflow::strings::safe_strtof(pieces[1], &timestamp)) { return tensorflow::errors::InvalidArgument( "Wrong number format at line: ", line); } string label = pieces[0]; auto timestamp_int64 = static_cast<int64_t>(timestamp); result->push_back({label, timestamp_int64}); } std::sort(result->begin(), result->end(), [](const std::pair<string, int64>& left, const std::pair<string, int64>& right) { return left.second < right.second; }); return absl::OkStatus(); } void CalculateAccuracyStats( const std::vector<std::pair<string, int64_t>>& ground_truth_list, const std::vector<std::pair<string, int64_t>>& found_words, int64_t up_to_time_ms, int64_t time_tolerance_ms, StreamingAccuracyStats* stats) { int64_t latest_possible_time; if (up_to_time_ms == -1) { latest_possible_time = std::numeric_limits<int64_t>::max(); } else { latest_possible_time = up_to_time_ms + time_tolerance_ms; } stats->how_many_ground_truth_words = 0; for (const std::pair<string, int64_t>& ground_truth : ground_truth_list) { const int64_t ground_truth_time = ground_truth.second; if (ground_truth_time > latest_possible_time) { break; } ++stats->how_many_ground_truth_words; } stats->how_many_false_positives = 0; stats->how_many_correct_words = 0; stats->how_many_wrong_words = 0; std::unordered_set<int64_t> has_ground_truth_been_matched; for (const std::pair<string, int64_t>& found_word : found_words) { const string& found_label = found_word.first; const int64_t found_time = found_word.second; const int64_t earliest_time = found_time - time_tolerance_ms; const int64_t latest_time = found_time + time_tolerance_ms; 
bool has_match_been_found = false; for (const std::pair<string, int64_t>& ground_truth : ground_truth_list) { const int64_t ground_truth_time = ground_truth.second; if ((ground_truth_time > latest_time) || (ground_truth_time > latest_possible_time)) { break; } if (ground_truth_time < earliest_time) { continue; } const string& ground_truth_label = ground_truth.first; if ((ground_truth_label == found_label) && (has_ground_truth_been_matched.count(ground_truth_time) == 0)) { ++stats->how_many_correct_words; } else { ++stats->how_many_wrong_words; } has_ground_truth_been_matched.insert(ground_truth_time); has_match_been_found = true; break; } if (!has_match_been_found) { ++stats->how_many_false_positives; } } stats->how_many_ground_truth_matched = has_ground_truth_been_matched.size(); } void PrintAccuracyStats(const StreamingAccuracyStats& stats) { if (stats.how_many_ground_truth_words == 0) { LOG(INFO) << "No ground truth yet, " << stats.how_many_false_positives << " false positives"; } else { float any_match_percentage = (stats.how_many_ground_truth_matched * 100.0f) / stats.how_many_ground_truth_words; float correct_match_percentage = (stats.how_many_correct_words * 100.0f) / stats.how_many_ground_truth_words; float wrong_match_percentage = (stats.how_many_wrong_words * 100.0f) / stats.how_many_ground_truth_words; float false_positive_percentage = (stats.how_many_false_positives * 100.0f) / stats.how_many_ground_truth_words; LOG(INFO) << std::setprecision(1) << std::fixed << any_match_percentage << "% matched, " << correct_match_percentage << "% correctly, " << wrong_match_percentage << "% wrongly, " << false_positive_percentage << "% false positives "; } } }
#include "tensorflow/examples/speech_commands/accuracy_utils.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/path.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/types.h" #include "tsl/lib/core/status_test_util.h" namespace tensorflow { TEST(AccuracyUtilsTest, ReadGroundTruthFile) { string file_name = tensorflow::io::JoinPath(tensorflow::testing::TmpDir(), "ground_truth.txt"); string file_data = "a,10\nb,12\n"; TF_ASSERT_OK(WriteStringToFile(Env::Default(), file_name, file_data)); std::vector<std::pair<string, int64_t>> ground_truth; TF_ASSERT_OK(ReadGroundTruthFile(file_name, &ground_truth)); ASSERT_EQ(2, ground_truth.size()); EXPECT_EQ("a", ground_truth[0].first); EXPECT_EQ(10, ground_truth[0].second); EXPECT_EQ("b", ground_truth[1].first); EXPECT_EQ(12, ground_truth[1].second); } TEST(AccuracyUtilsTest, CalculateAccuracyStats) { StreamingAccuracyStats stats; CalculateAccuracyStats({{"a", 1000}, {"b", 9000}}, {{"a", 1200}, {"b", 5000}, {"a", 8700}}, 10000, 500, &stats); EXPECT_EQ(2, stats.how_many_ground_truth_words); EXPECT_EQ(2, stats.how_many_ground_truth_matched); EXPECT_EQ(1, stats.how_many_false_positives); EXPECT_EQ(1, stats.how_many_correct_words); EXPECT_EQ(1, stats.how_many_wrong_words); } TEST(AccuracyUtilsTest, PrintAccuracyStats) { StreamingAccuracyStats stats; PrintAccuracyStats(stats); } }
1,056
cpp
tensorflow/tensorflow
offset_counter_helper
tensorflow/python/framework/offset_counter_helper.cc
tensorflow/python/framework/offset_counter_helper_test.cc
#ifndef TENSORFLOW_PYTHON_FRAMEWORK_OFFSET_COUNTER_HELPER_H_ #define TENSORFLOW_PYTHON_FRAMEWORK_OFFSET_COUNTER_HELPER_H_ #include <string> #include "absl/strings/string_view.h" #include "tensorflow/python/framework/op_reg_offset.pb.h" #include "tsl/platform/status.h" #include "tsl/platform/types.h" namespace tensorflow { absl::Status FindOpRegistationFromFile(absl::string_view filename, OpRegOffsets& op_reg_offsets); } #endif #include "tensorflow/python/framework/offset_counter_helper.h" #include <cstdint> #include <fstream> #include <string> #include "absl/strings/string_view.h" #include "tsl/platform/errors.h" #include "tsl/platform/regexp.h" #include "tsl/platform/strcat.h" namespace tensorflow { absl::Status FindOpRegistationFromFile(absl::string_view filename, OpRegOffsets& op_reg_offsets) { static constexpr LazyRE2 reg_pattern = { R"regex((REGISTER_OP)\("([\w>]+)"\))regex"}; std::ifstream f(std::string{filename}); if (f.bad()) { return tsl::errors::IOError( tsl::strings::StrCat("Cannot open file: ", filename), errno); } std::string line; absl::string_view reg_keyword, op_name; uint32_t offsets = 0; while (std::getline(f, line)) { if (RE2::PartialMatch(line, *reg_pattern, &reg_keyword, &op_name)) { uint32_t offset_start = offsets + (op_name.data() - line.data() - 1); uint32_t offset_end = offset_start + op_name.size() + 2; auto op_reg_offset = op_reg_offsets.add_offsets(); op_reg_offset->set_name(std::string{op_name}); op_reg_offset->set_filepath(std::string{filename}); op_reg_offset->set_start(offset_start); op_reg_offset->set_end(offset_end); } offsets += line.size() + 1; } f.close(); return absl::OkStatus(); } }
#include "tensorflow/python/framework/offset_counter_helper.h" #include <string> #include "absl/strings/str_format.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/python/framework/op_reg_offset.pb.h" #include "tsl/lib/core/status_test_util.h" namespace tensorflow { namespace { TEST(OffsetCounterHelper, FindOpRegistationFromFile) { std::string content = R"code( REGISTER_OP("Test>Op1"); REGISTER_OP("Test>Op2") .Input("input: int32") .Output("output: int32"); )code"; Env* env = Env::Default(); string fname; ASSERT_TRUE(env->LocalTempFilename(&fname)); TF_ASSERT_OK(WriteStringToFile(env, fname, content)); OpRegOffsets actual; TF_CHECK_OK(FindOpRegistationFromFile(fname, actual)); EXPECT_EQ(actual.offsets(0).name(), "Test>Op1"); EXPECT_EQ(actual.offsets(0).filepath(), fname); EXPECT_EQ(actual.offsets(0).start(), 13); EXPECT_EQ(actual.offsets(0).end(), 23); EXPECT_EQ(actual.offsets(1).name(), "Test>Op2"); EXPECT_EQ(actual.offsets(1).filepath(), fname); EXPECT_EQ(actual.offsets(1).start(), 38); EXPECT_EQ(actual.offsets(1).end(), 48); } } }
1,057
cpp
tensorflow/tensorflow
python_op_gen
tensorflow/python/framework/python_op_gen.cc
tensorflow/python/framework/python_op_gen_test.cc
#ifndef TENSORFLOW_PYTHON_FRAMEWORK_PYTHON_OP_GEN_H_ #define TENSORFLOW_PYTHON_FRAMEWORK_PYTHON_OP_GEN_H_ #include <string> #include <unordered_map> #include <unordered_set> #include <vector> #include "absl/types/span.h" #include "tensorflow/core/framework/op_def.pb.h" #include "tensorflow/core/framework/op_gen_lib.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/python/framework/op_reg_offset.pb.h" namespace tensorflow { string GetPythonOps(const OpList& ops, const ApiDefMap& api_defs, const OpRegOffsets& op_reg_offsets, absl::Span<const string> hidden_ops, absl::Span<const string> source_file_list); void PrintPythonOps(const OpList& ops, const ApiDefMap& api_defs, const OpRegOffsets& op_reg_offsets, absl::Span<const string> hidden_ops, absl::Span<const string> source_file_list); string GetPythonWrappers(const char* op_list_buf, size_t op_list_len); string GetArgAnnotation( const OpDef::ArgDef& arg, const std::unordered_map<string, string>& type_annotations); } #endif #include "tensorflow/python/framework/python_op_gen.h" #include <float.h> #include <stdio.h> #include <algorithm> #include <cmath> #include <cstdio> #include <iomanip> #include <locale> #include <set> #include <sstream> #include <string> #include <unordered_map> #include <unordered_set> #include <utility> #include <vector> #include "absl/strings/escaping.h" #include "absl/strings/str_join.h" #include "absl/strings/str_replace.h" #include "absl/types/span.h" #include "tensorflow/core/framework/api_def.pb.h" #include "tensorflow/core/framework/attr_value.pb.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/op_def.pb.h" #include "tensorflow/core/framework/op_def_util.h" #include "tensorflow/core/framework/op_gen_lib.h" #include "tensorflow/core/framework/tensor.pb.h" #include "tensorflow/core/framework/tensor_shape.pb.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/lib/gtl/map_util.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/lib/strings/strcat.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/platform/strcat.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/python/framework/python_op_gen_annotator.h" #include "tsl/platform/protobuf.h" namespace tensorflow { namespace { const int kLatestAPIExportVersion = 2; const int kRightMargin = 78; constexpr char kEagerFallbackSuffix[] = "_eager_fallback"; const std::unordered_map<string, string> dtype_type{ {"_dtypes.float16", "_atypes.Float16"}, {"_dtypes.half", "_atypes.Half"}, {"_dtypes.float32", "_atypes.Float32"}, {"_dtypes.float64", "_atypes.Float64"}, {"_dtypes.bfloat16", "_atypes.BFloat16"}, {"_dtypes.complex64", "_atypes.Complex64"}, {"_dtypes.complex128", "_atypes.Complex128"}, {"_dtypes.int8", "_atypes.Int8"}, {"_dtypes.uint8", "_atypes.UInt8"}, {"_dtypes.uint16", "_atypes.UInt16"}, {"_dtypes.uint32", "_atypes.UInt32"}, {"_dtypes.uint64", "_atypes.UInt64"}, {"_dtypes.int16", "_atypes.Int16"}, {"_dtypes.int32", "_atypes.Int32"}, {"_dtypes.int64", "_atypes.Int64"}, {"_dtypes.bool", "_atypes.Bool"}, {"_dtypes.string", "_atypes.String"}, {"_dtypes.qint8", "_atypes.QInt8"}, {"_dtypes.quint8", "_atypes.QUInt8"}, {"_dtypes.qint16", "_atypes.QInt16"}, {"_dtypes.quint16", "_atypes.QUInt16"}, {"_dtypes.qint32", "_atypes.QInt32"}, {"_dtypes.resource", "_atypes.Resource"}, {"_dtypes.variant", "_atypes.Variant"}, {"_dtypes.float8_e4m3fn", 
"_atypes.Float8e4m3fn"}, {"_dtypes.float8_e5m2", "_atypes.Float8e5m2"}, {"_dtypes.int4", "_atypes.Int4"}, {"_dtypes.uint4", "_atypes.UInt4"}, }; string AttrVarName(const string& attr_name, std::unordered_map<string, string>* attr_expressions) { const string var = strings::StrCat("_attr_", attr_name); if (attr_expressions != nullptr) (*attr_expressions)[attr_name] = var; return var; } void AddInferredAttr(const string& indentation, const string& attr_name, const string& value_expression, string* result, std::unordered_map<string, string>* attr_expressions) { strings::StrAppend(result, indentation, AttrVarName(attr_name, attr_expressions), " = ", value_expression, "\n"); } string VectorToTuple(const std::vector<string>& l) { if (l.size() == 1) return strings::StrCat("(", l.front(), ",)"); string ret = "("; for (int i = 0, end = l.size(); i < end; ++i) { if (i > 0) { strings::StrAppend(&ret, ", "); } strings::StrAppend(&ret, l[i]); } strings::StrAppend(&ret, ")"); return ret; } void Unflatten(const string& prefix, const std::vector<string>& output_sizes, const string& var, string* result) { for (int i = 0, end = output_sizes.size(); i < end; ++i) { if (!output_sizes[i].empty()) { strings::StrAppend(result, prefix, var, " = "); if (i > 0) strings::StrAppend(result, var, "[:", i, "] + "); if (i + 1 < end) { if (i == 0) { strings::StrAppend(result, "[", var, "[:", output_sizes[i], "]] + ", var, "[", output_sizes[i], ":]"); } else { strings::StrAppend(result, "[", var, "[", i, ":", i, " + ", output_sizes[i], "]] + ", var, "[", i, " + ", output_sizes[i], ":]"); } } else { strings::StrAppend(result, "[", var, "[", i, ":]]"); } strings::StrAppend(result, "\n"); } } } string TensorPBString(const TensorProto& pb) { std::string message_short_text; ::tensorflow::protobuf::TextFormat::Printer printer; printer.SetSingleLineMode(true); printer.SetExpandAny(true); printer.PrintToString(pb, &message_short_text); return strings::StrCat("\"\"\"", message_short_text, "\"\"\""); } bool IsPythonReserved(const string& s); bool IsOpWithUnderscorePrefix(const string& s); string AvoidPythonReserved(const string& s); string AttrValueToPython(const string& type, const AttrValue& value, const string& dtype_module = "tf."); void GenerateLowerCaseOpName(const string& str, string* result); string DataTypeToPython(DataType dtype, const string& dtype_module); class ParamNames { public: ParamNames(const string& name, const string& rename_to) : name_(name) { rename_to_ = AvoidPythonReserved(rename_to); } string GetName() const { return name_; } string GetRenameTo() const { return rename_to_; } private: string name_; string rename_to_; }; class GenPythonOp { public: GenPythonOp( const OpDef& op_def, const ApiDef& api_def, const string& function_name, python_op_gen_internal::GeneratedCodeAnnotator* annotator = nullptr) : op_def_(op_def), api_def_(api_def), function_name_(function_name), num_outs_(op_def.output_arg_size()), annotator_(annotator) { op_name_ = function_name_; absl::ConsumePrefix(&op_name_, "_"); } ~GenPythonOp() = default; string Code(); protected: void AddDefLine(const string& function_name, const string& parameters); void AddDefLine(const string& parameters); void AddDocStringDescription(); void AddDocStringArgs(); void AddDocStringInputs(); void AddDocStringAttrs(); void AddDocStringNameArg(); void AddOutputGlobals(); void AddDocStringOutputs(); void AddBody(const string& prefix); void AddBodyNoReturn(const string& apply_prefix); void AddExport(); void HandleGraphMode(const string& function_setup, const 
std::vector<string>& output_sizes); string GetEagerNotAllowedError(); void ExpectListArg(const string& indentation, const string& arg_name, string* output); bool GetEagerFunctionSetup(const string& indentation, string* function_setup); void GetOutputSizesAndNumOutputsExpr(std::vector<string>* output_sizes, string* num_outputs_expr); void AddEagerFunctionTeardown(const string& indentation, const std::vector<string>& output_sizes, bool execute_record_gradient); bool AddEagerFastPathAndGraphCode( const string& parameters, const std::vector<string>& output_sizes, const string& eager_not_allowed_error, const std::unordered_map<string, string>& type_annotations); bool AddEagerFallbackCode( const string& parameters, const std::vector<string>& output_sizes, const string& num_outputs_expr, const string& eager_not_allowed_error, const std::unordered_map<string, string>& type_annotations); void AddEagerFastPathExecute(); void AddEagerInferredAttrs(const string& indentation); void AddEagerInputCasts(const string& indentation); void AddEagerAttrs(const string& indentation); void AddEagerExecute(const string& indentation, const string& num_outputs_expr); void AddFallbackDispatch(const string& prefix); void AddTypeBasedDispatch(const string& prefix); void AddTypeBasedDispatcherAlias(); void AddRawOpExport(const string& parameters); std::unordered_map<string, string> GetTypeAnnotations(); void GenerateTypeVars( const std::unordered_map<string, string>& type_annotations); void AddReturnTypeAnnotation( const std::unordered_map<string, string>& type_annotations); void AddAttrForArg(const string& attr, int arg_index) { gtl::InsertIfNotPresent(&inferred_attrs_, attr, op_def_.input_arg(arg_index).name()); auto iter = attr_to_args_.find(attr); if (iter == attr_to_args_.end()) { attr_to_args_.insert(AttrToArgMap::value_type(attr, {arg_index})); } else { iter->second.push_back(arg_index); } } string FlattenInputs(const std::vector<int>* input_indices, std::vector<string>* output_sizes) const; const OpDef& op_def_; const ApiDef& api_def_; const string function_name_; const int num_outs_; python_op_gen_internal::GeneratedCodeAnnotator* annotator_ = nullptr; uint32_t def_offset_start_ = 0; string prelude_; string result_; std::unordered_map<string, string> inferred_attrs_; std::vector<string> attrs_; std::vector<ParamNames> param_names_; StringPiece op_name_; typedef std::unordered_map<string, std::vector<int>> AttrToArgMap; AttrToArgMap attr_to_args_; std::unordered_map<string, string> attr_expressions_; std::vector<ParamNames> params_no_default_; std::vector<std::pair<ParamNames, string>> params_with_default_; }; string GetEagerPythonOp( const OpDef& op_def, const ApiDef& api_def, const string& function_name, python_op_gen_internal::GeneratedCodeAnnotator* annotator = nullptr) { return GenPythonOp(op_def, api_def, function_name, annotator).Code(); } bool IsPythonReserved(const string& s) { static const std::set<string>* const kPythonReserved = new std::set<string>( { "and", "as", "assert", "break", "class", "continue", "def", "del", "elif", "else", "except", "exec", "finally", "for", "from", "global", "if", "import", "in", "is", "lambda", "not", "or", "pass", "print", "raise", "return", "try", "while", "with", "yield", "ArithmeticError", "AssertionError", "AttributeError", "BaseException", "BufferError", "BytesWarning", "DeprecationWarning", "EOFError", "Ellipsis", "EnvironmentError", "Exception", "False", "FloatingPointError", "FutureWarning", "GeneratorExit", "IOError", "ImportError", "ImportWarning", 
"IndentationError", "IndexError", "KeyError", "KeyboardInterrupt", "LookupError", "MemoryError", "NameError", "None", "NotImplemented", "NotImplementedError", "OSError", "OverflowError", "PendingDeprecationWarning", "ReferenceError", "RuntimeError", "RuntimeWarning", "StandardError", "StopIteration", "SyntaxError", "SyntaxWarning", "SystemError", "SystemExit", "TabError", "True", "TypeError", "UnboundLocalError", "UnicodeDecodeError", "UnicodeEncodeError", "UnicodeError", "UnicodeTranslateError", "UnicodeWarning", "UserWarning", "ValueError", "Warning", "ZeroDivisionError", "__debug__", "__doc__", "__import__", "__name__", "__package__"}); return kPythonReserved->count(s) > 0; } bool IsOpWithUnderscorePrefix(const string& s) { static const std::set<string>* const kUnderscoreOps = new std::set<string>( { "abs", "all", "any", "apply", "bin", "bool", "buffer", "bytearray", "bytes", "callable", "chr", "classmethod", "cmp", "coerce", "compile", "complex", "copyright", "credits", "delattr", "dict", "dir", "divmod", "enumerate", "eval", "execfile", "exit", "file", "filter", "float", "format", "frozenset", "getattr", "globals", "hasattr", "hash", "help", "hex", "id", "input", "int", "intern", "isinstance", "issubclass", "iter", "len", "license", "list", "locals", "long", "map", "max", "memoryview", "min", "next", "object", "oct", "open", "ord", "pow", "print", "property", "quit", "range", "raw_input", "reduce", "reload", "repr", "reversed", "set", "setattr", "slice", "sorted", "staticmethod", "str", "sum", "super", "tuple", "type", "unichr", "unicode", "vars", "xrange", "zip", "fused_batch_norm", "histogram_fixed_width", "stack", "batch_norm_with_global_normalization", "clip_by_value"}); return kUnderscoreOps->count(s) > 0; } string AvoidPythonReserved(const string& s) { string result = absl::StrReplaceAll(s, {{">", "_"}}); if (IsPythonReserved(result)) return strings::StrCat(result, "_"); return result; } string Indent(int initial, int rest, StringPiece in) { string copy(in.data(), in.size()); absl::StripTrailingAsciiWhitespace(&copy); std::vector<string> v = str_util::Split(copy, '\n'); string result; bool first = true; for (const string& line : v) { if (first) { result = strings::StrCat(Spaces(initial), line, "\n"); first = false; } else { if (line.empty()) { strings::StrAppend(&result, "\n"); } else { strings::StrAppend(&result, Spaces(rest), line, "\n"); } } } return result; } void AppendWithinWidth(string* dest, StringPiece append, int width) { auto first_line = append.find('\n'); if (first_line == string::npos) first_line = append.size(); if (dest->size() + first_line + 1 > static_cast<size_t>(width)) { strings::StrAppend(dest, "\n", append); } else { strings::StrAppend(dest, " ", append); } } string PythonDataTypeString(DataType dtype) { switch (dtype) { case DT_FLOAT: return "float32"; case DT_DOUBLE: return "float64"; default: return DataTypeString(dtype); } } string TypeString(DataType dtype, bool ref) { if (ref) { return strings::StrCat("mutable `", PythonDataTypeString(dtype), "`"); } else { return strings::StrCat("`", PythonDataTypeString(dtype), "`"); } } string TypeListString(const AttrValue& value) { string ret; for (int t : value.list().type()) { if (!ret.empty()) strings::StrAppend(&ret, ", "); DataType dtype = static_cast<DataType>(t); if (IsRefType(dtype)) { strings::StrAppend(&ret, PythonDataTypeString(RemoveRefType(dtype)), " mutable"); } else { strings::StrAppend(&ret, "`", PythonDataTypeString(dtype), "`"); } } return ret; } string SingleTensorName(DataType dtype, bool 
is_ref) { const string type_str = TypeString(dtype, is_ref); return strings::StrCat("A `Tensor` of type ", type_str, "."); } const char kUnknownTensorType[] = {"A `Tensor`."}; string ArgTypeName(const OpDef& op_def, const OpDef::ArgDef& arg, const std::unordered_map<string, string>& inferred_attrs, bool is_output) { if (!arg.number_attr().empty()) { const string* original_arg = gtl::FindOrNull(inferred_attrs, arg.number_attr()); string prefix; if (original_arg == nullptr) { prefix = strings::StrCat("A list of `", arg.number_attr(), "`"); } else if (*original_arg == arg.name()) { const OpDef::AttrDef* attr = FindAttr(arg.number_attr(), op_def); if (attr->has_minimum() && attr->minimum() > 0) { prefix = strings::StrCat("A list of at least ", attr->minimum()); } else { prefix = "A list of"; } } else { prefix = strings::StrCat("A list with the same length as `", AvoidPythonReserved(*original_arg), "` of"); } if (arg.type() != DT_INVALID) { return strings::StrCat(prefix, " `Tensor` objects with type ", TypeString(arg.type(), arg.is_ref()), "."); } else { original_arg = gtl::FindOrNull(inferred_attrs, arg.type_attr()); if (arg.is_ref()) { strings::StrAppend(&prefix, " mutable"); } if (original_arg == nullptr) { return strings::StrCat(prefix, " `Tensor` objects with type `", arg.type_attr(), "`."); } else if (*original_arg == arg.name()) { const OpDef::AttrDef* attr = FindAttr(arg.type_attr(), op_def); if (attr->has_allowed_values()) { return strings::StrCat(prefix, " `Tensor` objects with the same type in: ", TypeListString(attr->allowed_values()), "."); } else { return strings::StrCat(prefix, " `Tensor` objects with the same type."); } } else { return strings::StrCat(prefix, " `Tensor` objects with the same type as `", AvoidPythonReserved(*original_arg), "`."); } } } else if (!arg.type_attr().empty() || !arg.type_list_attr().empty()) { const bool is_list = !arg.type_list_attr().empty(); const string attr_name = is_list ? arg.type_list_attr() : arg.type_attr(); const OpDef::AttrDef* attr = FindAttr(attr_name, op_def); const string mutable_str = arg.is_ref() ? "mutable " : ""; const string prefix = is_list ? strings::StrCat("A list of ", mutable_str, "`Tensor` objects") : strings::StrCat("A ", mutable_str, "`Tensor`"); const string* original_arg = gtl::FindOrNull(inferred_attrs, attr_name); if (original_arg == nullptr) { return strings::StrCat(prefix, " of type `", attr_name, "`."); } else if (*original_arg == arg.name()) { if (attr->has_allowed_values()) { if (is_list) { return strings::StrCat(prefix, " with types from: ", TypeListString(attr->allowed_values()), "."); } else { return strings::StrCat(prefix, is_output ? ". Has one of the following types: " : ". Must be one of the following types: ", TypeListString(attr->allowed_values()), "."); } } else { return strings::StrCat(prefix, "."); } } else { return strings::StrCat(prefix, is_output ? ". Has the same type as `" : ". 
Must have the same type as `", AvoidPythonReserved(*original_arg), "`."); } } else { return SingleTensorName(arg.type(), arg.is_ref()); } } string GetReturns(const OpDef& op_def, const std::vector<string>& output_type_string) { string result; DCHECK_EQ(op_def.output_arg_size(), output_type_string.size()); const int num_outs = op_def.output_arg_size(); strings::StrAppend(&result, "\n Returns:\n"); if (num_outs == 0) { strings::StrAppend(&result, " The created Operation.\n"); } else { if (num_outs == 1) { StringPiece description = op_def.output_arg(0).description(); if (ConsumeEquals(&description)) { strings::StrAppend(&result, Indent(4, 4, description)); } else { string desc = output_type_string.empty() ? kUnknownTensorType : output_type_string[0]; if (desc == kUnknownTensorType) { if (!description.empty()) { desc = op_def.output_arg(0).description(); } else if (!op_def.output_arg(0).name().empty()) { desc = strings::StrCat(" The ", op_def.output_arg(0).name(), " `Tensor`."); } } else if (!description.empty()) { AppendWithinWidth(&desc, description, kRightMargin - 4 ); } strings::StrAppend(&result, Indent(4, 4, desc)); } } else { std::vector<string> out_names(num_outs); for (int i = 0; i < num_outs; ++i) { if (!op_def.output_arg(i).name().empty()) { out_names[i] = op_def.output_arg(i).name(); } else { out_names[i] = strings::StrCat("output", i); } } strings::StrAppend(&result, " A tuple of `Tensor` objects (", absl::StrJoin(out_names, ", "), ").\n\n"); for (int i = 0; i < num_outs; ++i) { string desc = strings::StrCat(out_names[i], ": "); StringPiece description = op_def.output_arg(i).description(); if (ConsumeEquals(&description)) { strings::StrAppend(&desc, description); } else { const string type = static_cast<size_t>(i) < output_type_string.size() ? 
output_type_string[i] : kUnknownTensorType; if (!description.empty()) { if (type == kUnknownTensorType) { strings::StrAppend(&desc, description); } else { strings::StrAppend(&desc, type, " ", description); } } else { strings::StrAppend(&desc, type); } } strings::StrAppend(&result, Indent(4, 6, desc)); } } } return result; } string StringToPython(const string& str) { return strings::StrCat("\"", absl::CEscape(str), "\""); } string DataTypeToPython(DataType dtype, const string& dtype_module) { return strings::StrCat(dtype_module, PythonDataTypeString(dtype)); } string ShapeToPython(const TensorShapeProto& shape) { if (shape.unknown_rank()) { return "None"; } string python = "["; for (const auto& dim : shape.dim()) { if (python.size() > 1) strings::StrAppend(&python, ", "); if (!dim.name().empty()) { strings::StrAppend(&python, "(", StringToPython(dim.name()), ", ", dim.size(), ")"); } else { strings::StrAppend(&python, dim.size()); } } strings::StrAppend(&python, "]"); return python; } string TensorToPython(const TensorProto& proto) { return tsl::LegacyUnredactedShortDebugString(proto); } string AttrListToPython(const AttrValue& value, const string& dtype_module = "tf.") { string ret; if (value.list().s_size() > 0) { for (int i = 0; i < value.list().s_size(); ++i) { if (i > 0) strings::StrAppend(&ret, ", "); strings::StrAppend(&ret, StringToPython(value.list().s(i))); } } else if (value.list().i_size() > 0) { for (int i = 0; i < value.list().i_size(); ++i) { if (i > 0) strings::StrAppend(&ret, ", "); strings::StrAppend(&ret, value.list().i(i)); } } else if (value.list().f_size() > 0) { for (int i = 0; i < value.list().f_size(); ++i) { if (i > 0) strings::StrAppend(&ret, ", "); strings::StrAppend(&ret, value.list().f(i)); } } else if (value.list().b_size() > 0) { for (int i = 0; i < value.list().b_size(); ++i) { if (i > 0) strings::StrAppend(&ret, ", "); strings::StrAppend(&ret, value.list().b(i) ? "True" : "False"); } } else if (value.list().type_size() > 0) { for (int i = 0; i < value.list().type_size(); ++i) { if (i > 0) strings::StrAppend(&ret, ", "); strings::StrAppend(&ret, DataTypeToPython(value.list().type(i), dtype_module)); } } else if (value.list().shape_size() > 0) { for (int i = 0; i < value.list().shape_size(); ++i) { if (i > 0) strings::StrAppend(&ret, ", "); strings::StrAppend(&ret, ShapeToPython(value.list().shape(i))); } } else if (value.list().tensor_size() > 0) { for (int i = 0; i < value.list().tensor_size(); ++i) { if (i > 0) strings::StrAppend(&ret, ", "); strings::StrAppend(&ret, TensorToPython(value.list().tensor(i))); } } else if (value.list().func_size() > 0) { for (int i = 0; i < value.list().func_size(); ++i) { if (i > 0) strings::StrAppend(&ret, ", "); strings::StrAppend(&ret, StringToPython(value.list().func(i).name())); } } return ret; } string AttrValueToPython(const string& type, const AttrValue& value, const string& dtype_module) { if (type == "string") { return StringToPython(value.s()); } else if (type == "int") { return strings::StrCat(value.i()); } else if (type == "float") { if (std::isnan(value.f()) || std::isinf(value.f())) { return strings::StrCat("float('", value.f(), "')"); } else { static_assert(FLT_DIG < 10, "FLT_DIG is too big"); std::ostringstream s; s.imbue(std::locale::classic()); s << std::setprecision(FLT_DIG) << value.f(); if (s.good()) { return s.str(); } return strings::StrCat(v
#include "tensorflow/python/framework/python_op_gen.h" #include <unordered_set> #include <vector> #include "absl/strings/escaping.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/op_def.pb.h" #include "tensorflow/core/framework/op_gen_lib.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/python/framework/kythe_metadata.pb.h" #include "tensorflow/python/framework/op_reg_offset.pb.h" namespace tensorflow { namespace { void ExpectHasSubstr(const string& s, const string& expected) { EXPECT_TRUE(absl::StrContains(s, expected)) << "'Generated ops " << " does not contain '" << expected << "'"; } void ExpectDoesNotHaveSubstr(const string& s, const string& expected) { EXPECT_FALSE(absl::StrContains(s, expected)) << "'Generated ops contains '" << expected << "'"; } void ExpectSubstrOrder(const string& s, const string& before, const string& after) { int before_pos = s.find(before); int after_pos = s.find(after); ASSERT_NE(std::string::npos, before_pos); ASSERT_NE(std::string::npos, after_pos); EXPECT_LT(before_pos, after_pos) << before << "' is not before '" << after; } TEST(PythonOpGen, TypeAnnotateAllOps) { OpList ops; OpRegistry::Global()->Export(false, &ops); ApiDefMap api_def_map(ops); string code = GetPythonOps(ops, api_def_map, OpRegOffsets(), {}, {}); const string all_types = ", _atypes.BFloat16, _atypes.Bool, _atypes.Complex128, " "_atypes.Complex64, _atypes.Float16, _atypes.Float32, _atypes.Float64, " "_atypes.Float8e4m3fn, _atypes.Float8e5m2, _atypes.Half, _atypes.Int16, " "_atypes.Int32, _atypes.Int4, _atypes.Int64, _atypes.Int8, " "_atypes.QInt16, _atypes.QInt32, _atypes.QInt8, _atypes.QUInt16, " "_atypes.QUInt8, _atypes.Resource, _atypes.String, _atypes.UInt16, " "_atypes.UInt32, _atypes.UInt4, _atypes.UInt64, _atypes.UInt8, " "_atypes.Variant)"; const string fake_param_typevar = "TV_FakeParam_dtype = TypeVar(\"TV_FakeParam_dtype\"" + all_types; const string fake_param = "def fake_param_eager_fallback(dtype: TV_FakeParam_dtype, shape, name, " "ctx) -> Annotated[Any, TV_FakeParam_dtype]:"; const string fake_param_fallback = "def fake_param_eager_fallback(dtype: TV_FakeParam_dtype, shape, name, " "ctx) -> Annotated[Any, TV_FakeParam_dtype]:"; ExpectHasSubstr(code, fake_param_typevar); ExpectHasSubstr(code, fake_param); ExpectHasSubstr(code, fake_param_fallback); const string to_bool_typevar = "TV_ToBool_T = TypeVar(\"TV_ToBool_T\"" + all_types; const string to_bool_ = "def to_bool(input: Annotated[Any, TV_ToBool_T], " "name=None) -> " "Annotated[Any, _atypes.Bool]:"; const string to_bool_fallback = "def to_bool_eager_fallback(input: " "Annotated[Any, TV_ToBool_T], name, ctx) " "-> Annotated[Any, _atypes.Bool]:"; ExpectHasSubstr(code, to_bool_typevar); ExpectHasSubstr(code, to_bool_); ExpectHasSubstr(code, to_bool_fallback); } TEST(PythonOpGen, TypeAnnotateSingleTypeTensor) { constexpr char kBaseOpDef[] = R"( op { name: "Bar" input_arg { name: "x" type: DT_STRING } input_arg { name: "y" type: DT_QINT8 } output_arg { name: "output" type: DT_BOOL } summary: "Summary for op Bar." description: "Description for op Bar." 
} )"; OpList op_defs; OpRegistry::Global()->Export(false, &op_defs); protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs); ApiDefMap api_def_map(op_defs); string code = GetPythonOps(op_defs, api_def_map, OpRegOffsets(), {}, {}); const string typed_bar = "def bar(x: Annotated[Any, _atypes.String], y: " "Annotated[Any, _atypes.QInt8], " "name=None) -> Annotated[Any, _atypes.Bool]:"; ExpectHasSubstr(code, typed_bar); const string untyped_bar = "def bar(x, y, name=None):"; ExpectDoesNotHaveSubstr(code, untyped_bar); } TEST(PythonOpGen, TypeAnnotateMultiTypeTensor) { constexpr char kBaseOpDef[] = R"( op { name: "Foo" input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T2" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_UINT8 type: DT_INT8 } } } attr { name: "T2" type: "type" allowed_values { list { type: DT_STRING type: DT_FLOAT type: DT_DOUBLE } } } summary: "Summary for op Foo." description: "Description for op Foo." } )"; OpList op_defs; OpRegistry::Global()->Export(false, &op_defs); protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs); ApiDefMap api_def_map(op_defs); string code = GetPythonOps(op_defs, api_def_map, OpRegOffsets(), {}, {}); const string typed_foo = "def foo(x: Annotated[Any, TV_Foo_T], y: " "Annotated[Any, TV_Foo_T2], name=None) " "-> Annotated[Any, TV_Foo_T]:"; ExpectHasSubstr(code, typed_foo); } TEST(PythonOpGen, GenerateCorrectTypeVars) { constexpr char kBaseOpDef[] = R"( op { name: "Foo" input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T2" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_UINT8 type: DT_INT8 } } } attr { name: "T2" type: "type" allowed_values { list { type: DT_STRING type: DT_FLOAT type: DT_DOUBLE } } } summary: "Summary for op Foo." description: "Description for op Foo." } )"; OpList op_defs; OpRegistry::Global()->Export(false, &op_defs); protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs); ApiDefMap api_def_map(op_defs); string code = GetPythonOps(op_defs, api_def_map, OpRegOffsets(), {}, {}); const string typevars_foo = R"( TV_Foo_T = TypeVar("TV_Foo_T", _atypes.Int8, _atypes.UInt8) TV_Foo_T2 = TypeVar("TV_Foo_T2", _atypes.Float32, _atypes.Float64, _atypes.String) )"; ExpectHasSubstr(code, typevars_foo); } TEST(PythonOpGen, TypeAnnotateFallback) { constexpr char kBaseOpDef[] = R"( op { name: "Foo" input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T2" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_UINT8 type: DT_INT8 } } } attr { name: "T2" type: "type" allowed_values { list { type: DT_STRING type: DT_FLOAT type: DT_DOUBLE } } } summary: "Summary for op Foo." description: "Description for op Foo." 
} )"; OpList op_defs; OpRegistry::Global()->Export(false, &op_defs); protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs); ApiDefMap api_def_map(op_defs); string code = GetPythonOps(op_defs, api_def_map, OpRegOffsets(), {}, {}); const string typed_foo_fallback = "def foo_eager_fallback(x: Annotated[Any, TV_Foo_T], y: " "Annotated[Any, TV_Foo_T2], name, ctx) -> " "Annotated[Any, TV_Foo_T]:"; ExpectHasSubstr(code, typed_foo_fallback); } TEST(PythonOpGen, GenerateTypeVarAboveOp) { constexpr char kBaseOpDef[] = R"( op { name: "Foo" input_arg { name: "x" type_attr: "T" } input_arg { name: "y" type_attr: "T2" } output_arg { name: "output" type_attr: "T" } attr { name: "T" type: "type" allowed_values { list { type: DT_UINT8 type: DT_INT8 } } } attr { name: "T2" type: "type" allowed_values { list { type: DT_STRING type: DT_FLOAT type: DT_DOUBLE } } } summary: "Summary for op Foo." description: "Description for op Foo." } )"; OpList op_defs; OpRegistry::Global()->Export(false, &op_defs); protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs); ApiDefMap api_def_map(op_defs); string code = GetPythonOps(op_defs, api_def_map, OpRegOffsets(), {}, {}); const string typevar_foo = "TV_Foo_"; const string def_foo = "def foo"; ExpectSubstrOrder(code, typevar_foo, def_foo); } TEST(PythonOpGen, TypeAnnotateDefaultParams) { constexpr char kBaseOpDef[] = R"( op { name: "FooBar" input_arg { name: "x" type: DT_FLOAT } output_arg { name: "output" type: DT_BOOL } attr { name: "t" type: "type" allowed_values { list { type: DT_HALF type: DT_INT8 } } } attr { name: "var1" type: "bool" default_value { b: false } } attr { name: "var2" type: "int" default_value { i: 0 } } summary: "Summary for op FooBar." description: "Description for op FooBar." } )"; OpList op_defs; OpRegistry::Global()->Export(false, &op_defs); protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs); ApiDefMap api_def_map(op_defs); string code = GetPythonOps(op_defs, api_def_map, OpRegOffsets(), {}, {}); const string params = "def foo_bar(x: Annotated[Any, _atypes.Float32], t: " "TV_FooBar_t, " "var1:bool=False, var2:int=0, name=None)"; const string params_fallback = "def foo_bar_eager_fallback(x: " "Annotated[Any, _atypes.Float32], t: " "TV_FooBar_t, var1: bool, var2: int, name, ctx)"; ExpectHasSubstr(code, params); ExpectHasSubstr(code, params_fallback); } TEST(PythonOpGen, NoTypingSequenceTensors) { constexpr char kBaseOpDef[] = R"( op { name: "Baz" input_arg { name: "inputs" number_attr: "N" type_list_attr: "T" } output_arg { name: "output1" type: DT_BOOL } output_arg { name: "output2" type: DT_BOOL } attr { name: "T" type: "bool" } attr { name: "N" type: "int" } summary: "Summary for op Baz." description: "Description for op Baz." 
} )"; OpList op_defs; OpRegistry::Global()->Export(false, &op_defs); protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs); ApiDefMap api_def_map(op_defs); string code = GetPythonOps(op_defs, api_def_map, OpRegOffsets(), {}, {}); const string baz_def_line = "def baz(inputs, name=None):"; ExpectHasSubstr(code, baz_def_line); } TEST(PythonOpGen, InsertCommentsForSourceFileLocation) { std::vector<string> source_file_list{"some_ops.cc", "another_ops.cc"}; OpList op_defs; ApiDefMap api_def_map(op_defs); string code = GetPythonOps(op_defs, api_def_map, OpRegOffsets(), {}, source_file_list); ExpectHasSubstr(code, "Original C++ source file: some_ops.cc, another_ops.cc"); } GeneratedCodeInfo DecodeAnnotation(string anno) { std::vector<string> sp = absl::StrSplit(anno, ':'); string gci_str; absl::Base64Unescape(sp[1], &gci_str); GeneratedCodeInfo gci; gci.ParseFromString(gci_str); return gci; } TEST(PythonOpGen, GenerateMetadataWhenOpRegOffsetsIsPresent) { constexpr char kBaseOpDef[] = R"( op { name: "Baz" } )"; OpList op_defs; OpRegistry::Global()->Export(false, &op_defs); protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs); ApiDefMap api_def_map(op_defs); OpRegOffsets offsets; auto* offset = offsets.add_offsets(); offset->set_name("Baz"); offset->set_filepath("some_ops.cc"); offset->set_start(0); offset->set_end(3); string code = GetPythonOps(op_defs, api_def_map, offsets, {}, {}); int target_begin = code.find(absl::StrCat("def baz")) + 4; int target_end = target_begin + 3; std::vector<string> sp = absl::StrSplit(code, '\n'); string last_line = sp.back(); ASSERT_TRUE(absl::StrContains(last_line, "# kythe.proto.metadata.GeneratedCodeInfo:")); GeneratedCodeInfo gci = DecodeAnnotation(last_line); EXPECT_EQ(gci.meta_size(), 1); EXPECT_EQ(gci.meta(0).source_begin(), 0); EXPECT_EQ(gci.meta(0).source_end(), 3); EXPECT_EQ(gci.meta(0).target_begin(), target_begin); EXPECT_EQ(gci.meta(0).target_end(), target_end); } TEST(PythonOpGen, GenerateMetadataForMultipleOutputOp) { constexpr char kBaseOpDef[] = R"( op { name: "Baz" output_arg { name: "output1" type: DT_BOOL } output_arg { name: "output2" type: DT_BOOL } } )"; OpList op_defs; OpRegistry::Global()->Export(false, &op_defs); protobuf::TextFormat::ParseFromString(kBaseOpDef, &op_defs); ApiDefMap api_def_map(op_defs); OpRegOffsets offsets; auto* offset = offsets.add_offsets(); offset->set_name("Baz"); offset->set_filepath("some_ops.cc"); offset->set_start(0); offset->set_end(3); string code = GetPythonOps(op_defs, api_def_map, offsets, {}, {}); int target_begin = code.find(absl::StrCat("def baz")) + 4; int target_end = target_begin + 3; std::vector<string> sp = absl::StrSplit(code, '\n'); string last_line = sp.back(); ASSERT_TRUE(absl::StrContains(last_line, "# kythe.proto.metadata.GeneratedCodeInfo:")); GeneratedCodeInfo gci = DecodeAnnotation(last_line); EXPECT_EQ(gci.meta_size(), 1); EXPECT_EQ(gci.meta(0).source_begin(), 0); EXPECT_EQ(gci.meta(0).source_end(), 3); EXPECT_EQ(gci.meta(0).target_begin(), target_begin); EXPECT_EQ(gci.meta(0).target_end(), target_end); } TEST(PythonOpGen, NotGenerateMetadataWhenOpRegOffsetsIsEmpty) { OpList op_defs; ApiDefMap api_def_map(op_defs); string code = GetPythonOps(op_defs, api_def_map, OpRegOffsets(), {}, {}); ExpectDoesNotHaveSubstr(code, "# kythe.proto.metadata.GeneratedCodeInfo:"); } } }
1,058
cpp
tensorflow/tensorflow
op_def_util
tensorflow/core/framework/op_def_util.cc
tensorflow/core/framework/op_def_util_test.cc
#ifndef TENSORFLOW_CORE_FRAMEWORK_OP_DEF_UTIL_H_ #define TENSORFLOW_CORE_FRAMEWORK_OP_DEF_UTIL_H_ #include <string> #include "tensorflow/core/framework/api_def.pb.h" #include "tensorflow/core/framework/op_def.pb.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/lib/core/stringpiece.h" #include "tensorflow/core/platform/protobuf.h" namespace tensorflow { Status ValidateOpDef(const OpDef& op_def); Status CheckOpDeprecation(const OpDef& op_def, int graph_def_version); Status ValidateAttrValue(const AttrValue& attr_value, const OpDef::AttrDef& attr); const OpDef::AttrDef* FindAttr(StringPiece name, const OpDef& op_def); OpDef::AttrDef* FindAttrMutable(StringPiece name, OpDef* op_def); const OpDef::ArgDef* FindInputArg(StringPiece name, const OpDef& op_def); const ApiDef::Arg* FindInputArg(StringPiece name, const ApiDef& api_def); std::string SummarizeOpDef(const OpDef& op_def); Status OpDefCompatible(const OpDef& old_op, const OpDef& new_op); Status OpDefAddedDefaultsUnchanged(const OpDef& old_op, const OpDef& penultimate_op, const OpDef& new_op); Status OpDefAttrDefaultsUnchanged(const OpDef& old_op, const OpDef& new_op); void RemoveDescriptionsFromOpDef(OpDef* op_def); void RemoveDescriptionsFromOpList(OpList* op_list); void RemoveNonDeprecationDescriptionsFromOpDef(OpDef* op_def); bool AttrDefEqual(const OpDef::AttrDef& a1, const OpDef::AttrDef& a2); uint64 AttrDefHash(const OpDef::AttrDef& a); bool RepeatedAttrDefEqual(const protobuf::RepeatedPtrField<OpDef::AttrDef>& a1, const protobuf::RepeatedPtrField<OpDef::AttrDef>& a2); uint64 RepeatedAttrDefHash(const protobuf::RepeatedPtrField<OpDef::AttrDef>& a); bool OpDefEqual(const OpDef& o1, const OpDef& o2); uint64 OpDefHash(const OpDef& o); } #endif #include "tensorflow/core/framework/op_def_util.h" #include <algorithm> #include <cstring> #include <vector> #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "tensorflow/core/framework/attr_value.pb.h" #include "tensorflow/core/framework/attr_value_util.h" #include "tensorflow/core/framework/op_def.pb.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/core/stringpiece.h" #include "tensorflow/core/lib/gtl/map_util.h" #include "tensorflow/core/lib/strings/proto_serialization.h" #include "tensorflow/core/lib/strings/scanner.h" #include "tensorflow/core/platform/mutex.h" #include "tensorflow/core/platform/types.h" namespace tensorflow { namespace { bool HasAttrStyleType(const OpDef::ArgDef& arg) { return arg.type() != DT_INVALID || !arg.type_attr().empty() || !arg.type_list_attr().empty(); } Status AllowedTypeValue(DataType dt, const OpDef::AttrDef& attr) { const AttrValue& allowed_values(attr.allowed_values()); for (auto allowed : allowed_values.list().type()) { if (dt == allowed) { return OkStatus(); } } string allowed_str; for (int i = 0; i < allowed_values.list().type_size(); ++i) { if (!allowed_str.empty()) { strings::StrAppend(&allowed_str, ", "); } strings::StrAppend(&allowed_str, DataTypeString(allowed_values.list().type(i))); } return errors::InvalidArgument( "Value for attr '", attr.name(), "' of ", DataTypeString(dt), " is not in the list of allowed values: ", allowed_str); } Status AllowedStringValue(const string& str, const OpDef::AttrDef& attr) { const AttrValue& allowed_values(attr.allowed_values()); for (const auto& allowed : allowed_values.list().s()) { if (str == allowed) { return OkStatus(); } } string allowed_str; for (const string& allowed : 
allowed_values.list().s()) { if (!allowed_str.empty()) { strings::StrAppend(&allowed_str, ", "); } strings::StrAppend(&allowed_str, "\"", allowed, "\""); } return errors::InvalidArgument( "Value for attr '", attr.name(), "' of \"", str, "\" is not in the list of allowed values: ", allowed_str); } } Status ValidateAttrValue(const AttrValue& attr_value, const OpDef::AttrDef& attr) { TF_RETURN_WITH_CONTEXT_IF_ERROR(AttrValueHasType(attr_value, attr.type()), " for attr '", attr.name(), "'"); if (attr.has_minimum()) { if (attr.type() == "int") { if (attr_value.i() < attr.minimum()) { return errors::InvalidArgument( "Value for attr '", attr.name(), "' of ", attr_value.i(), " must be at least minimum ", attr.minimum()); } } else { int length = -1; if (attr.type() == "list(string)") { length = attr_value.list().s_size(); } else if (attr.type() == "list(int)") { length = attr_value.list().i_size(); } else if (attr.type() == "list(float)") { length = attr_value.list().f_size(); } else if (attr.type() == "list(bool)") { length = attr_value.list().b_size(); } else if (attr.type() == "list(type)") { length = attr_value.list().type_size(); } else if (attr.type() == "list(shape)") { length = attr_value.list().shape_size(); } else if (attr.type() == "list(tensor)") { length = attr_value.list().tensor_size(); } else if (attr.type() == "list(func)") { length = attr_value.list().func_size(); } if (length < attr.minimum()) { return errors::InvalidArgument( "Length for attr '", attr.name(), "' of ", length, " must be at least minimum ", attr.minimum()); } } } if (attr.has_allowed_values()) { if (attr.type() == "type") { TF_RETURN_IF_ERROR(AllowedTypeValue(attr_value.type(), attr)); } else if (attr.type() == "list(type)") { for (int dt : attr_value.list().type()) { TF_RETURN_IF_ERROR(AllowedTypeValue(static_cast<DataType>(dt), attr)); } } else if (attr.type() == "string") { TF_RETURN_IF_ERROR(AllowedStringValue(attr_value.s(), attr)); } else if (attr.type() == "list(string)") { for (const string& str : attr_value.list().s()) { TF_RETURN_IF_ERROR(AllowedStringValue(str, attr)); } } else { return errors::Unimplemented( "Support for allowed_values not implemented for type ", attr.type()); } } return OkStatus(); } const OpDef::AttrDef* FindAttr(StringPiece name, const OpDef& op_def) { for (int i = 0; i < op_def.attr_size(); ++i) { if (op_def.attr(i).name() == name) { return &op_def.attr(i); } } return nullptr; } OpDef::AttrDef* FindAttrMutable(StringPiece name, OpDef* op_def) { for (int i = 0; i < op_def->attr_size(); ++i) { if (op_def->attr(i).name() == name) { return op_def->mutable_attr(i); } } return nullptr; } const OpDef::ArgDef* FindInputArg(StringPiece name, const OpDef& op_def) { for (int i = 0; i < op_def.input_arg_size(); ++i) { if (op_def.input_arg(i).name() == name) { return &op_def.input_arg(i); } } return nullptr; } const ApiDef::Arg* FindInputArg(StringPiece name, const ApiDef& api_def) { for (int i = 0; i < api_def.in_arg_size(); ++i) { if (api_def.in_arg(i).name() == name) { return &api_def.in_arg(i); } } return nullptr; } #define VALIDATE(EXPR, ...) \ do { \ if (!(EXPR)) { \ return errors::InvalidArgument( \ __VA_ARGS__, "; in OpDef: ", op_def.ShortDebugString()); \ } \ } while (false) static Status ValidateArg(const OpDef::ArgDef& arg, const OpDef& op_def, bool output, absl::flat_hash_set<StringPiece>* names) { const string suffix = strings::StrCat( output ? 
" for output '" : " for input '", arg.name(), "'"); VALIDATE(names->emplace(arg.name()).second, "Duplicate name: ", arg.name()); VALIDATE(HasAttrStyleType(arg), "Missing type", suffix); if (!arg.number_attr().empty()) { const OpDef::AttrDef* attr = FindAttr(arg.number_attr(), op_def); VALIDATE(attr != nullptr, "No attr with name '", arg.number_attr(), "'", suffix); VALIDATE(attr->type() == "int", "Attr '", attr->name(), "' used as length", suffix, " has type ", attr->type(), " != int"); VALIDATE(attr->has_minimum(), "Attr '", attr->name(), "' used as length", suffix, " must have minimum"); VALIDATE(attr->minimum() >= 0, "Attr '", attr->name(), "' used as length", suffix, " must have minimum >= 0"); VALIDATE(arg.type_list_attr().empty(), "Can't have both number_attr and type_list_attr", suffix); VALIDATE((arg.type() != DT_INVALID ? 1 : 0) + (!arg.type_attr().empty() ? 1 : 0) == 1, "Exactly one of type, type_attr must be set", suffix); } else { const int num_type_fields = (arg.type() != DT_INVALID ? 1 : 0) + (!arg.type_attr().empty() ? 1 : 0) + (!arg.type_list_attr().empty() ? 1 : 0); VALIDATE(num_type_fields == 1, "Exactly one of type, type_attr, type_list_attr must be set", suffix); } if (!arg.type_attr().empty()) { const OpDef::AttrDef* attr = FindAttr(arg.type_attr(), op_def); VALIDATE(attr != nullptr, "No attr with name '", arg.type_attr(), "'", suffix); VALIDATE(attr->type() == "type", "Attr '", attr->name(), "' used as type_attr", suffix, " has type ", attr->type(), " != type"); } else if (!arg.type_list_attr().empty()) { const OpDef::AttrDef* attr = FindAttr(arg.type_list_attr(), op_def); VALIDATE(attr != nullptr, "No attr with name '", arg.type_list_attr(), "'", suffix); VALIDATE(attr->type() == "list(type)", "Attr '", attr->name(), "' used as type_list_attr", suffix, " has type ", attr->type(), " != list(type)"); } else { VALIDATE(!IsRefType(arg.type()), "Illegal use of ref type '", DataTypeString(arg.type()), "'. 
Use 'Ref(type)' instead", suffix); } return OkStatus(); } bool IsValidOpName(StringPiece sp) { using ::tensorflow::strings::Scanner; Scanner scanner(sp); scanner.One(Scanner::UPPERLETTER).Any(Scanner::LETTER_DIGIT_UNDERSCORE); while (true) { if (!scanner.GetResult()) return false; if (scanner.empty()) return true; scanner.One(Scanner::RANGLE) .One(Scanner::UPPERLETTER) .Any(Scanner::LETTER_DIGIT_UNDERSCORE); } } Status ValidateOpDef(const OpDef& op_def) { if (!absl::StartsWith(op_def.name(), "_")) { VALIDATE(IsValidOpName(op_def.name()), "Invalid name: ", op_def.name(), " (Did you use CamelCase?)"); } absl::flat_hash_set<StringPiece> names; for (const auto& attr : op_def.attr()) { VALIDATE(names.emplace(attr.name()).second, "Duplicate name: ", attr.name()); DataType dt; VALIDATE(!DataTypeFromString(attr.name(), &dt), "Attr can't have name ", attr.name(), " that matches a data type"); StringPiece type(attr.type()); bool is_list = absl::ConsumePrefix(&type, "list("); bool found = false; for (StringPiece valid : {"string", "int", "float", "bool", "type", "shape", "tensor", "func"}) { if (absl::ConsumePrefix(&type, valid)) { found = true; break; } } VALIDATE(found, "Unrecognized type '", type, "' in attr '", attr.name(), "'"); if (is_list) { VALIDATE(absl::ConsumePrefix(&type, ")"), "'list(' is missing ')' in attr ", attr.name(), "'s type ", attr.type()); } VALIDATE(type.empty(), "Extra '", type, "' at the end of attr ", attr.name(), "'s type ", attr.type()); if (attr.has_minimum()) { VALIDATE(attr.type() == "int" || is_list, "Attr '", attr.name(), "' has minimum for unsupported type ", attr.type()); if (is_list) { VALIDATE(attr.minimum() >= 0, "Attr '", attr.name(), "' with list type must have a non-negative minimum, not ", attr.minimum()); } } else { VALIDATE(attr.minimum() == 0, "Attr '", attr.name(), "' with has_minimum = false but minimum ", attr.minimum(), " not equal to default of 0"); } if (attr.has_allowed_values()) { const string list_type = is_list ? attr.type() : strings::StrCat("list(", attr.type(), ")"); TF_RETURN_WITH_CONTEXT_IF_ERROR( AttrValueHasType(attr.allowed_values(), list_type), " for attr '", attr.name(), "' in Op '", op_def.name(), "'"); } if (attr.has_default_value()) { TF_RETURN_WITH_CONTEXT_IF_ERROR( ValidateAttrValue(attr.default_value(), attr), " in Op '", op_def.name(), "'"); } } for (const auto& arg : op_def.input_arg()) { TF_RETURN_IF_ERROR(ValidateArg(arg, op_def, false, &names)); } for (const auto& arg : op_def.output_arg()) { TF_RETURN_IF_ERROR(ValidateArg(arg, op_def, true, &names)); } return OkStatus(); } #undef VALIDATE Status CheckOpDeprecation(const OpDef& op_def, int graph_def_version) { if (op_def.has_deprecation()) { const OpDeprecation& dep = op_def.deprecation(); if (graph_def_version >= dep.version()) { return errors::Unimplemented( "Op ", op_def.name(), " is not available in GraphDef version ", graph_def_version, ". It has been removed in version ", dep.version(), ". ", dep.explanation(), "."); } else { static mutex mu(LINKER_INITIALIZED); static auto* warned = new absl::flat_hash_set<string>(); bool warn; { mutex_lock lock(mu); warn = warned->insert(op_def.name()).second; } if (warn) { LOG(WARNING) << "Op " << op_def.name() << " is deprecated." << " It will cease to work in GraphDef version " << dep.version() << ". 
" << dep.explanation() << "."; } } } return OkStatus(); } namespace { string SummarizeArgs(const protobuf::RepeatedPtrField<OpDef::ArgDef>& args) { string ret; for (const OpDef::ArgDef& arg : args) { if (!ret.empty()) strings::StrAppend(&ret, ", "); strings::StrAppend(&ret, arg.name(), ":"); if (arg.is_ref()) strings::StrAppend(&ret, "Ref("); if (!arg.number_attr().empty()) { strings::StrAppend(&ret, arg.number_attr(), "*"); } if (arg.type() != DT_INVALID) { strings::StrAppend(&ret, DataTypeString(arg.type())); } else { strings::StrAppend(&ret, arg.type_attr()); } if (arg.is_ref()) strings::StrAppend(&ret, ")"); } return ret; } } string SummarizeOpDef(const OpDef& op_def) { string ret = strings::StrCat("Op<name=", op_def.name()); strings::StrAppend(&ret, "; signature=", SummarizeArgs(op_def.input_arg()), " -> ", SummarizeArgs(op_def.output_arg())); for (int i = 0; i < op_def.attr_size(); ++i) { strings::StrAppend(&ret, "; attr=", op_def.attr(i).name(), ":", op_def.attr(i).type()); if (op_def.attr(i).has_default_value()) { strings::StrAppend(&ret, ",default=", SummarizeAttrValue(op_def.attr(i).default_value())); } if (op_def.attr(i).has_minimum()) { strings::StrAppend(&ret, ",min=", op_def.attr(i).minimum()); } if (op_def.attr(i).has_allowed_values()) { strings::StrAppend(&ret, ",allowed=", SummarizeAttrValue(op_def.attr(i).allowed_values())); } } if (op_def.is_commutative()) { strings::StrAppend(&ret, "; is_commutative=true"); } if (op_def.is_aggregate()) { strings::StrAppend(&ret, "; is_aggregate=true"); } if (op_def.is_stateful()) { strings::StrAppend(&ret, "; is_stateful=true"); } if (op_def.allows_uninitialized_input()) { strings::StrAppend(&ret, "; allows_uninitialized_input=true"); } if (op_def.is_distributed_communication()) { strings::StrAppend(&ret, "; is_distributed_communication=true"); } strings::StrAppend(&ret, ">"); return ret; } namespace { template <class T> bool IsSubsetOf(const T& sub, const T& super) { for (const auto& o : sub) { bool found = false; for (const auto& n : super) { if (o == n) { found = true; break; } } if (!found) return false; } return true; } bool MoreRestrictive(const OpDef::AttrDef& old_attr, const OpDef::AttrDef& new_attr) { if (!new_attr.has_allowed_values()) return false; if (!old_attr.has_allowed_values()) return true; if (!IsSubsetOf(old_attr.allowed_values().list().type(), new_attr.allowed_values().list().type())) { return true; } if (!IsSubsetOf(old_attr.allowed_values().list().s(), new_attr.allowed_values().list().s())) { return true; } return false; } string AllowedStr(const OpDef::AttrDef& attr) { if (!attr.has_allowed_values()) return "no restriction"; return SummarizeAttrValue(attr.allowed_values()); } string DefaultAttrStr(const OpDef::AttrDef& attr) { if (!attr.has_default_value()) return "no default"; return SummarizeAttrValue(attr.default_value()); } bool HigherMinimum(const OpDef::AttrDef& old_attr, const OpDef::AttrDef& new_attr) { if (!new_attr.has_minimum()) return false; if (!old_attr.has_minimum()) return true; return new_attr.minimum() > old_attr.minimum(); } string MinStr(const OpDef::AttrDef& attr) { if (!attr.has_minimum()) return "no minimum"; return strings::StrCat(attr.minimum()); } typedef absl::flat_hash_map<StringPiece, const OpDef::AttrDef*> AttrMap; void FillAttrMap(const OpDef& op_def, AttrMap* attr_map) { for (const auto& attr : op_def.attr()) { (*attr_map)[attr.name()] = &attr; } } void AddComma(string* s, bool* add_comma) { if (*add_comma) { strings::StrAppend(s, ", "); } else { *add_comma = true; } } void 
AddName(string* s, bool name, const OpDef::ArgDef& arg) { if (name) { strings::StrAppend(s, arg.name(), ":"); } } string ComputeArgSignature( const protobuf::RepeatedPtrField<OpDef::ArgDef>& args, const AttrMap& old_attrs, const AttrMap& new_attrs, std::vector<bool>* ref, bool names) { string s; bool add_comma = false; for (const OpDef::ArgDef& arg : args) { if (!arg.type_list_attr().empty()) { const OpDef::AttrDef* old_attr = gtl::FindPtrOrNull(old_attrs, arg.type_list_attr()); if (old_attr) { AddComma(&s, &add_comma); AddName(&s, names, arg); strings::StrAppend(&s, arg.type_list_attr()); ref->push_back(arg.is_ref()); } else { const OpDef::AttrDef* new_attr = gtl::FindPtrOrNull(new_attrs, arg.type_list_attr()); const auto& type_list = new_attr->default_value().list().type(); if (type_list.empty()) continue; for (int i = 0; i < type_list.size(); ++i) { AddComma(&s, &add_comma); AddName(&s, names, arg); strings::StrAppend( &s, DataTypeString(static_cast<DataType>(type_list.Get(i)))); ref->push_back(arg.is_ref()); } } } else { int num = 1; string type; AddName(&type, names, arg); if (!arg.number_attr().empty()) { const OpDef::AttrDef* old_attr = gtl::FindPtrOrNull(old_attrs, arg.number_attr()); if (old_attr) { strings::StrAppend(&type, arg.number_attr(), " * "); } else { const OpDef::AttrDef* new_attr = gtl::FindPtrOrNull(new_attrs, arg.number_attr()); num = new_attr->default_value().i(); } } if (arg.type() != DT_INVALID) { strings::StrAppend(&type, DataTypeString(arg.type())); } else { const OpDef::AttrDef* old_attr = gtl::FindPtrOrNull(old_attrs, arg.type_attr()); if (old_attr) { strings::StrAppend(&type, arg.type_attr()); } else { const OpDef::AttrDef* new_attr = gtl::FindPtrOrNull(new_attrs, arg.type_attr()); strings::StrAppend(&type, DataTypeString(new_attr->default_value().type())); } } for (int i = 0; i < num; ++i) { AddComma(&s, &add_comma); strings::StrAppend(&s, type); ref->push_back(arg.is_ref()); } } } return s; } } Status OpDefCompatible(const OpDef& old_op, const OpDef& new_op) { #define VALIDATE(CONDITION, ...) 
\ if (!(CONDITION)) { \ return errors::InvalidArgument("Incompatible Op change: ", __VA_ARGS__, \ "; old: ", SummarizeOpDef(old_op), \ "; new: ", SummarizeOpDef(new_op)); \ } VALIDATE(old_op.name() == new_op.name(), "Name mismatch"); AttrMap new_attrs, old_attrs; FillAttrMap(old_op, &old_attrs); FillAttrMap(new_op, &new_attrs); for (const auto& old_attr : old_op.attr()) { const OpDef::AttrDef* new_attr = gtl::FindPtrOrNull(new_attrs, old_attr.name()); VALIDATE(new_attr != nullptr, "Attr '", old_attr.name(), "' removed"); VALIDATE(old_attr.type() == new_attr->type(), "Attr '", old_attr.name(), "' changed type '", old_attr.type(), "' -> '", new_attr->type(), "'"); VALIDATE(!MoreRestrictive(old_attr, *new_attr), "Attr '", old_attr.name(), "' has a stricter set of allowed values; from ", AllowedStr(old_attr), " to ", AllowedStr(*new_attr)); VALIDATE(!HigherMinimum(old_attr, *new_attr), "Attr '", old_attr.name(), "' has a higher minimum; from ", MinStr(old_attr), " to ", MinStr(*new_attr)); } for (const auto& new_attr : new_op.attr()) { const OpDef::AttrDef* old_attr = gtl::FindPtrOrNull(old_attrs, new_attr.name()); VALIDATE(old_attr != nullptr || new_attr.has_default_value(), "Attr '", new_attr.name(), "' added without default"); } std::vector<bool> old_in_ref, new_in_ref, old_out_ref, new_out_ref; const string old_in_sig = ComputeArgSignature( old_op.input_arg(), old_attrs, new_attrs, &old_in_ref, false ); const string new_in_sig = ComputeArgSignature( new_op.input_arg(), old_attrs, new_attrs, &new_in_ref, false ); VALIDATE(old_in_sig == new_in_sig, "Input signature mismatch '", old_in_sig, "' vs. '", new_in_sig, "'"); VALIDATE(old_in_ref.size() == new_in_ref.size(), "Unexpected change in input ref lists."); for (int i = 0, end = old_in_ref.size(); i < end; ++i) { VALIDATE(old_in_ref[i] || !new_in_ref[i], "Input ", i, " changed from non-ref to ref"); } const string old_out_sig = ComputeArgSignature(old_op.output_arg(), old_attrs, new_attrs, &old_out_ref, true ); const string new_out_sig = ComputeArgSignature(new_op.output_arg(), old_attrs, new_attrs, &new_out_ref, true ); VALIDATE(old_out_sig == new_out_sig, "Output signature mismatch '", old_out_sig, "' vs. 
'", new_out_sig, "'"); VALIDATE(old_out_ref.size() == new_out_ref.size(), "Unexpected change in output ref lists"); for (int i = 0, end = old_out_ref.size(); i < end; ++i) { VALIDATE(!old_out_ref[i] || new_out_ref[i], "Output ", i, " changed from ref to non-ref"); } return OkStatus(); } Status OpDefAddedDefaultsUnchanged(const OpDef& old_op, const OpDef& penultimate_op, const OpDef& new_op) { AttrMap new_attrs, old_attrs; FillAttrMap(old_op, &old_attrs); FillAttrMap(new_op, &new_attrs); for (const auto& penultimate_attr : penultimate_op.attr()) { const OpDef::AttrDef* old_attr = gtl::FindPtrOrNull(old_attrs, penultimate_attr.name()); if (old_attr != nullptr) continue; const OpDef::AttrDef* new_attr = gtl::FindPtrOrNull(new_attrs, penultimate_attr.name()); if (new_attr == nullptr) { return errors::InvalidArgument("Missing attr '", penultimate_attr.name(), "' in op: ", SummarizeOpDef(new_op)); } if (!penultimate_attr.has_default_value() || !new_attr->has_default_value()) { return errors::InvalidArgument("Missing default for attr '", penultimate_attr.name(), "' in op: ", SummarizeOpDef(new_op)); } if (!AreAttrValuesEqual(penultimate_attr.default_value(), new_attr->default_value())) { return errors::InvalidArgument( "Can't change default value for attr '", penultimate_attr.name(), "' from ", SummarizeAttrValue(penultimate_attr.default_value()), " in op: ", SummarizeOpDef(new_op)); } } return OkStatus(); } Status OpDefAttrDefaultsUnchanged(const OpDef& old_op, const OpDef& new_op) { AttrMap new_attrs, old_attrs; FillAttrMap(old_op, &old_attrs); FillAttrMap(new_op, &new_attrs); for (const auto& old_attr : old_op.attr()) { const OpDef::AttrDef* new_attr = gtl::FindPtrOrNull(new_attrs, old_attr.name()); if (new_attr == nullptr) continue; if (new_attr->has_default_value() && !old_attr.has_default_value()) { continue; } if (old_attr.has_default_value() && !new_attr->has_default_value()) { return
#include "tensorflow/core/framework/op_def_util.h" #include "tensorflow/core/framework/op_def.pb.h" #include "tensorflow/core/framework/op_def_builder.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/lib/strings/strcat.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/protobuf.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace { OpDef FromText(const string& text) { OpDef op_def; EXPECT_TRUE(protobuf::TextFormat::MergeFromString(text, &op_def)); return op_def; } OpDef::AttrDef ADef(const string& text) { OpDef::AttrDef attr_def; EXPECT_TRUE(protobuf::TextFormat::MergeFromString(text, &attr_def)); return attr_def; } class ValidateOpDefTest : public ::testing::Test { protected: Status TestProto(const string& text) { return ValidateOpDef(FromText(text)); } Status TestBuilder(const OpDefBuilder& builder) { OpRegistrationData op_reg_data; Status status = builder.Finalize(&op_reg_data); TF_EXPECT_OK(status); if (!status.ok()) { return status; } else { return ValidateOpDef(op_reg_data.op_def); } } }; namespace { void ExpectFailure(const Status& status, const string& message) { EXPECT_FALSE(status.ok()) << "Did not see error with: " << message; if (!status.ok()) { LOG(INFO) << "message: " << status; EXPECT_TRUE(absl::StrContains(status.ToString(), message)) << "Actual: " << status << "\nExpected to contain: " << message; } } } TEST_F(ValidateOpDefTest, OpDefValid) { TF_EXPECT_OK(TestBuilder(OpDefBuilder("X").Attr("a: int"))); TF_EXPECT_OK(TestBuilder(OpDefBuilder("X").Input("a: int32"))); TF_EXPECT_OK(TestBuilder(OpDefBuilder("X").Output("a: bool"))); TF_EXPECT_OK(TestBuilder(OpDefBuilder("X").Attr("t: type").Input("a: t"))); TF_EXPECT_OK(TestBuilder(OpDefBuilder("X").Attr("a: int = 3"))); TF_EXPECT_OK(TestBuilder(OpDefBuilder("X").Attr("a: int >= -5"))); TF_EXPECT_OK(TestBuilder(OpDefBuilder("X").Attr("a: int >= -5"))); TF_EXPECT_OK(TestBuilder(OpDefBuilder("X").Attr("a: int >= -5 = 3"))); TF_EXPECT_OK(TestBuilder(OpDefBuilder("X").Attr("a: numbertype"))); TF_EXPECT_OK(TestBuilder(OpDefBuilder("Uppercase"))); TF_EXPECT_OK(TestBuilder(OpDefBuilder("Namespace>X").Attr("a: int"))); TF_EXPECT_OK(TestBuilder(OpDefBuilder("Namespace>X>Y").Attr("a: int"))); } TEST_F(ValidateOpDefTest, InvalidName) { ExpectFailure(TestBuilder(OpDefBuilder("lower").Attr("a: int")), "Invalid name"); ExpectFailure(TestBuilder(OpDefBuilder("BadSuffix 7%")), "Invalid name"); ExpectFailure(TestBuilder(OpDefBuilder(">OpName").Attr("a: int")), "Invalid name"); ExpectFailure(TestBuilder(OpDefBuilder("OpName>").Attr("a: int")), "Invalid name"); ExpectFailure(TestBuilder(OpDefBuilder("OpName>b").Attr("a: int")), "Invalid name"); ExpectFailure(TestBuilder(OpDefBuilder("OpName>A>>B").Attr("a: int")), "Invalid name"); } TEST_F(ValidateOpDefTest, DuplicateName) { ExpectFailure( TestBuilder(OpDefBuilder("DupeName").Input("a: int32").Input("a: float")), "Duplicate name: a"); ExpectFailure( TestBuilder( OpDefBuilder("DupeName").Input("a: int32").Output("a: float")), "Duplicate name: a"); ExpectFailure( TestBuilder( OpDefBuilder("DupeName").Output("a: int32").Output("a: float")), "Duplicate name: a"); ExpectFailure( TestBuilder(OpDefBuilder("DupeName").Input("a: int32").Attr("a: float")), "Duplicate name: a"); ExpectFailure( TestBuilder(OpDefBuilder("DupeName").Output("a: int32").Attr("a: float")), "Duplicate name: a"); ExpectFailure( TestBuilder(OpDefBuilder("DupeName").Attr("a: int").Attr("a: 
float")), "Duplicate name: a"); } TEST_F(ValidateOpDefTest, BadAttrName) { ExpectFailure(TestBuilder(OpDefBuilder("BadAttrtude").Attr("int32: int")), "Attr can't have name int32 that matches a data type"); ExpectFailure(TestBuilder(OpDefBuilder("BadAttrtude").Attr("float: string")), "Attr can't have name float that matches a data type"); } TEST_F(ValidateOpDefTest, BadAttrType) { ExpectFailure( TestProto("name: 'BadAttrType' attr { name: 'a' type: 'illegal' }"), "Unrecognized type"); ExpectFailure( TestProto("name: 'BadAttrType' attr { name: 'a' type: 'list(illegal)' }"), "Unrecognized type"); ExpectFailure( TestProto("name: 'BadAttrType' attr { name: 'a' type: 'int extra' }"), "Extra ' extra' at the end"); ExpectFailure( TestProto( "name: 'BadAttrType' attr { name: 'a' type: 'list(int extra)' }"), "'list(' is missing ')' in attr"); ExpectFailure( TestProto( "name: 'BadAttrType' attr { name: 'a' type: 'list(int) extra' }"), "Extra ' extra' at the end"); } TEST_F(ValidateOpDefTest, BadAttrDefault) { ExpectFailure( TestProto("name: 'BadAttrDef' attr { name: 'a' " "type: 'int' default_value { s: 'x' } }"), "AttrValue had value with type 'string' when 'int' expected\n\t for " "attr 'a'\n\t in Op 'BadAttrDef'"); ExpectFailure(TestProto("name: 'BadAttrDef' attr { name: 'a' " "type: 'int' default_value { f: 0.5 } }"), "AttrValue had value with type 'float' when 'int' expected\n" "\t for attr 'a'\n\t in Op 'BadAttrDef'"); ExpectFailure( TestProto("name: 'BadAttrDef' attr { name: 'a' type: 'int' " "default_value { i: 5 list { i: [2] } } }"), "AttrValue had value with type 'list(int)' when 'int' expected\n\t for " "attr 'a'\n\t in Op 'BadAttrDef'"); ExpectFailure( TestProto("name: 'BadAttrDef' attr { name: 'a' " "type: 'list(int)' default_value { f: 0.5 } }"), "AttrValue had value with type 'float' when 'list(int)' expected\n\t " "for attr 'a'\n\t in Op 'BadAttrDef'"); ExpectFailure( TestProto("name: 'BadAttrDef' attr { name: 'a' type: 'list(int)' " "default_value { list { i: [5] f: [0.5] } } }"), "AttrValue had value with type 'list(float)' when 'list(int)' " "expected\n\t for attr 'a'\n\t in Op 'BadAttrDef'"); ExpectFailure(TestProto("name: 'BadAttrDef' attr { name: 'a' " "type: 'type' default_value { } }"), "AttrValue missing value with expected type 'type'\n\t for " "attr 'a'\n\t in Op 'BadAttrDef'"); ExpectFailure(TestProto("name: 'BadAttrDef' attr { name: 'a' " "type: 'shape' default_value { } }"), "AttrValue missing value with expected type 'shape'\n\t for " "attr 'a'\n\t in Op 'BadAttrDef'"); ExpectFailure(TestProto("name: 'BadAttrDef' attr { name: 'a' " "type: 'tensor' default_value { } }"), "AttrValue missing value with expected type 'tensor'\n\t for " "attr 'a'\n\t in Op 'BadAttrDef'"); TF_EXPECT_OK( TestProto("name: 'GoodAttrDef' attr { name: 'a' " "type: 'list(int)' default_value { } }")); TF_EXPECT_OK( TestProto("name: 'GoodAttrDef' attr { name: 'a' " "type: 'list(int)' default_value { list { } } }")); TF_EXPECT_OK( TestBuilder(OpDefBuilder("GoodAttrDef").Attr("a: list(int) = []"))); ExpectFailure(TestProto("name: 'BadAttrDef' attr { name: 'a' " "type: 'list(int)' has_minimum: true minimum: 2 " "default_value { list { } } }"), "Length for attr 'a' of 0 must be at least minimum 2\n\t in Op " "'BadAttrDef'"); ExpectFailure( TestBuilder(OpDefBuilder("GoodAttrDef").Attr("a: list(bool) >=2 = []")), "Length for attr 'a' of 0 must be at least minimum 2\n\t in Op " "'GoodAttrDef'"); ExpectFailure(TestProto("name: 'BadAttrDef' attr { name: 'a' type: " "'list(string)' has_minimum: true minimum: 
2 " "default_value { list { s: ['foo'] } } }"), "Length for attr 'a' of 1 must be at least minimum 2\n\t in Op " "'BadAttrDef'"); ExpectFailure( TestBuilder( OpDefBuilder("GoodAttrDef").Attr("a: list(type) >=2 = [DT_STRING]")), "Length for attr 'a' of 1 must be at least minimum 2\n\t in Op " "'GoodAttrDef'"); } TEST_F(ValidateOpDefTest, NoRefTypes) { ExpectFailure(TestBuilder(OpDefBuilder("BadAttrDef").Input("i: float_ref")), "Illegal use of ref type 'float_ref'. " "Use 'Ref(type)' instead for input 'i'"); ExpectFailure( TestBuilder(OpDefBuilder("BadAttrDef").Attr("T: type = DT_INT32_REF")), "AttrValue must not have reference type value of int32_ref"); ExpectFailure( TestBuilder( OpDefBuilder("BadAttrDef").Attr("T: list(type) = [DT_STRING_REF]")), "AttrValue must not have reference type value of string_ref"); } TEST_F(ValidateOpDefTest, BadAttrMin) { ExpectFailure(TestProto("name: 'BadAttrMin' attr { name: 'a' type: 'string' " "has_minimum: true minimum: 0 }"), "minimum for unsupported type string"); ExpectFailure( TestProto("name: 'BadAttrMin' attr { name: 'a' type: 'int' default_value " "{ i: 2 } has_minimum: true minimum: 7 }"), "Value for attr 'a' of 2 must be at least minimum 7\n\t in Op " "'BadAttrMin'"); ExpectFailure( TestProto("name: 'BadAttrMin' attr { name: 'a' " "type: 'list(string)' has_minimum: true minimum: -5 }"), "list type must have a non-negative minimum, not -5"); TF_EXPECT_OK( TestProto("name: 'GoodAttrMin' attr { name: 'a' type: 'list(string)' " "has_minimum: true minimum: 1 }")); ExpectFailure(TestProto("name: 'NoHasMin' attr { name: 'a' " "type: 'list(string)' minimum: 3 }"), "Attr 'a' with has_minimum = false but minimum 3 not equal to " "default of 0"); } TEST_F(ValidateOpDefTest, BadAttrAllowed) { TF_EXPECT_OK(TestBuilder( OpDefBuilder("GoodAttrtude").Attr("x: numbertype = DT_INT32"))); ExpectFailure( TestBuilder( OpDefBuilder("BadAttrtude").Attr("x: numbertype = DT_STRING")), "attr 'x' of string is not in the list of allowed values"); ExpectFailure( TestBuilder(OpDefBuilder("BadAttrtude") .Attr("x: list(realnumbertype) = [DT_COMPLEX64]")), "attr 'x' of complex64 is not in the list of allowed values"); ExpectFailure( TestBuilder(OpDefBuilder("BadAttrtude") .Attr("x: list(realnumbertype) = [DT_COMPLEX128]")), "attr 'x' of complex128 is not in the list of allowed values"); TF_EXPECT_OK(TestBuilder( OpDefBuilder("GoodAttrtude").Attr("x: {'foo', 'bar'} = 'bar'"))); ExpectFailure( TestBuilder( OpDefBuilder("BadAttrtude").Attr("x: {'foo', 'bar'} = 'baz'")), "attr 'x' of \"baz\" is not in the list of allowed values"); ExpectFailure(TestBuilder(OpDefBuilder("BadAttrtude") .Attr("x: list({'foo', 'bar'}) = ['baz']")), "attr 'x' of \"baz\" is not in the list of allowed values"); ExpectFailure(TestProto("name: 'BadAttrtude' attr { name: 'a' " "type: 'string' allowed_values { s: 'not list' } }"), "with type 'string' when 'list(string)' expected"); ExpectFailure( TestProto("name: 'BadAttrtude' attr { name: 'a' " "type: 'string' allowed_values { list { i: [6] } } }"), "with type 'list(int)' when 'list(string)' expected"); } TEST_F(ValidateOpDefTest, BadArgType) { ExpectFailure(TestProto("name: 'BadArg' input_arg { name: 'a' " "type: DT_INT32 } input_arg { name: 'b' }"), "Missing type for input 'b'"); ExpectFailure(TestProto("name: 'BadArg' input_arg { name: 'a' " "type: DT_INT32 } output_arg { name: 'b' }"), "Missing type for output 'b'"); ExpectFailure( TestProto("name: 'BadArg' input_arg { name: 'a' type: " "DT_INT32 type_attr: 'x' } attr { name: 'x' type: 'type' }"), 
"Exactly one of type, type_attr, type_list_attr must be set for input " "'a'"); ExpectFailure(TestProto("name: 'BadArg' input_arg { name: 'a' " "type_attr: 'x' } attr { name: 'x' type: 'int' }"), "Attr 'x' used as type_attr for input 'a' has type int"); ExpectFailure( TestProto("name: 'BadArg' input_arg { name: 'a' " "type_attr: 'x' } attr { name: 'x' type: 'list(type)' }"), "Attr 'x' used as type_attr for input 'a' has type list(type)"); ExpectFailure( TestProto("name: 'BadArg' input_arg { name: 'a' " "type_list_attr: 'x' } attr { name: 'x' type: 'int' }"), "Attr 'x' used as type_list_attr for input 'a' has type int"); ExpectFailure( TestProto("name: 'BadArg' input_arg { name: 'a' " "type_list_attr: 'x' } attr { name: 'x' type: 'type' }"), "Attr 'x' used as type_list_attr for input 'a' has type type"); ExpectFailure(TestProto("name: 'BadArg' input_arg { name: 'a' " "type_attr: 'x' }"), "No attr with name 'x' for input 'a'"); ExpectFailure( TestProto("name: 'BadArg' input_arg { name: 'a' number_attr: 'n' " "type_attr: 'x' } attr { name: 'x' type: 'list(type)' } " "attr { name: 'n' type: 'int' has_minimum: true minimum: 1 }"), "Attr 'x' used as type_attr for input 'a' has type list(type)"); TF_EXPECT_OK(TestProto( "name: 'Arg' input_arg { name: 'a' type_list_attr: 'x' } " "attr { name: 'x' type: 'list(type)' } attr { name: 'n' type: 'int' " "has_minimum: true minimum: 1 }")); TF_EXPECT_OK(TestProto( "name: 'Arg' input_arg { name: 'a' type: DT_INT32 number_attr: 'n' } " "attr { name: 'n' type: 'int' has_minimum: true minimum: 0 }")); ExpectFailure(TestProto("name: 'Arg' input_arg { name: 'a' type: DT_INT32 " "number_attr: 'n' }"), "No attr with name 'n'"); ExpectFailure( TestProto( "name: 'Arg' input_arg { name: 'a' type: " "DT_INT32 number_attr: 'n' } attr { name: 'n' type: 'string' }"), "Attr 'n' used as length for input 'a' has type string"); ExpectFailure( TestProto("name: 'Arg' input_arg { name: 'a' type: " "DT_INT32 number_attr: 'n' } attr { name: 'n' type: 'int' }"), "Attr 'n' used as length for input 'a' must have minimum;"); ExpectFailure( TestProto("name: 'Arg' input_arg { name: 'a' type: DT_INT32 number_attr: " "'n' } attr { name: 'n' type: 'int' has_minimum: true minimum: " "-5 }"), "Attr 'n' used as length for input 'a' must have minimum >= 0;"); ExpectFailure( TestProto("name: 'Arg' input_arg { name: 'a' number_attr: 'n' } attr { " "name: 'n' type: 'int' has_minimum: true minimum: 2 }"), "Missing type for input 'a'; in OpDef:"); ExpectFailure(TestProto("name: 'BadArg' input_arg { name: 'a' number_attr: " "'n' type_list_attr: 'x' } attr { name: 'n' type: " "'int' has_minimum: true minimum: 1 } attr { name: " "'x' type: 'list(type)' }"), "Can't have both number_attr and type_list_attr for input 'a'"); } void ExpectDifferent(const OpDef::AttrDef& a1, const OpDef::AttrDef& a2) { EXPECT_FALSE(AttrDefEqual(a1, a2)); EXPECT_FALSE(AttrDefEqual(a2, a1)); EXPECT_NE(AttrDefHash(a1), AttrDefHash(a2)); } TEST(AttrDefUtilTest, EqualAndHash) { OpDef::AttrDef a = ADef( "name: 'foo' type: 'string' description: 'cool' has_minimum: true " "minimum: 2 default_value { i: 2 } allowed_values { i: 5 }"); EXPECT_TRUE(AttrDefEqual(a, a)); EXPECT_EQ(AttrDefHash(a), AttrDefHash(a)); ExpectDifferent( a, ADef("name: 'FOO' type: 'string' description: 'cool' has_minimum: true " "minimum: 2 default_value { i: 2 } allowed_values { i: 5 }")); ExpectDifferent( a, ADef("name: 'foo' type: 'int32' description: 'cool' has_minimum: true " "minimum: 2 default_value { i: 2 } allowed_values { i: 5 }")); ExpectDifferent( 
a, ADef("name: 'foo' type: 'string' description: 'COOL' has_minimum: true " "minimum: 2 default_value { i: 2 } allowed_values { i: 5 }")); ExpectDifferent( a, ADef("name: 'foo' type: 'string' description: 'cool' has_minimum: false " "minimum: 2 default_value { i: 2 } allowed_values { i: 5 }")); ExpectDifferent( a, ADef("name: 'foo' type: 'string' description: 'cool' has_minimum: true " "minimum: 3 default_value { i: 2 } allowed_values { i: 5 }")); ExpectDifferent( a, ADef("name: 'foo' type: 'string' description: 'cool' has_minimum: true " "minimum: 2 default_value { i: 3 } allowed_values { i: 5 }")); ExpectDifferent( a, ADef("name: 'foo' type: 'string' description: 'cool' has_minimum: true " "minimum: 2 default_value { i: 2 } allowed_values { i: 6 }")); a = ADef( "name: 'foo' type: 'string' description: 'cool' has_minimum: true " "minimum: 2"); EXPECT_TRUE(AttrDefEqual(a, a)); EXPECT_EQ(AttrDefHash(a), AttrDefHash(a)); ExpectDifferent( a, ADef("name: 'FOO' type: 'string' description: 'cool' has_minimum: true " "minimum: 2")); ExpectDifferent( a, ADef("name: 'foo' type: 'int32' description: 'cool' has_minimum: true " "minimum: 2")); ExpectDifferent( a, ADef("name: 'foo' type: 'string' description: 'COOL' has_minimum: true " "minimum: 2")); ExpectDifferent( a, ADef("name: 'foo' type: 'string' description: 'cool' has_minimum: false " "minimum: 2")); ExpectDifferent( a, ADef("name: 'foo' type: 'string' description: 'cool' has_minimum: true " "minimum: 3")); } protobuf::RepeatedPtrField<OpDef::AttrDef> Rep( const std::vector<OpDef::AttrDef>& defs) { protobuf::RepeatedPtrField<OpDef::AttrDef> rep; for (const OpDef::AttrDef& def : defs) { rep.Add()->MergeFrom(def); } return rep; } void ExpectEqual(const protobuf::RepeatedPtrField<OpDef::AttrDef>& a1, const protobuf::RepeatedPtrField<OpDef::AttrDef>& a2) { EXPECT_TRUE(RepeatedAttrDefEqual(a1, a2)); EXPECT_TRUE(RepeatedAttrDefEqual(a2, a1)); EXPECT_EQ(RepeatedAttrDefHash(a1), RepeatedAttrDefHash(a2)); } void ExpectDifferent(const protobuf::RepeatedPtrField<OpDef::AttrDef>& a1, const protobuf::RepeatedPtrField<OpDef::AttrDef>& a2) { EXPECT_FALSE(RepeatedAttrDefEqual(a1, a2)); EXPECT_FALSE(RepeatedAttrDefEqual(a2, a1)); EXPECT_NE(RepeatedAttrDefHash(a1), RepeatedAttrDefHash(a2)); } TEST(AttrDefUtilTest, EqualAndHash_Repeated) { OpDef::AttrDef a1 = ADef( "name: 'foo1' type: 'string' description: 'cool' has_minimum: true " "minimum: 2 default_value { i: 2 } allowed_values { i: 5 }"); OpDef::AttrDef a2 = ADef( "name: 'foo2' type: 'string' description: 'cool' has_minimum: true " "minimum: 2 default_value { i: 2 } allowed_values { i: 5 }"); OpDef::AttrDef a3 = ADef( "name: 'foo1' type: 'string' description: 'cool' has_minimum: true " "minimum: 3 default_value { i: 2 } allowed_values { i: 5 }"); OpDef::AttrDef a4 = ADef( "name: 'foo3' type: 'string' description: 'cool' has_minimum: true " "minimum: 3 default_value { i: 2 } allowed_values { i: 5 }"); ExpectEqual(Rep({}), Rep({})); ExpectEqual(Rep({a1}), Rep({a1})); ExpectEqual(Rep({a1, a2}), Rep({a1, a2})); ExpectEqual(Rep({a1, a2}), Rep({a2, a1})); ExpectEqual(Rep({a1, a4}), Rep({a4, a1})); ExpectDifferent(Rep({a1}), Rep({})); ExpectDifferent(Rep({a1}), Rep({a2})); ExpectDifferent(Rep({a1}), Rep({a3})); ExpectDifferent(Rep({a1}), Rep({a4})); ExpectDifferent(Rep({a1}), Rep({a1, a2})); ExpectDifferent(Rep({a1, a2}), Rep({a1, a4})); ExpectDifferent(Rep({a1, a2}), Rep({a1, a2, a4})); } void ExpectEqual(const OpDef& o1, const OpDef& o2) { EXPECT_TRUE(OpDefEqual(o1, o2)); EXPECT_TRUE(OpDefEqual(o2, o1)); 
EXPECT_EQ(OpDefHash(o1), OpDefHash(o2)); } void ExpectDifferent(const OpDef& o1, const OpDef& o2) { EXPECT_FALSE(OpDefEqual(o1, o2)); EXPECT_FALSE(OpDefEqual(o2, o1)); EXPECT_NE(OpDefHash(o1), OpDefHash(o2)); } TEST(OpDefEqualityTest, EqualAndHash) { string a1 = "attr { name: 'a' type: 'string' } "; string a2 = "attr { name: 'b' type: 'string' } "; string a3 = "attr { name: 'c' type: 'int32' } "; OpDef o1 = FromText(strings::StrCat("name: 'MatMul' ", a1)); OpDef o2 = FromText(strings::StrCat("name: 'MatMul' ", a2)); OpDef o3 = FromText(strings::StrCat("name: 'MatMul' ", a1, a2)); OpDef o4 = FromText(strings::StrCat("name: 'MatMul' ", a2, a1)); ExpectEqual(o1, o1); ExpectEqual(o3, o4); ExpectDifferent(o1, o2); ExpectDifferent(o1, o3); } TEST(OpDefAttrDefaultsUnchangedTest, Foo) { const auto& op1 = FromText("name: 'op1' attr { name: 'n' type: 'string'}"); const auto& op2 = FromText( "name: 'op2' attr { name: 'n' type: 'string' default_value: {s: 'x'}}"); const auto& op3 = FromText( "name: 'op3' attr { name: 'n' type: 'string' default_value: {s: 'y'}}"); TF_EXPECT_OK(OpDefAttrDefaultsUnchanged(op1, op2)); Status changed_attr = OpDefAttrDefaultsUnchanged(op2, op3); ExpectFailure(changed_attr, "Attr 'n' has changed it's default value; from \"x\" to \"y\""); Status removed_attr = OpDefAttrDefaultsUnchanged(op2, op1); ExpectFailure(removed_attr, "Attr 'n' has removed it's default; from \"x\" to no default"); } } }
1,059
cpp
tensorflow/tensorflow
python_op_gen_annotator
tensorflow/python/framework/python_op_gen_annotator.cc
tensorflow/python/framework/python_op_gen_annotator_test.cc
#ifndef TENSORFLOW_PYTHON_FRAMEWORK_PYTHON_OP_GEN_ANNOTATOR_H_ #define TENSORFLOW_PYTHON_FRAMEWORK_PYTHON_OP_GEN_ANNOTATOR_H_ #include <unordered_map> #include <vector> #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "tensorflow/core/framework/op_def.pb.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/python/framework/op_reg_offset.pb.h" namespace tensorflow { namespace python_op_gen_internal { inline constexpr absl::string_view kKytheCorpus = "github.com/tensorflow/tensorflow"; class GeneratedCodeAnnotator { public: void AddAnnotation(const OpDef& op_def, absl::string_view function_name, uint32_t offset_start); void SetBase(uint32_t pos) { base_pos_ = pos; } string BuildKytheMetadata(); void FillSourceOffsets(const OpRegOffsets& op_reg_offsets); struct ByteOffsets { uint32_t source_start = 0; uint32_t source_end = 0; uint32_t generated_start = 0; uint32_t generated_end = 0; string file_path; }; private: uint32_t base_pos_ = 0; std::unordered_map<string, ByteOffsets> byte_offsets_map_; }; } } #endif #include "tensorflow/python/framework/python_op_gen_annotator.h" #include <cstdint> #include <memory> #include <string> #include <unordered_map> #include <vector> #include "absl/strings/escaping.h" #include "absl/strings/str_format.h" #include "absl/strings/string_view.h" #include "tensorflow/python/framework/kythe_metadata.pb.h" #include "tensorflow/python/framework/op_reg_offset.pb.h" namespace tensorflow { namespace python_op_gen_internal { void GeneratedCodeAnnotator::AddAnnotation(const OpDef& op_def, absl::string_view function_name, uint32_t offset_start) { const uint32_t start_byte = base_pos_ + offset_start; const uint32_t end_byte = start_byte + function_name.size(); byte_offsets_map_[op_def.name()].generated_start = start_byte; byte_offsets_map_[op_def.name()].generated_end = end_byte; } void GeneratedCodeAnnotator::FillSourceOffsets( const OpRegOffsets& op_reg_offsets) { for (const OpRegOffset& offset : op_reg_offsets.offsets()) { if (byte_offsets_map_.find(offset.name()) != byte_offsets_map_.end()) { byte_offsets_map_[offset.name()].file_path = offset.filepath(); byte_offsets_map_[offset.name()].source_start = offset.start(); byte_offsets_map_[offset.name()].source_end = offset.end(); } } } string GeneratedCodeAnnotator::BuildKytheMetadata() { GeneratedCodeInfo generated_code_info; generated_code_info.set_type(GeneratedCodeInfo::KYTHE0); for (const auto& [name, offsets] : byte_offsets_map_) { if (offsets.file_path.empty()) { continue; } MappingRule* meta = generated_code_info.add_meta(); meta->set_type(MappingRule::ANCHOR_ANCHOR); meta->set_edge("/kythe/edge/imputes"); meta->set_source_begin(offsets.source_start); meta->set_source_end(offsets.source_end); meta->set_target_begin(offsets.generated_start); meta->set_target_end(offsets.generated_end); VName* vname = meta->mutable_source_vname(); vname->set_signature(absl::StrFormat( "@%d:%d@tensorflow_op#%s#%s#%s", offsets.source_start, offsets.source_end, name, kKytheCorpus, offsets.file_path)); vname->set_corpus(std::string(kKytheCorpus)); vname->set_path(offsets.file_path); vname->set_language("c++"); } return "# kythe.proto.metadata.GeneratedCodeInfo:" + absl::Base64Escape(generated_code_info.SerializeAsString()); } } }
#include "tensorflow/python/framework/python_op_gen_annotator.h" #include <utility> #include "absl/strings/escaping.h" #include "absl/strings/str_format.h" #include "absl/strings/str_split.h" #include "tensorflow/core/platform/protobuf.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/python/framework/kythe_metadata.pb.h" namespace tensorflow { namespace python_op_gen_internal { namespace { using ::testing::StartsWith; GeneratedCodeInfo ParseMetadata(string metadata) { GeneratedCodeInfo generated_code_info; std::pair<string, string> p = absl::StrSplit(metadata, ':'); string serialized_generated_code_info; absl::Base64Unescape(p.second, &serialized_generated_code_info); generated_code_info.ParseFromString(serialized_generated_code_info); return generated_code_info; } TEST(PythonOpGenAnnotatorTest, AddAnnotationWithoutSourceOffsets) { GeneratedCodeAnnotator annotator; OpDef fakeOpDef; fakeOpDef.set_name("fake_op"); annotator.AddAnnotation(fakeOpDef, "fake_op", 0); string meta = annotator.BuildKytheMetadata(); ASSERT_THAT(meta, StartsWith("# kythe.proto.metadata.GeneratedCodeInfo:")); GeneratedCodeInfo actual = ParseMetadata(meta); GeneratedCodeInfo expected; ASSERT_TRUE(protobuf::TextFormat::ParseFromString("type: KYTHE0", &expected)); EXPECT_EQ(actual.SerializeAsString(), expected.SerializeAsString()); } TEST(PythonOpGenAnnotatorTest, AddAnnotationWithSourceOffsets) { GeneratedCodeAnnotator annotator; OpDef fakeOpDef; fakeOpDef.set_name("fake_op"); OpRegOffsets fakeOffsets; ASSERT_TRUE(protobuf::TextFormat::ParseFromString( R"pb( offsets { name: "fake_op", filepath: "file/path/to/fake_op.cc", start: 7, end: 11, } )pb", &fakeOffsets)); annotator.AddAnnotation(fakeOpDef, "fake_op", 100); annotator.FillSourceOffsets(fakeOffsets); string meta = annotator.BuildKytheMetadata(); ASSERT_THAT(meta, StartsWith("# kythe.proto.metadata.GeneratedCodeInfo:")); GeneratedCodeInfo actual = ParseMetadata(meta); EXPECT_EQ(actual.meta(0).type(), MappingRule::ANCHOR_ANCHOR); EXPECT_EQ(actual.meta(0).edge(), "/kythe/edge/imputes"); EXPECT_EQ( actual.meta(0).source_vname().signature(), absl::StrFormat("@7:11@tensorflow_op#fake_op#%s#file/path/to/fake_op.cc", kKytheCorpus)); EXPECT_EQ(actual.meta(0).source_vname().path(), "file/path/to/fake_op.cc"); EXPECT_EQ(actual.meta(0).source_begin(), 7); EXPECT_EQ(actual.meta(0).source_end(), 11); EXPECT_EQ(actual.meta(0).target_begin(), 100); EXPECT_EQ(actual.meta(0).target_end(), 107); } TEST(PythonOpGenAnnotatorTest, AddAnnotationWithSourceOffsetsAndNonZeroBase) { GeneratedCodeAnnotator annotator; OpDef fakeOpDef; fakeOpDef.set_name("fake_op"); OpRegOffsets fakeOffsets; ASSERT_TRUE(protobuf::TextFormat::ParseFromString( R"pb( offsets { name: "fake_op", filepath: "file/path/to/fake_op.cc", start: 7, end: 11, } )pb", &fakeOffsets)); annotator.SetBase(10); annotator.AddAnnotation(fakeOpDef, "fake_op", 100); annotator.FillSourceOffsets(fakeOffsets); string meta = annotator.BuildKytheMetadata(); ASSERT_THAT(meta, StartsWith("# kythe.proto.metadata.GeneratedCodeInfo:")); GeneratedCodeInfo actual = ParseMetadata(meta); EXPECT_EQ(actual.meta(0).type(), MappingRule::ANCHOR_ANCHOR); EXPECT_EQ(actual.meta(0).edge(), "/kythe/edge/imputes"); EXPECT_EQ( actual.meta(0).source_vname().signature(), absl::StrFormat("@7:11@tensorflow_op#fake_op#%s#file/path/to/fake_op.cc", kKytheCorpus)); EXPECT_EQ(actual.meta(0).source_vname().path(), "file/path/to/fake_op.cc"); EXPECT_EQ(actual.meta(0).source_begin(), 7); EXPECT_EQ(actual.meta(0).source_end(), 11); 
EXPECT_EQ(actual.meta(0).target_begin(), 110); EXPECT_EQ(actual.meta(0).target_end(), 117); } TEST(PythonOpGenAnnotatorTest, AddMultipleAnnotation) { GeneratedCodeAnnotator annotator; OpDef fakeOpDef; OpRegOffsets fakeOffsets; ASSERT_TRUE(protobuf::TextFormat::ParseFromString( R"pb( offsets { name: "fake_op_1", filepath: "file/path/to/fake_op.cc", start: 7, end: 11, } offsets { name: "fake_op_2", filepath: "file/path/to/fake_op.cc", start: 101, end: 103, } )pb", &fakeOffsets)); fakeOpDef.set_name("fake_op_1"); annotator.AddAnnotation(fakeOpDef, "fake_op_1", 10); fakeOpDef.set_name("fake_op_2"); annotator.AddAnnotation(fakeOpDef, "fake_op_2", 100); annotator.FillSourceOffsets(fakeOffsets); string meta = annotator.BuildKytheMetadata(); ASSERT_THAT(meta, StartsWith("# kythe.proto.metadata.GeneratedCodeInfo:")); GeneratedCodeInfo actual = ParseMetadata(meta); EXPECT_EQ(actual.meta_size(), 2); } } } }
1,060
cpp
tensorflow/tensorflow
math_ops
third_party/xla/xla/service/llvm_ir/math_ops.cc
tensorflow/core/ops/math_ops_test.cc
#ifndef XLA_SERVICE_LLVM_IR_MATH_OPS_H_ #define XLA_SERVICE_LLVM_IR_MATH_OPS_H_ #include "llvm/IR/IRBuilder.h" #include "llvm/IR/Value.h" namespace xla { namespace llvm_ir { llvm::Value* EmitFastTanh(llvm::IRBuilder<>* b, llvm::Value* input, bool with_fma = false); llvm::Value* EmitErfF32(llvm::IRBuilder<>* b, llvm::Value* x); } } #endif #include "xla/service/llvm_ir/math_ops.h" #include "xla/service/llvm_ir/llvm_util.h" namespace xla { namespace llvm_ir { llvm::Value* EmitFastTanh(llvm::IRBuilder<>* b, llvm::Value* input, bool with_fma) { llvm::Type* type = input->getType(); const float plus_clamp = with_fma ? 7.99881172180175781f : 7.90531110763549805f; const float minus_clamp = -plus_clamp; const auto kCanUseApprox = 0.0004; auto abs_x = llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::fabs, {input}, {type}, b); auto use_aprox = b->CreateFCmpOLT(abs_x, llvm::ConstantFP::get(type, kCanUseApprox)); llvm::Value* input_clamped = llvm_ir::EmitFloatMin( llvm_ir::EmitFloatMax(input, llvm::ConstantFP::get(type, minus_clamp), b, true), llvm::ConstantFP::get(type, plus_clamp), b, true); static constexpr std::array<float, 7> numerator_coeffs{ -2.76076847742355e-16f, 2.00018790482477e-13f, -8.60467152213735e-11f, 5.12229709037114e-08f, 1.48572235717979e-05f, 6.37261928875436e-04f, 4.89352455891786e-03f}; static constexpr std::array<float, 4> denominator_coeffs{ 1.19825839466702e-06f, 1.18534705686654e-04f, 2.26843463243900e-03f, 4.89352518554385e-03f}; llvm::Value* input_squared = b->CreateFMul(input_clamped, input_clamped); llvm::Value* numerator = llvm::ConstantFP::get(type, numerator_coeffs[0]); for (int i = 1; i < numerator_coeffs.size(); i++) { numerator = b->CreateFAdd(b->CreateFMul(input_squared, numerator), llvm::ConstantFP::get(type, numerator_coeffs[i])); } numerator = b->CreateFMul(input_clamped, numerator); llvm::Value* denominator = llvm::ConstantFP::get(type, denominator_coeffs[0]); for (int i = 1; i < denominator_coeffs.size(); i++) { denominator = b->CreateFAdd(b->CreateFMul(input_squared, denominator), llvm::ConstantFP::get(type, denominator_coeffs[i])); } return b->CreateSelect(use_aprox, input, b->CreateFDiv(numerator, denominator)); } llvm::Value* EmitErfF32(llvm::IRBuilder<>* b, llvm::Value* x) { auto type = x->getType(); constexpr float kErfInvOneMinusHalfULP = 3.832506856900711f; auto call_fabs = [b](llvm::Value* operand_value) { return llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::fabs, {operand_value}, {operand_value->getType()}, b); }; auto fcmp_le = [b](llvm::Value* lhs_value, llvm::Value* rhs_value) { return b->CreateFCmpOLE(lhs_value, rhs_value); }; llvm::Value* const clamp = fcmp_le( llvm::ConstantFP::get(type, kErfInvOneMinusHalfULP), call_fabs(x)); llvm::Value* const alpha_1 = llvm::ConstantFP::get(type, 1.128379143519084f); llvm::Value* const alpha_3 = llvm::ConstantFP::get(type, 0.18520832239976145f); llvm::Value* const alpha_5 = llvm::ConstantFP::get(type, 0.050955695062380861f); llvm::Value* const alpha_7 = llvm::ConstantFP::get(type, 0.0034082910107109506f); llvm::Value* const alpha_9 = llvm::ConstantFP::get(type, 0.00022905065861350646f); llvm::Value* const beta_0 = llvm::ConstantFP::get(type, 1.0f); llvm::Value* const beta_2 = llvm::ConstantFP::get(type, 0.49746925110067538f); llvm::Value* const beta_4 = llvm::ConstantFP::get(type, 0.11098505178285362f); llvm::Value* const beta_6 = llvm::ConstantFP::get(type, 0.014070470171167667f); llvm::Value* const beta_8 = llvm::ConstantFP::get(type, 0.0010179625278914885f); llvm::Value* const beta_10 = 
llvm::ConstantFP::get(type, 0.000023547966471313185f); llvm::Value* const beta_12 = llvm::ConstantFP::get(type, -1.1791602954361697e-7f); llvm::Value* const x2 = b->CreateFMul(x, x); auto call_fma = [b](llvm::Value* multiplier, llvm::Value* multiplicand, llvm::Value* addend) { return llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::fma, {multiplier, multiplicand, addend}, {multiplier->getType()}, b); }; llvm::Value* p = call_fma(x2, alpha_9, alpha_7); p = call_fma(x2, p, alpha_5); p = call_fma(x2, p, alpha_3); p = call_fma(x2, p, alpha_1); p = b->CreateFMul(x, p); llvm::Value* q = call_fma(x2, beta_12, beta_10); q = call_fma(x2, q, beta_8); q = call_fma(x2, q, beta_6); q = call_fma(x2, q, beta_4); q = call_fma(x2, q, beta_2); q = call_fma(x2, q, beta_0); auto call_copysign = [b](llvm::Value* mag, llvm::Value* sign) { return llvm_ir::EmitCallToIntrinsic(llvm::Intrinsic::copysign, {mag, sign}, {mag->getType()}, b); }; auto* result = b->CreateSelect(clamp, call_copysign(llvm::ConstantFP::get(type, 1.0), x), b->CreateFDiv(p, q)); return result; } } }
#include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/shape_inference_testutil.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.pb.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { TEST(MathOpsTest, AddN_ShapeFn) { ShapeInferenceTestOp op("AddN"); auto set_n = [&op](int n) { std::vector<NodeDefBuilder::NodeOut> src_list; src_list.reserve(n); for (int i = 0; i < n; ++i) src_list.emplace_back("a", 0, DT_FLOAT); TF_ASSERT_OK(NodeDefBuilder("test", "AddN") .Input(src_list) .Attr("N", n) .Finalize(&op.node_def)); }; set_n(2); INFER_OK(op, "?;?", "in0|in1"); INFER_OK(op, "[1];[?]", "in0"); INFER_OK(op, "[1];?", "in0"); INFER_OK(op, "[?];[1]", "in1"); INFER_OK(op, "?;[1]", "in1"); set_n(2); INFER_OK(op, "[1,2];[?,2]", "in0"); INFER_OK(op, "[1,2];[1,2]", "in0|in1"); INFER_OK(op, "[?,2];[1,2]", "in1"); set_n(3); INFER_OK(op, "[1,?];[?,2];[1,2]", "in2"); INFER_OK(op, "[1,2];[?,2];[1,?]", "in0"); INFER_OK(op, "?;?;[1,2]", "in2"); set_n(2); INFER_OK(op, "?;[1,2]", "in1"); INFER_OK(op, "[1,?];[?,2]", "[d0_0,d1_1]"); INFER_OK(op, "[?,2,?];[?,?,3]", "[d0_0|d1_0,d0_1,d1_2]"); INFER_OK(op, "[?,2];[1,?]", "[d1_0,d0_1]"); set_n(3); INFER_ERROR("Dimension 1 in both shapes must be equal, but are 2 and 4", op, "[1,2];?;[1,4]"); INFER_ERROR("From merging shape 0 with other shapes.", op, "[1,2];?;[1,4]"); set_n(4); INFER_ERROR("Shapes must be equal rank, but are 2 and 3", op, "?;[1,2];?;[1,2,3]"); INFER_ERROR("From merging shape 1 with other shapes.", op, "?;[1,2];?;[1,2,3]"); } TEST(MathOpsTest, UnchangedShape_ShapeFn) { ShapeInferenceTestOp op("Cast"); INFER_OK(op, "?", "in0"); INFER_OK(op, "[?]", "in0"); INFER_OK(op, "[1,?,3,4]", "in0"); } TEST(MathOpsTest, Segment_ShapeFn) { for (const auto* op_name : {"SegmentMax", "SegmentMean", "SegmentMin", "SegmentProd", "SegmentSum"}) { ShapeInferenceTestOp op(op_name); INFER_OK(op, "?;?", "?"); INFER_OK(op, "?;[100]", "?"); INFER_OK(op, "[?];?", "[?]"); INFER_OK(op, "[?];[100]", "[?]"); INFER_OK(op, "[1];?", "[?]"); INFER_OK(op, "[1];[100]", "[?]"); INFER_OK(op, "[?,?];?", "[?,d0_1]"); INFER_OK(op, "[?,2];[100]", "[?,d0_1]"); INFER_OK(op, "[?,2,?,4];[100]", "[?,d0_1,d0_2,d0_3]"); INFER_OK(op, "[1,?];?", "[?,d0_1]"); INFER_OK(op, "[1,2];[100]", "[?,d0_1]"); INFER_OK(op, "[1,2,?,4];[100]", "[?,d0_1,d0_2,d0_3]"); INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;[1,2]"); INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "[];[1]"); } } TEST(MathOpsTest, BroadcastBinaryOps_ShapeFn) { auto test_shapes = [&](ShapeInferenceTestOp& op, bool incompatible_shape_error) { INFER_OK(op, "?;?", "?"); INFER_OK(op, "[1,2];?", "?"); INFER_OK(op, "?;[1,2]", "?"); INFER_OK(op, "[?];[1]", "[d0_0]"); INFER_OK(op, "[1];[?]", "[d1_0]"); INFER_OK(op, "[?];[2]", incompatible_shape_error ? "[d1_0]" : "?"); INFER_OK(op, "[2];[?]", incompatible_shape_error ? 
"[d0_0]" : "?"); INFER_OK(op, "[?];[?]", "[?]"); INFER_OK(op, "[];[?]", "[d1_0]"); INFER_OK(op, "[?];[]", "[d0_0]"); INFER_OK(op, "[1];[1]", "[d0_0|d1_0]"); INFER_OK(op, "[];[1]", "[d1_0]"); INFER_OK(op, "[1];[]", "[d0_0]"); INFER_OK(op, "[2];[2]", "[d0_0|d1_0]"); INFER_OK(op, "[];[2]", "[d1_0]"); INFER_OK(op, "[1];[2]", "[d1_0]"); INFER_OK(op, "[2];[1]", "[d0_0]"); INFER_OK(op, "[2];[]", "[d0_0]"); INFER_OK(op, "[2];[?]", incompatible_shape_error ? "[d0_0]" : "?"); INFER_OK(op, "[0];[0]", "[d0_0|d1_0]"); INFER_OK(op, "[];[0]", "[d1_0]"); INFER_OK(op, "[1];[0]", "[d1_0]"); INFER_OK(op, "[0];[1]", "[d0_0]"); INFER_OK(op, "[0];[]", "[d0_0]"); INFER_OK(op, "[2];[?,?]", incompatible_shape_error ? "[d1_0,d0_0]" : "?"); INFER_OK(op, "[2,2];[?,?,?]", incompatible_shape_error ? "[d1_0,d0_0,d0_1]" : "?"); INFER_OK(op, "[?,1,2,3,4,5];[3,1,?]", incompatible_shape_error ? "[d0_0,d0_1,d0_2,d0_3|d1_0,d0_4,d0_5]" : "?"); INFER_OK(op, "[3,1,?];[?,1,2,3,4,5]", incompatible_shape_error ? "[d1_0,d1_1,d1_2,d1_3|d0_0,d1_4,d1_5]" : "?"); if (incompatible_shape_error) { INFER_ERROR("Dimensions must be equal", op, "[2];[3]"); } else { INFER_OK(op, "[2];[3]", "[]"); } }; for (string op_name : {"Add", "Complex", "Div", "Equal", "Greater", "GreaterEqual", "Igamma", "Igammac", "Zeta", "Polygamma", "Less", "LessEqual", "LogicalAnd", "LogicalOr", "Maximum", "Minimum", "Mod", "Mul", "NotEqual", "Pow", "Sub", "SquaredDifference", "DivNoNan"}) { ShapeInferenceTestOp op(op_name); AddNodeAttr("incompatible_shape_error", true, &op.node_def); test_shapes(op, true); if ((op_name == "Equal") || (op_name == "NotEqual")) { ShapeInferenceTestOp op(op_name); AddNodeAttr("incompatible_shape_error", false, &op.node_def); test_shapes(op, false); } } } TEST(MathOpsTest, Select_ShapeFn) { ShapeInferenceTestOp op("Select"); INFER_OK(op, "?;?;?", "in1|in2"); INFER_OK(op, "[];[1];?", "in1"); INFER_OK(op, "[];?;?", "in1|in2"); INFER_OK(op, "[1];?;?", "in1|in2"); INFER_OK(op, "[1,2];?;?", "in1|in2?"); INFER_OK(op, "?;[];?", "in1"); INFER_OK(op, "?;?;[]", "in2"); INFER_OK(op, "?;[1];?", "in1"); INFER_OK(op, "?;?;[1]", "in2"); INFER_OK(op, "?;[1,2];?", "in1"); INFER_OK(op, "?;?;[1,2]", "in2"); INFER_ERROR("Shapes must be equal rank, but are 0 and 1", op, "[1];[];?"); INFER_ERROR("Shapes must be equal rank, but are 1 and 2", op, "[];[1];[1,2]"); INFER_ERROR("Shapes must be equal rank, but are 1 and 2", op, "[1,2];[1];?"); INFER_OK(op, "[2];[?];[?]", "in1|in2"); INFER_OK(op, "[?];[?,?,3];[1,2,?]", "[d2_0,d2_1,d1_2]"); INFER_OK(op, "[2];[?,?,3];[?,2,?]", "[d1_0|d2_0,d2_1,d1_2]"); INFER_ERROR("must be equal", op, "[1];[2,?,3];[?,2,?]"); INFER_ERROR("Shapes must be equal rank, but are 3 and 2", op, "[2,?];[?,?,3];[?,2,?]"); INFER_OK(op, "[2,?,?];[?,?,3];[?,2,?]", "[d0_0,d2_1,d1_2]"); INFER_ERROR("Dimension 2 in both shapes must be equal, but are 3 and 5", op, "[2,?,5];[?,?,3];[?,2,?]"); const OpRegistrationData* op_reg_data; TF_ASSERT_OK(OpRegistry::Global()->LookUp(op.name, &op_reg_data)); typedef std::vector<std::pair<PartialTensorShape, DataType>> ShapeDtypeV; std::vector<std::unique_ptr<ShapeDtypeV>> handle_data; std::unique_ptr<shape_inference::InferenceContext> c; auto run_inference_for_handles = [&]() -> Status { CHECK(op_reg_data->shape_inference_fn != nullptr); c.reset(new shape_inference::InferenceContext( TF_GRAPH_DEF_VERSION, op.node_def, op_reg_data->op_def, {PartialTensorShape(), PartialTensorShape(), PartialTensorShape()}, {}, {}, handle_data)); TF_CHECK_OK(c->construction_status()); Status s = 
c->Run(op_reg_data->shape_inference_fn); LOG(INFO) << "Inference got " << s; return s; }; auto shape_proto = [](std::initializer_list<int64_t> dim_sizes) { TensorShapeProto p; for (auto i : dim_sizes) p.add_dim()->set_size(i); return p; }; auto i0 = PartialTensorShape({1, -1}); auto i1 = PartialTensorShape({-1, 2}); PartialTensorShape unknown_shape; auto scalar = PartialTensorShape({}); handle_data.emplace_back( new ShapeDtypeV{{scalar, DT_FLOAT}, {unknown_shape, DT_INT32}}); handle_data.emplace_back(new ShapeDtypeV{{i0, DT_FLOAT}, {i1, DT_INT32}}); handle_data.emplace_back( new ShapeDtypeV{{i1, DT_FLOAT}, {unknown_shape, DT_INT32}}); TF_ASSERT_OK(run_inference_for_handles()); auto* out = c->output_handle_shapes_and_types(0); ASSERT_EQ(2, out->size()); EXPECT_EQ("[1,2]", c->DebugString(out->at(0).shape)); EXPECT_EQ(DT_FLOAT, out->at(0).dtype); EXPECT_EQ("[?,2]", c->DebugString(out->at(1).shape)); EXPECT_EQ(DT_INT32, out->at(1).dtype); handle_data[2]->at(0).first = shape_proto({2, 2}); EXPECT_TRUE(absl::StrContains(run_inference_for_handles().message(), "must be equal, but are 1 and 2")); handle_data[2]->at(0).first = i1; handle_data[2]->at(1).second = DT_INT64; EXPECT_TRUE(absl::StrContains(run_inference_for_handles().message(), "pointing to different dtypes")); handle_data[2]->at(1).second = DT_INT32; handle_data[2]->push_back({i1, DT_FLOAT}); EXPECT_TRUE(absl::StrContains(run_inference_for_handles().message(), "pointing to different numbers of tensors")); handle_data[2]->pop_back(); } TEST(MathOpsTest, Range_ShapeFn) { ShapeInferenceTestOp op("Range"); TF_ASSERT_OK(NodeDefBuilder("test", "Range") .Input({"start", {}, DT_INT32}) .Input({"limit", {}, DT_INT32}) .Input({"delta", {}, DT_INT32}) .Attr("Tidx", DT_INT32) .Finalize(&op.node_def)); op.input_tensors.resize(3); INFER_OK(op, "?;?;?", "[?]"); INFER_ERROR("Shape must be rank 0 but is rank 2", op, "[1,2];?;?"); INFER_ERROR("for 'start'", op, "[1,2];?;?"); INFER_ERROR("Shape must be rank 0 but is rank 2", op, "?;[1,2];?"); INFER_ERROR("for 'limit'", op, "?;[1,2];?"); INFER_ERROR("Shape must be rank 0 but is rank 2", op, "?;?;[1,2]"); INFER_ERROR("for 'delta'", op, "?;?;[1,2]"); Tensor start_t = test::AsScalar(1); op.input_tensors[0] = &start_t; INFER_OK(op, "?;?;?", "[?]"); Tensor limit_t = test::AsScalar(1); op.input_tensors[1] = &limit_t; INFER_OK(op, "?;?;?", "[?]"); Tensor delta_t = test::AsScalar(1); op.input_tensors[2] = &delta_t; INFER_OK(op, "?;?;?", "[0]"); delta_t = test::AsScalar(0); INFER_ERROR("Requires delta != 0", op, "?;?;?"); delta_t = test::AsScalar(3); limit_t = test::AsScalar(-1); INFER_ERROR("Requires start <= limit when delta > 0: 1/-1", op, "?;?;?"); delta_t = test::AsScalar(-1); INFER_OK(op, "?;?;?", "[2]"); limit_t = test::AsScalar(4); INFER_ERROR("Requires start >= limit when delta < 0: 1/4", op, "?;?;?"); limit_t = test::AsScalar(100); start_t = test::AsScalar(2); delta_t = test::AsScalar(3); INFER_OK(op, "?;?;?", "[33]"); } TEST(MathOpsTest, LinSpace_ShapeFn) { ShapeInferenceTestOp op("LinSpace"); op.input_tensors.resize(3); INFER_OK(op, "?;?;?", "[?]"); INFER_ERROR("Shape must be rank 0 but is rank 2", op, "[1,2];?;?"); INFER_ERROR("for 'start'", op, "[1,2];?;?"); INFER_ERROR("Shape must be rank 0 but is rank 2", op, "?;[1,2];?"); INFER_ERROR("for 'stop'", op, "?;[1,2];?"); INFER_ERROR("Shape must be rank 0 but is rank 2", op, "?;?;[1,2]"); INFER_ERROR("for 'num'", op, "?;?;[1,2]"); Tensor num_t = test::AsScalar(1); op.input_tensors[2] = &num_t; INFER_OK(op, "?;?;?", "[1]"); num_t = test::AsScalar(2); 
INFER_OK(op, "?;?;?", "[2]"); num_t = test::AsScalar(-1); INFER_ERROR("Requires num > 0: -1", op, "?;?;?"); } TEST(MathOpsTest, UnsortedSegmentSum_ShapeFn) { ShapeInferenceTestOp op("UnsortedSegmentSum"); op.input_tensors.resize(3); INFER_OK(op, "?;?;?", "?"); INFER_OK(op, "?;[?];?", "?"); INFER_ERROR("Shape must be rank 0 but is rank 2", op, "?;?;[1,2]"); INFER_ERROR("Dimensions must be equal, but are 2 and 3", op, "[1,?,2];[1,?,3];?"); INFER_OK(op, "?;[3];?", "?"); INFER_ERROR("Shape must be at least rank 3 but is rank 2", op, "[1,2];[1,2,3];?"); Tensor num_segments_t = test::AsScalar(100); op.input_tensors[2] = &num_segments_t; INFER_OK(op, "[?,2,3,?,5];[1,2,?];[]", "[100,d0_3,d0_4]"); num_segments_t = test::AsScalar(-1); INFER_ERROR(("Dimension size, given by scalar input 2, must be " "non-negative but is -1"), op, "[3];[3];?"); } TEST(MathOpsTest, SparseSegment_ShapeFn) { ShapeInferenceTestOp op("SparseSegmentSum"); op.input_tensors.resize(3); INFER_OK(op, "?;?;?", "?"); INFER_OK(op, "[2,4,3];[3];[3]", "[?,d0_1,d0_2]"); INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[2,4,3];[];[3]"); INFER_ERROR("Shape must be rank 1 but is rank 2", op, "[2,4,3];[3];[3,4]"); INFER_ERROR("Dimension 0 in both shapes must be equal, but are 3 and 4", op, "[2,4,3];[3];[4]"); } TEST(MathOpsTest, SparseSegmentGrad_ShapeFn) { ShapeInferenceTestOp op("SparseSegmentMeanGrad"); op.input_tensors.resize(4); INFER_OK(op, "?;?;?;?", "?"); INFER_OK(op, "[2,4,3];[3];[3];[]", "[?,d0_1,d0_2]"); Tensor num_segments_t = test::AsScalar(100); op.input_tensors[3] = &num_segments_t; INFER_OK(op, "[2,4,3];[3];[3];[]", "[100,d0_1,d0_2]"); INFER_ERROR("Shape must be rank 0 but is rank 2", op, "[2,4,3];[3];[3];[1,1]"); num_segments_t = test::AsScalar(-100); op.input_tensors[3] = &num_segments_t; INFER_ERROR("Cannot specify a negative value", op, "[2,4,3];[3];[3];[]"); } TEST(MathOpsTest, BatchMatMul_ShapeFn) { ShapeInferenceTestOp op("BatchMatMul"); auto set_adj = [&op](bool adj_x, bool adj_y) { TF_ASSERT_OK(NodeDefBuilder("test", "BatchMatMul") .Input({"a", 0, DT_FLOAT}) .Input({"b", 0, DT_FLOAT}) .Attr("adj_x", adj_x) .Attr("adj_y", adj_y) .Finalize(&op.node_def)); }; set_adj(false, false); INFER_ERROR("at least rank 2", op, "[1];?"); INFER_ERROR("at least rank 2", op, "?;[2]"); INFER_OK(op, "?;?", "?"); INFER_OK(op, "[?,?];[?,?]", "[d0_0,d1_1]"); INFER_OK(op, "[?,?,?,?];?", "[d0_0,d0_1,d0_2,?]"); set_adj(false, false); INFER_OK(op, "[1,2,3,4];[1,2,?,?]", "[d0_0,d0_1,d0_2,d1_3]"); INFER_ERROR("are 2 and 3", op, "[?,1,2];[?,3,1]"); set_adj(true, false); INFER_OK(op, "[1,2,3,4];[1,2,?,?]", "[d0_0,d0_1,d0_3,d1_3]"); INFER_ERROR("are 2 and 3", op, "[?,2,1];[?,3,1]"); set_adj(false, true); INFER_OK(op, "[1,2,?,?];[1,2,3,4]", "[d0_0,d0_1,d0_2,d1_2]"); INFER_ERROR("are 2 and 3", op, "[?,1,2];[?,1,3]"); set_adj(true, true); INFER_OK(op, "[1,2,?,?];[1,2,3,4]", "[d0_0,d0_1,d0_3,d1_2]"); INFER_ERROR("are 2 and 3", op, "[?,2,1];[?,1,3]"); } TEST(MathOpsTest, ArgOps_ShapeFn) { ShapeInferenceTestOp op("ArgMax"); op.input_tensors.resize(2); INFER_OK(op, "?;?", "?"); INFER_OK(op, "[2];?", "[]"); INFER_OK(op, "[];?", "[]"); INFER_ERROR("must be rank 0", op, "[2];[1]"); INFER_OK(op, "[2,3,4];?", "[?,?]"); INFER_OK(op, "[2,3,4,5,6];?", "[?,?,?,?]"); Tensor dimension = test::AsScalar(0); op.input_tensors[1] = &dimension; INFER_OK(op, "[2,3,4];[]", "[d0_1,d0_2]"); dimension = test::AsScalar(1); op.input_tensors[1] = &dimension; INFER_OK(op, "[2,3,4];[]", "[d0_0,d0_2]"); dimension = test::AsScalar(2); op.input_tensors[1] = &dimension; 
INFER_OK(op, "[2,3,4];[]", "[d0_0,d0_1]"); dimension = test::AsScalar(10); op.input_tensors[1] = &dimension; INFER_ERROR("must be in the range [-3, 3)", op, "[2,3,4];[]"); dimension = test::AsScalar(-10); op.input_tensors[1] = &dimension; INFER_ERROR("must be in the range [-3, 3)", op, "[2,3,4];[]"); dimension = test::AsScalar(-1); op.input_tensors[1] = &dimension; INFER_OK(op, "[2,3,4];[]", "[d0_0,d0_1]"); } TEST(MathOpsTest, Betainc_ShapeFn) { ShapeInferenceTestOp op("Betainc"); INFER_OK(op, "?;?;?", "?"); INFER_OK(op, "[?,?];?;?", "in0"); INFER_OK(op, "[?,2];?;[1,?]", "[d2_0,d0_1]"); INFER_OK(op, "[?,2,?];[1,?,?];[?,?,3]", "[d1_0,d0_1,d2_2]"); INFER_OK(op, "[?,2,?];[];[?,?,3]", "[d0_0|d2_0,d0_1,d2_2]"); INFER_OK(op, "[];[];[?,?,3]", "in2"); INFER_OK(op, "[];[];?", "in2"); INFER_OK(op, "[];[];[1,2,3,4]", "in2"); INFER_OK(op, "[];[];[]", "in0"); INFER_ERROR("must be equal", op, "[1,2];[];[1,4]"); INFER_ERROR("must be equal", op, "[1,2];[];[1,2,3]"); } TEST(MathOpsTest, Requantize_ShapeFn) { ShapeInferenceTestOp op("Requantize"); INFER_OK(op, "?;?;?;?;?", "in0;[];[]"); INFER_OK(op, "?;[];[];[];[]", "in0;[];[]"); INFER_ERROR("must be rank 0", op, "?;[1];?;?;?"); INFER_ERROR("must be rank 0", op, "?;?;[2];?;?"); INFER_ERROR("must be rank 0", op, "?;?;?;[3];?"); INFER_ERROR("must be rank 0", op, "?;?;?;?;[4]"); } TEST(MathOpstest, RequantizationRange_ShapeFn) { ShapeInferenceTestOp op("RequantizationRange"); INFER_OK(op, "?;?;?", "[];[]"); INFER_OK(op, "?;[];[]", "[];[]"); INFER_ERROR("must be rank 0", op, "?;[1];?"); INFER_ERROR("must be rank 0", op, "?;?;[2]"); } TEST(MathOpsTest, Cross_ShapeFn) { ShapeInferenceTestOp op("Cross"); INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "[];[]"); INFER_ERROR("Dimension 0 in both shapes must be equal, but", op, "[3];[5]"); INFER_ERROR("Dimension must be 3 but", op, "[3,5];[3,5]"); INFER_OK(op, "?;?", "in0"); INFER_OK(op, "[?];[?]", "in0"); INFER_OK(op, "[1,?,3];[?,?,?]", "in0"); } TEST(MathOpsTest, HistogramFixedWidth_ShapeFn) { ShapeInferenceTestOp op("HistogramFixedWidth"); INFER_ERROR("Shape must be rank 1 but is rank 0", op, "[];[];[]"); INFER_ERROR("Dimension must be 2 but is 3", op, "[];[3];[]"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "[];[2];[2]"); INFER_OK(op, "?;?;?", "[?]"); INFER_OK(op, "[?];[2];[]", "[?]"); INFER_OK(op, "[?];[2];?", "[?]"); } TEST(MathOpsTest, QuantizedAdd_ShapeFn) { ShapeInferenceTestOp op("QuantizedAdd"); INFER_OK(op, "?;?;?;?;?;?", "?;[];[]"); INFER_OK(op, "?;?;[];[];[];[]", "?;[];[]"); INFER_OK(op, "[1,2];?;[];[];[];[]", "?;[];[]"); INFER_OK(op, "[];[2];[];[];[];[]", "[d1_0];[];[]"); INFER_ERROR("must be rank 0", op, "?;?;[1];?;?;?"); INFER_ERROR("must be rank 0", op, "?;?;?;[2];?;?"); INFER_ERROR("must be rank 0", op, "?;?;?;?;[3];?"); INFER_ERROR("must be rank 0", op, "?;?;?;?;?;[4]"); } TEST(MathOpsTest, Bincount_ShapeFn) { ShapeInferenceTestOp op("Bincount"); INFER_ERROR("Shape must be rank 0 but is rank 1", op, "?;[1];?"); INFER_OK(op, "?;?;?", "[?]"); INFER_OK(op, "?;[];?", "[?]"); INFER_OK(op, "[?];[];?", "[?]"); INFER_OK(op, "[?];[];[?]", "[?]"); } TEST(MathOpsTest, SobolSample) { ShapeInferenceTestOp op("SobolSample"); INFER_ERROR("must be rank 0", op, "[1];?;?"); INFER_ERROR("must be rank 0", op, "?;[1];?"); INFER_ERROR("must be rank 0", op, "?;?;[1]"); INFER_OK(op, "[];[];[]", "[?,?]"); } TEST(MathOpsTest, EqualOp) { ShapeInferenceTestOp op("Equal"); AddNodeAttr("incompatible_shape_error", true, &op.node_def); INFER_OK(op, "?;?", "?"); INFER_OK(op, "[1,2];?", "?"); INFER_OK(op, 
"?;[1,2]", "?"); INFER_OK(op, "[1,2,3];[1]", "[d0_0,d0_1,d0_2]"); INFER_OK(op, "[?,2,1];[1,3]", "[d0_0,d0_1,d1_1]"); INFER_OK(op, "[1,?,3];[3,1]", "[d0_0,d1_0,d0_2]"); INFER_OK(op, "[1,2,3];[2,1,3]", "[d1_0,d0_1,d0_2]"); INFER_OK(op, "[?,10,1];[?,1,4]", "[?,d0_1,d1_2]"); INFER_OK(op, "[10,?,1];[1,?,4]", "[d0_0,?,d1_2]"); } }
1,061
cpp
tensorflow/tensorflow
nn_ops
tensorflow/core/ops/nn_ops.cc
tensorflow/core/kernels/nn_ops_test.cc
#ifndef TENSORFLOW_C_EXPERIMENTAL_OPS_NN_OPS_H_ #define TENSORFLOW_C_EXPERIMENTAL_OPS_NN_OPS_H_ #include "tensorflow/c/eager/abstract_context.h" #include "tensorflow/c/eager/abstract_tensor_handle.h" namespace tensorflow { namespace ops { Status SparseSoftmaxCrossEntropyWithLogits( AbstractContext* ctx, AbstractTensorHandle* const features, AbstractTensorHandle* const labels, AbstractTensorHandle** loss, AbstractTensorHandle** backprop, const char* name = nullptr, const char* raw_device_name = nullptr); Status ReluGrad(AbstractContext* ctx, AbstractTensorHandle* const gradients, AbstractTensorHandle* const features, AbstractTensorHandle** backprops, const char* name = nullptr, const char* raw_device_name = nullptr); Status Relu(AbstractContext* ctx, AbstractTensorHandle* const features, AbstractTensorHandle** activations, const char* name = nullptr, const char* raw_device_name = nullptr); Status BiasAdd(AbstractContext* ctx, AbstractTensorHandle* const value, AbstractTensorHandle* const bias, AbstractTensorHandle** output, const char* data_format = "NHWC", const char* name = nullptr, const char* raw_device_name = nullptr); Status BiasAddGrad(AbstractContext* ctx, AbstractTensorHandle* const out_backprop, AbstractTensorHandle** output, const char* data_format = "NHWC", const char* name = nullptr, const char* raw_device_name = nullptr); } } #endif #include <algorithm> #include <cmath> #include <vector> #include "tensorflow/core/framework/common_shape_fns.h" #include "tensorflow/core/framework/kernel_shape_util.h" #include "tensorflow/core/framework/numeric_op.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/shape_inference.h" #include "tensorflow/core/lib/core/bits.h" #include "tensorflow/core/lib/math/math_util.h" #include "tensorflow/core/util/mirror_pad_mode.h" #include "tensorflow/core/util/padding.h" #include "tensorflow/core/util/tensor_format.h" namespace tensorflow { using shape_inference::DimensionHandle; using shape_inference::InferenceContext; using shape_inference::ShapeHandle; namespace { Status FractionalPoolShapeFn(InferenceContext* c) { ShapeHandle input; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 4, &input)); std::vector<float> pooling_ratio; TF_RETURN_IF_ERROR(c->GetAttr("pooling_ratio", &pooling_ratio)); if (pooling_ratio.size() != 4) { return errors::InvalidArgument( "pooling_ratio field must specify 4 dimensions"); } std::vector<DimensionHandle> output_dims; for (int i = 0; i < 4; ++i) { DimensionHandle d = c->Dim(input, i); if (c->ValueKnown(d)) { auto val = static_cast<int64_t>(std::floor(c->Value(d) / pooling_ratio[i])); if (val < 0) { return errors::InvalidArgument("Size computed for dim ", i, " is negative: ", val); } output_dims.push_back(c->MakeDim(val)); } else { output_dims.push_back(c->UnknownDim()); } } for (std::size_t i = 0; i < pooling_ratio.size(); ++i) { if (pooling_ratio[i] < 1) { return errors::InvalidArgument( "pooling_ratio cannot be smaller than 1, got: ", pooling_ratio[i]); } } c->set_output(0, c->MakeShape(output_dims)); c->set_output(1, c->Vector(output_dims[1])); c->set_output(2, c->Vector(output_dims[2])); return absl::OkStatus(); } } REGISTER_OP("AvgPool") .Input("value: T") .Output("output: T") .Attr("ksize: list(int) >= 4") .Attr("strides: list(int) >= 4") .Attr(GetPaddingAttrString()) .Attr(GetConvnetDataFormatAttrString()) .Attr("T: {half, bfloat16, float, double}") .SetShapeFn(shape_inference::AvgPoolShape); REGISTER_OP("AvgPoolGrad") .Input("orig_input_shape: int32") .Input("grad: T") .Output("output: T") 
.Attr("ksize: list(int) >= 4") .Attr("strides: list(int) >= 4") .Attr(GetPaddingAttrString()) .Attr(GetConvnetDataFormatAttrString()) .Attr("T: {half, bfloat16, float, double}") .SetShapeFn(shape_inference::AvgPoolGradShape); REGISTER_OP("BatchNormWithGlobalNormalization") .Input("t: T") .Input("m: T") .Input("v: T") .Input("beta: T") .Input("gamma: T") .Output("result: T") .Attr("T: numbertype") .Attr("variance_epsilon: float") .Attr("scale_after_normalization: bool") .Deprecated(9, "Use tf.nn.batch_normalization()") .SetShapeFn([](InferenceContext* c) { ShapeHandle input; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 4, &input)); DimensionHandle last_dim = c->Dim(input, 3); for (int i = 1; i < 5; ++i) { ShapeHandle vec; TF_RETURN_IF_ERROR(c->WithRank(c->input(i), 1, &vec)); TF_RETURN_IF_ERROR(c->Merge(last_dim, c->Dim(vec, 0), &last_dim)); } ShapeHandle out; TF_RETURN_IF_ERROR(c->ReplaceDim(input, 3, last_dim, &out)); c->set_output(0, out); return absl::OkStatus(); }); REGISTER_OP("BatchNormWithGlobalNormalizationGrad") .Input("t: T") .Input("m: T") .Input("v: T") .Input("gamma: T") .Input("backprop: T") .Output("dx: T") .Output("dm: T") .Output("dv: T") .Output("db: T") .Output("dg: T") .Attr("T: numbertype") .Attr("variance_epsilon: float") .Attr("scale_after_normalization: bool") .Deprecated(9, "Use tf.nn.batch_normalization()") .SetShapeFn([](InferenceContext* c) { ShapeHandle input; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 4, &input)); TF_RETURN_IF_ERROR( c->Merge(input, c->input(4), &input)); DimensionHandle last_dim = c->Dim(input, 3); for (int i = 1; i < 4; ++i) { ShapeHandle vec; TF_RETURN_IF_ERROR(c->WithRank(c->input(i), 1, &vec)); TF_RETURN_IF_ERROR(c->Merge(last_dim, c->Dim(vec, 0), &last_dim)); } ShapeHandle dx; TF_RETURN_IF_ERROR(c->ReplaceDim(input, 3, last_dim, &dx)); c->set_output(0, dx); ShapeHandle vector_shape = c->Vector(last_dim); c->set_output(1, vector_shape); c->set_output(2, vector_shape); c->set_output(3, vector_shape); c->set_output(4, vector_shape); return absl::OkStatus(); }); REGISTER_OP("FusedBatchNorm") .Input("x: T") .Input("scale: T") .Input("offset: T") .Input("mean: T") .Input("variance: T") .Output("y: T") .Output("batch_mean: T") .Output("batch_variance: T") .Output("reserve_space_1: T") .Output("reserve_space_2: T") .Attr("T: {float}") .Attr("epsilon: float = 0.0001") .Attr("exponential_avg_factor: float = 1.0") .Attr(GetConvnetDataFormatAttrString()) .Attr("is_training: bool = true") .SetShapeFn(shape_inference::FusedBatchNormShape); REGISTER_OP("FusedBatchNormV2") .Input("x: T") .Input("scale: U") .Input("offset: U") .Input("mean: U") .Input("variance: U") .Output("y: T") .Output("batch_mean: U") .Output("batch_variance: U") .Output("reserve_space_1: U") .Output("reserve_space_2: U") .Attr("T: {half, bfloat16, float}") .Attr("U: {float}") .Attr("epsilon: float = 0.0001") .Attr("exponential_avg_factor: float = 1.0") .Attr(GetConvnetDataFormatAttrString()) .Attr("is_training: bool = true") .SetShapeFn(shape_inference::FusedBatchNormShape); REGISTER_OP("FusedBatchNormV3") .Input("x: T") .Input("scale: U") .Input("offset: U") .Input("mean: U") .Input("variance: U") .Output("y: T") .Output("batch_mean: U") .Output("batch_variance: U") .Output("reserve_space_1: U") .Output("reserve_space_2: U") .Output("reserve_space_3: U") .Attr("T: {half, bfloat16, float}") .Attr("U: {bfloat16, float}") .Attr("epsilon: float = 0.0001") .Attr("exponential_avg_factor: float = 1.0") .Attr(GetConvnetDataFormat2D3DAttrString()) .Attr("is_training: bool = true") 
.SetShapeFn(shape_inference::FusedBatchNormV3Shape); REGISTER_OP("_FusedBatchNormEx") .Input("x: T") .Input("scale: U") .Input("offset: U") .Input("mean: U") .Input("variance: U") .Input("side_input: num_side_inputs * T") .Output("y: T") .Output("batch_mean: U") .Output("batch_variance: U") .Output("reserve_space_1: U") .Output("reserve_space_2: U") .Output("reserve_space_3: U") .Attr("T: {half, float, bfloat16}") .Attr("U: {float}") .Attr("epsilon: float = 0.0001") .Attr("exponential_avg_factor: float = 1.0") .Attr("num_side_inputs: int >= 0 = 0") .Attr("activation_mode: string = \"Identity\"") .Attr(GetConvnetDataFormatAttrString()) .Attr("is_training: bool = true") .SetShapeFn(shape_inference::FusedBatchNormExShape) .Doc(R"doc( Internal FusedBatchNorm operation: reserved for internal use. Do not invoke this operator directly in Python. A fusion optimization is expected to create these operators. )doc"); REGISTER_OP("FusedBatchNormGrad") .Input("y_backprop: T") .Input("x: T") .Input("scale: T") .Input("reserve_space_1: T") .Input("reserve_space_2: T") .Output("x_backprop: T") .Output("scale_backprop: T") .Output("offset_backprop: T") .Output("reserve_space_3: T") .Output("reserve_space_4: T") .Attr("T: {float}") .Attr("epsilon: float = 0.0001") .Attr(GetConvnetDataFormatAttrString()) .Attr("is_training: bool = true") .SetShapeFn(shape_inference::FusedBatchNormGradShape); REGISTER_OP("FusedBatchNormGradV2") .Input("y_backprop: T") .Input("x: T") .Input("scale: float") .Input("reserve_space_1: U") .Input("reserve_space_2: U") .Output("x_backprop: T") .Output("scale_backprop: U") .Output("offset_backprop: U") .Output("reserve_space_3: U") .Output("reserve_space_4: U") .Attr("T: {half, bfloat16, float}") .Attr("U: {float}") .Attr("epsilon: float = 0.0001") .Attr(GetConvnetDataFormatAttrString()) .Attr("is_training: bool = true") .SetShapeFn(shape_inference::FusedBatchNormGradShape); REGISTER_OP("FusedBatchNormGradV3") .Input("y_backprop: T") .Input("x: T") .Input("scale: float") .Input("reserve_space_1: U") .Input("reserve_space_2: U") .Input("reserve_space_3: U") .Output("x_backprop: T") .Output("scale_backprop: U") .Output("offset_backprop: U") .Output("reserve_space_4: U") .Output("reserve_space_5: U") .Attr("T: {half, bfloat16, float}") .Attr("U: {float}") .Attr("epsilon: float = 0.0001") .Attr(GetConvnetDataFormat2D3DAttrString()) .Attr("is_training: bool = true") .SetShapeFn(shape_inference::FusedBatchNormGradShape); REGISTER_OP("_FusedBatchNormGradEx") .Input("y_backprop: T") .Input("x: T") .Input("scale: float") .Input("reserve_space_1: U") .Input("reserve_space_2: U") .Input("reserve_space_3: U") .Input("offset: float") .Input("y: T") .Output("x_backprop: T") .Output("scale_backprop: U") .Output("offset_backprop: U") .Output("reserve_space_4: U") .Output("reserve_space_5: U") .Output("side_input_backprop: num_side_inputs * T") .Attr("T: {half, float}") .Attr("U: {float}") .Attr("epsilon: float = 0.0001") .Attr("num_side_inputs: int >= 0 = 0") .Attr("activation_mode: string = \"Identity\"") .Attr(GetConvnetDataFormat2D3DAttrString()) .Attr("is_training: bool = true") .SetShapeFn(shape_inference::FusedBatchNormGradExShape) .Doc(R"doc( Internal FusedBatchNormGrad operation: reserved for internal use. Do not invoke this operator directly in Python. A fusion optimization is expected to create these operators. 
)doc"); REGISTER_OP("BiasAdd") .Attr("T: numbertype") .Input("value: T") .Input("bias: T") .Attr(GetConvnetDataFormatAttrString()) .Output("output: T") .SetShapeFn(shape_inference::BiasAddShape); REGISTER_OP("BiasAddGrad") .Attr("T: numbertype") .Input("out_backprop: T") .Attr(GetConvnetDataFormatAttrString()) .Output("output: T") .SetShapeFn(shape_inference::BiasAddGradShape); REGISTER_OP("BiasAddV1") .Attr("T: numbertype") .Input("value: T") .Input("bias: T") .Output("output: T") .SetShapeFn(shape_inference::BiasAddShape); REGISTER_OP("Conv") .Input("input: T") .Input("filter: T") .Output("output: T") .Attr("T: {half, bfloat16, float, double, int32}") .Attr("strides: list(int)") .Attr(GetPaddingAttrStringWithExplicit()) .Attr(GetExplicitPaddingsAttrString()) .Attr( "data_format: { 'CHANNELS_FIRST', 'CHANNELS_LAST' } = 'CHANNELS_LAST' ") .Attr("dilations: list(int) = []") .Attr("batch_dims: int = 1") .Attr("groups: int = 1") .SetShapeFn(shape_inference::ConvShape); REGISTER_OP("Conv2D") .Input("input: T") .Input("filter: T") .Output("output: T") .Attr("T: {half, bfloat16, float, double, int32}") .Attr("strides: list(int)") .Attr("use_cudnn_on_gpu: bool = true") .Attr(GetPaddingAttrStringWithExplicit()) .Attr(GetExplicitPaddingsAttrString()) .Attr(GetConvnetDataFormatAttrString()) .Attr("dilations: list(int) = [1, 1, 1, 1]") .SetShapeFn(shape_inference::Conv2DShapeWithExplicitPadding); REGISTER_OP("Conv2DBackpropInput") .Input("input_sizes: int32") .Input("filter: T") .Input("out_backprop: T") .Output("output: T") .Attr("T: {half, bfloat16, float, double, int32}") .Attr("strides: list(int)") .Attr("use_cudnn_on_gpu: bool = true") .Attr(GetPaddingAttrStringWithExplicit()) .Attr(GetExplicitPaddingsAttrString()) .Attr(GetConvnetDataFormatAttrString()) .Attr("dilations: list(int) = [1, 1, 1, 1]") .SetShapeFn(shape_inference::Conv2DBackpropInputShape); REGISTER_OP("Conv2DBackpropInputV2") .Input("input: T") .Input("filter: T") .Input("out_backprop: T") .Output("output: T") .Attr("T: {half, bfloat16, float, double, int32}") .Attr("strides: list(int)") .Attr("use_cudnn_on_gpu: bool = true") .Attr(GetPaddingAttrStringWithExplicit()) .Attr(GetExplicitPaddingsAttrString()) .Attr(GetConvnetDataFormatAttrString()) .Attr("dilations: list(int) = [1, 1, 1, 1]") .SetShapeFn([](InferenceContext* c) { return UnchangedShapeWithRank(c, 4); }); REGISTER_OP("Conv2DBackpropFilter") .Input("input: T") .Input("filter_sizes: int32") .Input("out_backprop: T") .Output("output: T") .Attr("T: {half, bfloat16, float, double}") .Attr("strides: list(int)") .Attr("use_cudnn_on_gpu: bool = true") .Attr(GetPaddingAttrStringWithExplicit()) .Attr(GetExplicitPaddingsAttrString()) .Attr(GetConvnetDataFormatAttrString()) .Attr("dilations: list(int) = [1, 1, 1, 1]") .SetShapeFn([](InferenceContext* c) { ShapeHandle s; TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(1, &s)); TF_RETURN_IF_ERROR(c->WithRank(s, 4, &s)); c->set_output(0, s); return absl::OkStatus(); }); REGISTER_OP("Conv2DBackpropFilterV2") .Input("input: T") .Input("filter: T") .Input("out_backprop: T") .Output("output: T") .Attr("T: {half, bfloat16, float, double}") .Attr("strides: list(int)") .Attr("use_cudnn_on_gpu: bool = true") .Attr(GetPaddingAttrStringWithExplicit()) .Attr(GetExplicitPaddingsAttrString()) .Attr(GetConvnetDataFormatAttrString()) .Attr("dilations: list(int) = [1, 1, 1, 1]") .SetShapeFn([](InferenceContext* c) { ShapeHandle out; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 4, &out)); c->set_output(0, out); return absl::OkStatus(); }); 
REGISTER_OP("_FusedConv2D") .Input("input: T") .Input("filter: T") .Input("args: TArgs") .Input("host_args : num_host_args * float") .Output("output: T") .Attr("T: {half, float, double, int8, qint8}") .Attr("TArgs: list(type)") .Attr("num_args: int >= 0") .Attr("num_host_args: int >= 0 =0") .Attr("strides: list(int)") .Attr(GetPaddingAttrStringWithExplicit()) .Attr(GetExplicitPaddingsAttrString()) .Attr("data_format: { 'NHWC', 'NCHW', 'NCHW_VECT_C' } = 'NHWC'") .Attr("filter_format: {'HWIO', 'OIHW', 'OIHW_VECT_I'} = 'HWIO'") .Attr("dilations: list(int) = [1, 1, 1, 1]") .Attr("use_cudnn_on_gpu: bool = true") .Attr("fused_ops: list(string) = []") .Attr("epsilon: float = 0.0001") .Attr("leakyrelu_alpha: float = 0.2") .SetShapeFn(shape_inference::Conv2DShapeWithExplicitPadding) .Doc(R"doc( Performs a convolution followed by a specified series of operations. The inputs to the convolution are `input` and `filter`. The series of operations that follows is specified by the `fused_ops` attribute, which is a list of TF op names specified as strings (e.g. "Relu"). They are performed in order, where the (first) input to each op is the output of the preceding op. The first input and the output of each fused_op must be of type T. Currently supported fused_op combinations are: [X] and [X,A], where X is one of {"BiasAdd","FusedBatchNorm"} and A is one of {"Elu","Relu","Relu6"}. * The first input to op X is the Conv2D result, and the additional input(s) to X are specified by `args`. * If there is an op A specified, the output of op X is the input to op A, and op A produces the _FusedConv2D output. Otherwise, op X produces the _FusedConv2D output. *NOTE*: Do not invoke this operator directly in Python. Grappler is expected to create these operators. )doc"); namespace { Status CommonFusedConvCalculations(InferenceContext* c, bool has_resize) { ShapeHandle input; TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 4, &input)); ShapeHandle resized = input; int paddings_index = 1; int filter_index = 2; if (has_resize) { paddings_index = 2; filter_index = 3; ShapeHandle unused_size; TF_RETURN_IF_ERROR(c->Merge(c->input(1), c->Vector(2), &unused_size)); const Tensor* size = c->input_tensor(1); DimensionHandle new_height = c->UnknownDim(); DimensionHandle new_width = c->UnknownDim(); if (size != nullptr) { new_height = c->MakeDim(size->flat<int32>()(0)); new_width = c->MakeDim(size->flat<int32>()(1)); } TF_RETURN_IF_ERROR(c->ReplaceDim(resized, 1, new_height, &resized)); TF_RETURN_IF_ERROR(c->ReplaceDim(resized, 2, new_width, &resized)); } ShapeHandle paddings; TF_RETURN_IF_ERROR(c->WithRank(c->input(paddings_index), 2, &paddings)); TF_RETURN_IF_ERROR( c->WithRank(resized, c->Value(c->Dim(paddings, 0)), &resized)); TF_RETURN_IF_ERROR( c->Merge(paddings, c->Matrix(c->Rank(resized), 2), &paddings)); const Tensor* paddings_t = c->input_tensor(paddings_index); ShapeHandle padded; if (paddings_t != nullptr) { std::vector<DimensionHandle> output_dims; for (int i = 0; i < 4; ++i) { DimensionHandle dim = c->Dim(resized, i); int64_t p0 = static_cast<int64_t>(paddings_t->matrix<int32>()(i, 0)); int64_t p1 = static_cast<int64_t>(paddings_t->matrix<int32>()(i, 1)); if (p0 < 0 || p1 < 0) { return errors::InvalidArgument("Paddings must be non-negative"); } TF_RETURN_IF_ERROR(c->Add(dim, p0 + p1, &dim)); output_dims.push_back(dim); } padded = c->MakeShape(output_dims); } else { padded = c->UnknownShapeOfRank(4); } ShapeHandle filter; TF_RETURN_IF_ERROR(c->WithRank(c->input(filter_index), 4, &filter)); std::vector<int32> strides; 
TF_RETURN_IF_ERROR(c->GetAttr("strides", &strides)); if (strides.size() != 4) { return errors::InvalidArgument( "Operation requires the stride attribute to contain 4 values, but ", "got: ", strides.size()); } int32_t stride_rows = strides[1]; int32_t stride_cols = strides[2]; DimensionHandle batch_size_dim = c->Dim(padded, 0); DimensionHandle in_rows_dim = c->Dim(padded, 1); DimensionHandle in_cols_dim = c->Dim(padded, 2); DimensionHandle filter_rows_dim = c->Dim(filter, 0); DimensionHandle filter_cols_dim = c->Dim(filter, 1); DimensionHandle output_depth_dim = c->Dim(filter, 3); DimensionHandle unused; TF_RETURN_IF_ERROR(c->Merge(c->Dim(padded, 3), c->Dim(filter, 2), &unused)); Padding padding; TF_RETURN_IF_ERROR(c->GetAttr("padding", &padding)); DimensionHandle output_rows, output_cols; TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDims( c, in_rows_dim, filter_rows_dim, stride_rows, padding, &output_rows)); TF_RETURN_IF_ERROR(GetWindowedOutputSizeFromDims( c, in_cols_dim, filter_cols_dim, stride_cols, padding, &output_cols)); ShapeHandle output_shape = c->MakeShape( {batch_size_dim, output_rows, output_cols, output_depth_dim}); c->set_output(0, output_shape); return absl::OkStatus(); } } REGISTER_OP("DataFormatDimMap") .Input("x: T") .Output("y: T") .Attr("T: {int32, int64} = DT_INT32") .Attr("src_format: string = 'NHWC'") .Attr("dst_format: string = 'NCHW'") .SetShapeFn(shape_inference::UnchangedShape); REGISTER_OP("DataFormatVecPermute") .Input("x: T") .Output("y: T") .Attr("T: {int32, int64} = DT_INT32") .Attr("src_format: string = 'NHWC'") .Attr("dst_format: string = 'NCHW'") .SetShapeFn(shape_inference::UnchangedShape); REGISTER_OP("FusedResizeAndPadConv2D") .Input("input: T") .Input("size: int32") .Input("paddings: int32") .Input("filter: T") .Output("output: T") .Attr("T: {half, float, double}") .Attr("resize_align_corners: bool = false") .Attr(GetMirrorPadModeAttrString()) .Attr("strides: list(int)") .Attr(GetPaddingAttrString()) .SetShapeFn([](InferenceContext* c) { return CommonFusedConvCalculations(c, true); }); REGISTER_OP("FusedPadConv2D") .Input("input: T") .Input("paddings: int32") .Input("filter: T") .Output("output: T") .Attr("T: {half, bfloat16, float, double}") .Attr(GetMirrorPadModeAttrString()) .Attr("strides: list(int)") .Attr(GetPaddingAttrString()) .SetShapeFn([](InferenceContext* c) { return CommonFusedConvCalculations(c, false); }); REGISTER_OP("DepthwiseConv2dNative") .Input("input: T") .Input("filter: T") .Output("output: T") .Attr("T: {half, bfloat16, float, double}") .Attr("strides: list(int)") .Attr(GetPaddingAttrStringWithExplicit()) .Attr(GetExplicitPaddingsAttrString()) .Attr(GetConvnetDataFormatAttrString()) .Attr("dilations: list(int) = [1, 1, 1, 1]") .SetShapeFn(shape_inference::DepthwiseConv2DNativeShapeWithExplicitPadding); REGISTER_OP("DepthwiseConv2dNativeBackpropInput") .Input("input_sizes: int32") .Input("filter: T") .Input("out_backprop: T") .Output("output: T") .Attr("T: {half, bfloat16, float, double}") .Attr("strides: list(int)") .Attr(GetPaddingAttrStringWithExplicit()) .Attr(GetExplicitPaddingsAttrString()) .Attr(GetConvnetDataFormatAttrString()) .Attr("dilations: list(int) = [1, 1, 1, 1]") .SetShapeFn([](InferenceContext* c) { ShapeHandle s; TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(0, &s)); TF_RETURN_IF_ERROR(c->WithRank(s, 4, &s)); c->set_output(0, s); return absl::OkStatus(); }); REGISTER_OP("DepthwiseConv2dNativeBackpropFilter") .Input("input: T") .Input("filter_sizes: int32") .Input("out_backprop: T") .Output("output: T") 
.Attr("T: {half, bfloat16, float, double}") .Attr("strides: list(int)") .Attr(GetPaddingAttrStringWithExplicit()) .Attr(GetExplicitPaddingsAttrString()) .Attr(GetConvnetDataFormatAttrString()) .Attr("dilations: list(int) = [1, 1, 1, 1]") .SetShapeFn([](InferenceContext* c) { ShapeHandle s; TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(1, &s)); TF_RETURN_IF_ERROR(c->WithRank(s, 4, &s)); c->set_output(0, s); return absl::OkStatus(); }); REGISTER_OP("_FusedDepthwiseConv2dNative") .Input("input: T") .Input("filter: T") .Input("args: num_args * T") .Output("output: T") .Attr("T: {half, bfloat16, float, double}") .Attr("num_args: int >= 0") .Attr("strides: list(int)") .Attr(GetPaddingAttrString()) .Attr(GetConvnetDataFormatAttrString()) .Attr("dilations: list(int) = [1, 1, 1, 1]") .Attr("fused_ops: list(string) = []") .Attr("epsilon: float = 0.0001") .Attr("leakyrelu_alpha: float = 0.2") .SetShapeFn(shape_inference::DepthwiseConv2DNativeShape); REGISTER_OP("Conv3D") .Input("input: T") .Input("filter: T") .Output("output: T") .Attr("T: {half, bfloat16, float, double}") .Attr("strides: list(int) >= 5") .Attr(GetPaddingAttrString()) .Attr(GetConvnet3dDataFormatAttrString()) .Attr("dilations: list(int) = [1, 1, 1, 1, 1]") .SetShapeFn(shape_inference::Conv3DShape); REGISTER_OP("Conv3DBackpropInput") .Input("input: T") .Input("filter: T") .Input("out_backprop: T") .Output("output: T") .Attr("T: {half, float, double}") .Attr("strides: list(int) >= 5") .Attr(GetPaddingAttrString()) .Deprecated(10, "Use Conv3DBackpropInputV2") .Attr("dilations: list(int) = [1, 1, 1, 1, 1]") .SetShapeFn([](InferenceContext* c) { return UnchangedShapeWithRank(c, 5); }); REGISTER_OP("Conv3DBackpropFilter") .Input("input: T") .Input("filter: T") .Input("out_backprop: T") .Output("output: T") .Attr("T: {half, float, double}") .Attr("strides: list(int) >= 5") .Attr(GetPaddingAttrString()) .Deprecated(10, "Use Conv3DBackpropFilterV2") .Attr("dilations: list(int) = [1, 1, 1, 1, 1]") .SetShapeFn([](InferenceContext* c) { ShapeHandle out; TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 5, &out)); c->set_output(0, out); return absl::OkStatus(); }); REGISTER_OP("Conv3DBackpropInputV2") .Input("input_sizes: Tshape") .Input("filter: T") .Input("out_backprop: T") .Output("output: T") .Attr("T: {half, bfloat16, float, double}") .Attr("strides: list(int) >= 5") .Attr(GetPaddingAttrString()) .Attr(GetConvnet3dDataFormatAttrString()) .Attr("dilations: list(int) = [1, 1, 1, 1, 1]") .Attr("Tshape: {int32, int64} = DT_INT32") .SetShapeFn([](InferenceContext* c) { ShapeHandle s; TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(0, &s)); TF_RETURN_IF_ERROR(c->WithRank(s, 5, &s)); c->set_output(0, s); return absl::OkStatus(); }); REGISTER_OP("Conv3DBackpropFilterV2") .Input("input: T") .Input("filter_sizes: int32") .Input("out_backprop: T") .Output("output: T") .Attr("T: {half, bfloat16, float, double}") .Attr("strides: list(int) >= 5") .Attr(GetPaddingAttrString()) .Attr(GetConvnet3dDataFormatAttrString()) .Attr("dilations: list(int) = [1, 1, 1, 1, 1]") .SetShapeFn([](InferenceContext* c) { ShapeHandle s; TF_RETURN_IF_ERROR(c->MakeShapeFromShapeTensor(1, &s)); TF_RETURN_IF_ERROR(c->WithRank(s, 5, &s)); c->set_output(0, s); return absl::OkStatus(); }); REGISTER_OP("AvgPool3D") .Input("input: T") .Output("output: T") .Attr("ksize: list(int) >= 5") .Attr("strides: list(int) >= 5") .Attr(GetPaddingAttrString()) .Attr(GetConvnet3dDataFormatAttrString()) .Attr("T: {half, bfloat16, float, double}") .SetShapeFn(shape_inference::Pool3DShape); 
REGISTER_OP("AvgPool3DGrad") .Input("orig_input_shape: int32") .Input("grad: T") .Output("output: T") .Attr("ksize: list(int) >= 5") .Attr("strides: list(int) >= 5") .Attr(GetPaddingAttrString()) .Attr(GetConvnet3dDataFormatAttrString()) .Attr("T: {half, bfloat16, float, double}") .SetShapeFn(shape_inference::AvgPool3DGradShape); REGISTER_OP("MaxPool3D") .Input("input: T") .Output("output: T") .Attr("ksize: list(int) >= 5") .Attr("strides: list(int) >= 5") .Attr(GetPaddingAttrString()) .Attr(GetConvnet3dDataFormatAttrString()) .Attr("T: {half, bfloat16, float}") .SetShapeFn(shape_inference::Pool3DShape); REGISTER_OP("MaxPool3DGrad") .Input("orig_input: TInput") .Input("orig_output: TInput") .Input("grad: T") .Output("output: T") .Attr("ksize: list(int) >= 5") .Attr("strides: list(int) >= 5") .Attr(GetPaddingAttrString()) .Attr(GetConvnet3dDataFormatAttrString()) .Attr("T: {half, bfloat16, float} = DT_FLOAT") .Attr("TInput: {half, bfloat16, float} = DT_FLOAT") .SetShapeFn(shape_inference::MaxPool3DGradShape); REGISTER_OP("MaxPool3DGradGrad") .Input("orig_input: T") .Input("orig_output: T") .Input("grad: T") .Output("output: T") .Attr("ksize: list(int) >= 5 ") .Attr("strides: list(int) >= 5") .Attr(GetPaddingAttrString()) .Attr(GetConvnet3dDataFormatAttrString()) .Attr("T: realnumbertype") .SetShapeFn([](InferenceContext* c) { TF_RETURN_IF
#include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/shape_inference_testutil.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { TEST(NNOpsTest, TopK_ShapeFn) { ShapeInferenceTestOp op("TopK"); auto set_k = [&op](int k) { TF_ASSERT_OK(NodeDefBuilder("test", "Pack") .Input({{"a", 0, DT_FLOAT}}) .Attr("k", k) .Finalize(&op.node_def)); }; set_k(20); INFER_OK(op, "?", "?;?"); INFER_OK(op, "[20]", "[20];[20]"); INFER_OK(op, "[21]", "[20];[20]"); INFER_OK(op, "[1,?,21]", "[d0_0,d0_1,20];[d0_0,d0_1,20]"); INFER_OK(op, "[1,?,21,?]", "[d0_0,d0_1,d0_2,20];[d0_0,d0_1,d0_2,20]"); INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "[]"); INFER_ERROR("input must have last dimension >= k = 20 but is 1", op, "[1]"); INFER_ERROR("input must have last dimension >= k = 20 but is 4", op, "[1,2,3,4]"); set_k(-1); INFER_ERROR("Need k >= 0, got -1", op, "[1,2,3,4]"); } TEST(NNOpsTest, TopKV2_ShapeFn) { ShapeInferenceTestOp op("TopKV2"); op.input_tensors.resize(2); Tensor k_t; op.input_tensors[1] = &k_t; k_t = test::AsScalar<int32>(20); INFER_OK(op, "?;[]", "?;?"); INFER_OK(op, "[20];[]", "[20];[20]"); INFER_OK(op, "[1,?,21];[]", "[d0_0,d0_1,20];[d0_0,d0_1,20]"); INFER_OK(op, "[1,?,21,?];[]", "[d0_0,d0_1,d0_2,20];[d0_0,d0_1,d0_2,20]"); INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "[];[]"); INFER_ERROR("input must have last dimension >= k = 20 but is 1", op, "[1];[]"); INFER_ERROR("input must have last dimension >= k = 20 but is 4", op, "[1,2,3,4];[]"); k_t = test::AsScalar<int32>(-1); INFER_ERROR( "Dimension size, given by scalar input 1, must be non-negative but is -1", op, "[1,2,3,4];[]"); } TEST(NNOpsTest, NthElement_ShapeFn) { ShapeInferenceTestOp op("NthElement"); op.input_tensors.resize(2); Tensor n_t; op.input_tensors[1] = &n_t; n_t = test::AsScalar<int32>(20); INFER_OK(op, "?;[]", "?"); INFER_OK(op, "[21];[]", "[]"); INFER_OK(op, "[2,?,?];[]", "[d0_0,d0_1]"); INFER_OK(op, "[?,3,?,21];[]", "[d0_0,d0_1,d0_2]"); INFER_ERROR("Shape must be at least rank 1 but is rank 0", op, "[];[]"); INFER_ERROR("Input must have last dimension > n = 20 but is 1", op, "[1];[]"); INFER_ERROR("Input must have last dimension > n = 20 but is 20", op, "[1,2,3,20];[]"); n_t = test::AsScalar<int32>(-1); INFER_ERROR( "Dimension size, given by scalar input 1, must be non-negative but is -1", op, "[1,2,3,4];[]"); } TEST(NNOpsTest, BatchNormWithGlobalNormalization_ShapeFn) { ShapeInferenceTestOp op("BatchNormWithGlobalNormalization"); INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?;?;?;?"); INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;[1,2,3];?;?;?"); INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;[1,2,3];?;?"); INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;?;[1,2,3];?"); INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;?;?;[1,2,3]"); INFER_OK(op, "?;?;?;?;?", "[?,?,?,?]"); INFER_OK(op, "?;[1];?;?;?", "[?,?,?,d1_0]"); INFER_OK(op, "?;?;[1];?;?", "[?,?,?,d2_0]"); INFER_OK(op, "?;?;?;[1];?", "[?,?,?,d3_0]"); INFER_OK(op, "?;?;?;?;[1]", "[?,?,?,d4_0]"); INFER_OK(op, "[1,2,3,4];[4];[4];[4];[4]", "[d0_0,d0_1,d0_2,d0_3|d1_0|d2_0|d3_0|d4_0]"); } TEST(NNOpsTest, QuantizedBatchNormWithGlobalNormalization_ShapeFn) { ShapeInferenceTestOp op("QuantizedBatchNormWithGlobalNormalization"); 
INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?;?;?;?;?;?;?;?;?;?;?;?;?;?"); INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;?;[1,2,3];?;?;?;?;?;?;?;?;?;?;?"); INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;?;?;?;?;[1,2,3];?;?;?;?;?;?;?;?"); INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;?;?;?;?;?;?;?;[1,2,3];?;?;?;?;?"); INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;?;?;?;?;?;?;?;?;?;?;[1,2,3];?;?"); INFER_OK(op, "?;[];[];?;[];[];?;[];[];?;[];[];?;[];[]", "[?,?,?,?];[];[]"); INFER_OK(op, "?;[];[];[1];[];[];?;[];[];?;[];[];?;[];[]", "[?,?,?,d3_0];[];[]"); INFER_OK(op, "?;[];[];?;[];[];[1];[];[];?;[];[];?;[];[]", "[?,?,?,d6_0];[];[]"); INFER_OK(op, "?;[];[];?;[];[];?;[];[];[1];[];[];?;[];[]", "[?,?,?,d9_0];[];[]"); INFER_OK(op, "?;[];[];?;[];[];?;[];[];?;[];[];[1];[];[]", "[?,?,?,d12_0];[];[]"); INFER_OK(op, "[1,2,3,4];[];[];[4];[];[];[4];[];[];[4];[];[];[4];[];[]", "[d0_0,d0_1,d0_2,d0_3|d3_0|d6_0|d9_0|d12_0];[];[]"); } TEST(NNOpsTest, BatchNormWithGlobalNormalizationGrad_ShapeFn) { ShapeInferenceTestOp op("BatchNormWithGlobalNormalizationGrad"); INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?;?;?;?"); INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;[1,2,3];?;?;?"); INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;[1,2,3];?;?"); INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;?;[1,2,3];?"); INFER_ERROR("Shapes must be equal rank, but are 4 and 3", op, "?;?;?;?;[1,2,3]"); INFER_OK(op, "?;?;?;?;?", "[?,?,?,?];[?];[?];[?];[?]"); INFER_OK(op, "?;[1];?;?;?", "[?,?,?,d1_0];[d1_0];[d1_0];[d1_0];[d1_0]"); INFER_OK(op, "?;?;[1];?;?", "[?,?,?,d2_0];[d2_0];[d2_0];[d2_0];[d2_0]"); INFER_OK(op, "?;?;?;[1];?", "[?,?,?,d3_0];[d3_0];[d3_0];[d3_0];[d3_0]"); INFER_OK(op, "[1,?,3,?];[?];[?];[?];[?,2,?,4]", "[d0_0,d4_1,d0_2,d4_3];[d4_3];[d4_3];[d4_3];[d4_3]"); } TEST(NNOpsTest, FusedBatchNorm_ShapeFn) { ShapeInferenceTestOp op("FusedBatchNorm"); auto set_op = [&op](bool is_training, float exponential_avg_factor, string data_format) { TF_ASSERT_OK(NodeDefBuilder("test", "FusedBatchNorm") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("data_format", data_format) .Attr("is_training", is_training) .Attr("exponential_avg_factor", exponential_avg_factor) .Finalize(&op.node_def)); }; set_op(true, 1.0, "NHWC"); INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?;?;?;?"); INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;[1,2,3];?;?;?"); INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;[1,2,3];?;?"); INFER_OK(op, "?;?;?;?;?", "[?,?,?,?];[?];[?];[?];[?]"); INFER_OK(op, "?;[1];?;?;?", "[?,?,?,d1_0];[d1_0];[d1_0];[d1_0];[d1_0]"); INFER_OK(op, "?;?;[1];?;?", "[?,?,?,d2_0];[d2_0];[d2_0];[d2_0];[d2_0]"); INFER_OK(op, "[1,2,3,4];[4];[4];?;?", "[d0_0,d0_1,d0_2,d0_3|d1_0|d2_0];" "[d0_3|d1_0|d2_0];[d0_3|d1_0|d2_0];" "[d0_3|d1_0|d2_0];[d0_3|d1_0|d2_0]"); set_op(true, 0.5, "NHWC"); INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?;?;?;?"); INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;[1,2,3];?;?;?"); INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;[1,2,3];?;?"); INFER_OK(op, "?;?;?;?;?", "[?,?,?,?];[?];[?];[?];[?]"); INFER_OK(op, "?;[1];?;?;?", "[?,?,?,d1_0];[d1_0];[d1_0];[d1_0];[d1_0]"); INFER_OK(op, "?;?;[1];?;?", "[?,?,?,d2_0];[d2_0];[d2_0];[d2_0];[d2_0]"); INFER_OK(op, "[1,2,3,4];[4];[4];?;?", "[d0_0,d0_1,d0_2,d0_3|d1_0|d2_0];" 
"[d0_3|d1_0|d2_0];[d0_3|d1_0|d2_0];" "[d0_3|d1_0|d2_0];[d0_3|d1_0|d2_0]"); set_op(true, 1.0, "NCHW"); INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?;?;?;?"); INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;[1,2,3];?;?;?"); INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;[1,2,3];?;?"); INFER_OK(op, "?;?;?;?;?", "[?,?,?,?];[?];[?];[?];[?]"); INFER_OK(op, "?;[1];?;?;?", "[?,d1_0,?,?];[d1_0];[d1_0];[d1_0];[d1_0]"); INFER_OK(op, "?;?;[1];?;?", "[?,d2_0,?,?];[d2_0];[d2_0];[d2_0];[d2_0]"); INFER_OK(op, "[1,4,2,3];[4];[4];?;?", "[d0_0,d0_1|d1_0|d2_0,d0_2,d0_3];" "[d0_1|d1_0|d2_0];[d0_1|d1_0|d2_0];" "[d0_1|d1_0|d2_0];[d0_1|d1_0|d2_0]"); set_op(false, 1.0, "NHWC"); INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?;?;?;?"); INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;[1,2,3];?;?;?"); INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;[1,2,3];?;?"); INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;?;[1,2,3];?"); INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;?;?;[1,2,3]"); INFER_OK(op, "?;?;?;?;?", "[?,?,?,?];[?];[?];[?];[?]"); INFER_OK(op, "?;[1];?;?;?", "[?,?,?,d1_0];[d1_0];[d1_0];[d1_0];[d1_0]"); INFER_OK(op, "?;?;[1];?;?", "[?,?,?,d2_0];[d2_0];[d2_0];[d2_0];[d2_0]"); INFER_OK(op, "?;?;?;[1];?", "[?,?,?,d3_0];[d3_0];[d3_0];[d3_0];[d3_0]"); INFER_OK(op, "?;?;?;?;[1]", "[?,?,?,d4_0];[d4_0];[d4_0];[d4_0];[d4_0]"); INFER_OK(op, "[1,2,3,4];[4];[4];[4];[4]", "[d0_0,d0_1,d0_2,d0_3|d1_0|d2_0|d3_0|d4_0];" "[d0_3|d1_0|d2_0|d3_0|d4_0];[d0_3|d1_0|d2_0|d3_0|d4_0];" "[d0_3|d1_0|d2_0|d3_0|d4_0];[d0_3|d1_0|d2_0|d3_0|d4_0]"); set_op(false, 1.0, "NCHW"); INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?;?;?;?"); INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;[1,2,3];?;?;?"); INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;[1,2,3];?;?"); INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;?;[1,2,3];?"); INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;?;?;[1,2,3]"); INFER_OK(op, "?;?;?;?;?", "[?,?,?,?];[?];[?];[?];[?]"); INFER_OK(op, "?;[1];?;?;?", "[?,d1_0,?,?];[d1_0];[d1_0];[d1_0];[d1_0]"); INFER_OK(op, "?;?;[1];?;?", "[?,d2_0,?,?];[d2_0];[d2_0];[d2_0];[d2_0]"); INFER_OK(op, "?;?;?;[1];?", "[?,d3_0,?,?];[d3_0];[d3_0];[d3_0];[d3_0]"); INFER_OK(op, "?;?;?;?;[1]", "[?,d4_0,?,?];[d4_0];[d4_0];[d4_0];[d4_0]"); INFER_OK(op, "[1,4,2,3];[4];[4];[4];[4]", "[d0_0,d0_1|d1_0|d2_0|d3_0|d4_0,d0_2,d0_3];" "[d0_1|d1_0|d2_0|d3_0|d4_0];[d0_1|d1_0|d2_0|d3_0|d4_0];" "[d0_1|d1_0|d2_0|d3_0|d4_0];[d0_1|d1_0|d2_0|d3_0|d4_0]"); } TEST(NNOpsTest, FusedBatchNormGrad_ShapeFn) { ShapeInferenceTestOp op("FusedBatchNormGrad"); auto set_op = [&op](string data_format) { TF_ASSERT_OK(NodeDefBuilder("test", "FusedBatchNormGrad") .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Input(FakeInput(DT_FLOAT)) .Attr("data_format", data_format) .Finalize(&op.node_def)); }; set_op("NCHW"); INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?;?;?;?"); INFER_ERROR("Shape must be rank 4 but is rank 3", op, "?;[1,2,3];?;?;?"); INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;[1,2,3];?;?"); INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;?;[1,2,3];?"); INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;?;?;[1,2,3]"); INFER_OK(op, "?;?;?;?;?", "[?,?,?,?];[?];[?];[0];[0]"); INFER_OK(op, "?;?;[1];?;?", "[?,d2_0,?,?];[d2_0];[d2_0];[0];[0]"); INFER_OK(op, "?;?;?;[1];?", "[?,d3_0,?,?];[d3_0];[d3_0];[0];[0]"); 
INFER_OK(op, "?;?;?;?;[1]", "[?,d4_0,?,?];[d4_0];[d4_0];[0];[0]"); INFER_OK(op, "[1,4,2,3];[1,4,2,3];[4];[4];[4]", "[d0_0,d0_1|d2_0|d3_0|d4_0,d0_2,d0_3];" "[d0_1|d2_0|d3_0|d4_0];[d0_1|d2_0|d3_0|d4_0];[0];[0]"); set_op("NHWC"); INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?;?;?;?"); INFER_ERROR("Shape must be rank 4 but is rank 3", op, "?;[1,2,3];?;?;?"); INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;[1,2,3];?;?"); INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;?;[1,2,3];?"); INFER_ERROR("Shape must be rank 1 but is rank 3", op, "?;?;?;?;[1,2,3]"); INFER_OK(op, "?;?;?;?;?", "[?,?,?,?];[?];[?];[0];[0]"); INFER_OK(op, "?;?;[1];?;?", "[?,?,?,d2_0];[d2_0];[d2_0];[0];[0]"); INFER_OK(op, "?;?;?;[1];?", "[?,?,?,d3_0];[d3_0];[d3_0];[0];[0]"); INFER_OK(op, "?;?;?;?;[1]", "[?,?,?,d4_0];[d4_0];[d4_0];[0];[0]"); INFER_OK(op, "[1,2,3,4];[1,2,3,4];[4];[4];[4]", "[d0_0,d0_1,d0_2,d0_3|d2_0|d3_0|d4_0];" "[d0_3|d2_0|d3_0|d4_0];[d0_3|d2_0|d3_0|d4_0];[0];[0]"); } TEST(NNOpsTest, Conv2DBackpropInput_ShapeFn) { ShapeInferenceTestOp op("Conv2DBackpropInput"); INFER_ERROR("input_sizes to contain 4 values or 2 values", op, "[3];[?,?,?,?];[?,?,?,?]"); INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[4];[?,?,?,?];[?,?,?]"); INFER_OK(op, "[4];[?,?,2,?];[1,?,?,?]", "[d2_0,?,?,?]"); INFER_OK(op, "[2];[?,?,2,?];[1,?,?,?]", "[d2_0,?,?,d1_2]"); } TEST(NNOpsTest, Conv3DBackpropInput_ShapeFn) { ShapeInferenceTestOp op("Conv3DBackpropInput"); INFER_ERROR("Shape must be rank 5 but is rank 3", op, "[1,2,3];?;?"); INFER_OK(op, "?;?;?", "[?,?,?,?,?]"); INFER_OK(op, "[?,?,?,?,?];?;?", "in0"); INFER_OK(op, "[?,2,?,4,?];?;?", "in0"); } TEST(NNOpsTest, Conv3DBackpropFilter_ShapeFn) { ShapeInferenceTestOp op("Conv3DBackpropFilter"); INFER_ERROR("Shape must be rank 5 but is rank 3", op, "?;[1,2,3];?"); INFER_OK(op, "?;?;?", "[?,?,?,?,?]"); INFER_OK(op, "?;[?,?,?,?,?];?", "in1"); INFER_OK(op, "?;[?,2,?,4,?];?", "in1"); } TEST(NNOpsTest, MaxPool3DGrad_ShapeFn) { ShapeInferenceTestOp op("MaxPool3DGrad"); INFER_ERROR("Shape must be rank 5 but is rank 3", op, "[1,2,3];?;?"); INFER_OK(op, "?;?;?", "[?,?,?,?,?]"); INFER_OK(op, "[?,?,?,?,?];?;?", "in0"); INFER_OK(op, "[?,2,?,4,?];?;?", "in0"); } TEST(NNOpsTest, LRNGrad_ShapeFn) { ShapeInferenceTestOp op("LRNGrad"); INFER_OK(op, "[1,?,?,4];[?,2,?,?];[?,?,3,?]", "[d0_0,d1_1,d2_2,d0_3]"); INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?;?"); INFER_ERROR("Shapes must be equal rank, but are 4 and 3", op, "?;[1,2,3];?"); INFER_ERROR("Shapes must be equal rank, but are 4 and 3", op, "?;?;[1,2,3]"); } TEST(NNOpsTest, MaxPoolGrad_ShapeFn) { for (const char* op_name : {"MaxPoolGrad", "MaxPoolGradWithArgmax"}) { ShapeInferenceTestOp op(op_name); INFER_ERROR("Shape must be rank 4 but is rank 3", op, "[1,2,3];?;?"); INFER_OK(op, "?;?;?", "[?,?,?,?]"); INFER_OK(op, "[?,?,?,?];?;?", "in0"); INFER_OK(op, "[?,2,?,4];?;?", "in0"); } } TEST(NNOpsTest, Dilation2DBackpropInput_ShapeFn) { ShapeInferenceTestOp op("Dilation2DBackpropInput"); INFER_OK(op, "?;?;?", "in0"); INFER_OK(op, "?;[?,?,?,?,?];?", "in0"); INFER_OK(op, "?;[?,2,?,4,?];?", "in0"); } TEST(NNOpsTest, Dilation2DBackpropFilter_ShapeFn) { ShapeInferenceTestOp op("Dilation2DBackpropFilter"); INFER_OK(op, "?;?;?", "in1"); INFER_OK(op, "?;[?,?,?,?,?];?", "in1"); INFER_OK(op, "?;[?,2,?,4,?];?", "in1"); } TEST(NNOpsTest, MergeBothInputs_ShapeFn) { for (const char* op_name : {"ReluGrad", "Relu6Grad", "EluGrad", "SeluGrad", "SoftplusGrad", "SoftsignGrad"}) { ShapeInferenceTestOp op(op_name); 
INFER_OK(op, "?;?", "in0|in1"); INFER_OK(op, "?;[1,?,3]", "in1"); INFER_OK(op, "[1,?,3];?", "in0"); INFER_OK(op, "[1,?];[?,2]", "[d0_0,d1_1]"); INFER_ERROR("Dimension 1 in both shapes must be equal, but are 3 and 2", op, "[1,3];[?,2]"); } } TEST(NNOpsTest, SoftmaxCrossEntropyWithLogits_ShapeFn) { ShapeInferenceTestOp op("SoftmaxCrossEntropyWithLogits"); INFER_OK(op, "?;?", "[?];[?,?]"); INFER_OK(op, "[?,?];[?,?]", "[d0_0|d1_0];in0|in1"); INFER_OK(op, "[1,2];[?,2]", "[d0_0];in0"); INFER_OK(op, "[1,?];[?,2]", "[d0_0];[d0_0,d0_1|d1_1]"); INFER_OK(op, "[?,2];[1,2]", "[d1_0];in1"); INFER_ERROR("Shape must be broadcasted with rank 2", op, "[1,2,3];?"); INFER_ERROR("Shape must be broadcasted with rank 2", op, "?;[1,2,3]"); INFER_OK(op, "[1,4];[2,4]", "[d1_0];[d1_0,d0_1|d1_1]"); INFER_OK(op, "[2,4];[2,1]", "[d0_0];[d0_0|d1_0,d0_1]"); INFER_OK(op, "[1,?];[2,4]", "[d1_0];[d1_0,d0_1|d1_1]"); INFER_OK(op, "[2,4];[?,1]", "[d0_0];[d0_0|d1_0,d0_1]"); } TEST(NNOpsTest, SparseSoftmaxCrossEntropyWithLogits_ShapeFn) { ShapeInferenceTestOp op("SparseSoftmaxCrossEntropyWithLogits"); INFER_OK(op, "?;?", "[?];[?,?]"); INFER_OK(op, "[?,?];[?]", "[d0_0|d1_0];[d0_0|d1_0,d0_1]"); INFER_OK(op, "[1,2];[1]", "[d0_0|d1_0];[d0_0|d1_0,d0_1]"); INFER_OK(op, "[?,2];[1]", "[d1_0];[d1_0,d0_1]"); INFER_ERROR("Dimensions must be equal, but are 1 and 2", op, "[1,?];[2]"); INFER_ERROR("Shape must be rank 2 but is rank 3", op, "[1,2,3];?"); INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;[1,2]"); } TEST(NNOpsTest, InTopK_ShapeFn) { ShapeInferenceTestOp op("InTopK"); INFER_OK(op, "?;?", "[?]"); INFER_OK(op, "[?,?];[?]", "[d0_0|d1_0]"); INFER_OK(op, "[1,2];[1]", "[d0_0|d1_0]"); INFER_OK(op, "[?,2];[1]", "[d1_0]"); INFER_ERROR("Dimensions must be equal, but are 1 and 2", op, "[1,?];[2]"); INFER_ERROR("Shape must be rank 2 but is rank 3", op, "[1,2,3];?"); INFER_ERROR("Shape must be rank 1 but is rank 2", op, "?;[1,2]"); } TEST(NNOpsTest, Dilation2DShapeTest) { ShapeInferenceTestOp op("Dilation2D"); auto set_op = [&op](const std::vector<int32>& strides, const std::vector<int32>& rates, const string& padding) { TF_ASSERT_OK(NodeDefBuilder("test", "Dilation2D") .Input("input", 0, DT_FLOAT) .Input("filter", 0, DT_FLOAT) .Attr("strides", strides) .Attr("rates", rates) .Attr("padding", padding) .Finalize(&op.node_def)); }; set_op({1, 1, 1, 1}, {1, 1, 1, 1}, "VALID"); INFER_OK(op, "[1,2,2,2];[1,1,2]", "[d0_0,2,2,d1_2]"); set_op({1, 1, 1, 1}, {1, 2, 2, 1}, "VALID"); INFER_OK(op, "[1,7,7,2];[2,2,2]", "[d0_0,5,5,d1_2]"); } TEST(NNOpsTest, FractionalPool_ShapeFn) { for (const char* op_name : {"FractionalAvgPool", "FractionalMaxPool"}) { ShapeInferenceTestOp op(op_name); auto set_op = [&op, op_name](const std::vector<float>& pooling_ratio) { TF_ASSERT_OK(NodeDefBuilder("test", op_name) .Input("input", 0, DT_FLOAT) .Attr("pooling_ratio", pooling_ratio) .Finalize(&op.node_def)); }; set_op(std::vector<float>{2.0f, 1, 1.5f, 4.0f}); INFER_ERROR("must be rank 4", op, "[?,?,?]"); INFER_OK(op, "?", "[?,?,?,?];[?];[?]"); INFER_OK(op, "[?,?,?,?]", "[?,?,?,?];[?];[?]"); INFER_OK(op, "[10,20,30,40]", "[5,20,20,10];[20];[20]"); INFER_OK(op, "[?,20,30,40]", "[?,20,20,10];[20];[20]"); INFER_OK(op, "[10,?,30,40]", "[5,?,20,10];[?];[20]"); INFER_OK(op, "[10,20,?,40]", "[5,20,?,10];[20];[?]"); INFER_OK(op, "[10,20,30,?]", "[5,20,20,?];[20];[20]"); set_op(std::vector<float>{.5, 1.0, 1.5}); INFER_ERROR("pooling_ratio field", op, "?"); set_op(std::vector<float>{1, 2, 3, 4, 5}); INFER_ERROR("pooling_ratio field", op, "?"); set_op(std::vector<float>{-1, 2, 3, 
4}); INFER_ERROR("is negative", op, "[1,2,3,4]"); } } TEST(NNOpsTest, FractionalMaxPoolGrad) { ShapeInferenceTestOp op("FractionalMaxPoolGrad"); INFER_ERROR("must be rank 4", op, "[?,?,?];?;?;?;?"); INFER_OK(op, "?;?;?;?;?", "[?,?,?,?]"); INFER_OK(op, "[?,?,3,4];?;?;?;?", "in0"); } TEST(NNOpsTest, FractionalAvgPoolGrad) { ShapeInferenceTestOp op("FractionalAvgPoolGrad"); op.input_tensors.resize(1); INFER_OK(op, "?;?;?;?", "[?,?,?,?]"); std::vector<int32> shape{1, 2, 3, 4}; Tensor shape_t = test::AsTensor<int32>(shape); op.input_tensors[0] = &shape_t; INFER_OK(op, "[5];?;?;?", "[1,2,3,4]"); } }
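The registrations and tests above repeatedly use the same shape-function recipe that FractionalPoolShapeFn demonstrates: constrain the input rank with WithRank, read an attribute with GetAttr, then build the output dimension by dimension, emitting MakeDim for known sizes and UnknownDim otherwise. The sketch below restates that recipe for a hypothetical "ExampleScaleDim" op; it is illustrative only and not an op defined in this repository.

#include <cmath>
#include <vector>

#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"

namespace tensorflow {

using shape_inference::DimensionHandle;
using shape_inference::InferenceContext;
using shape_inference::ShapeHandle;

REGISTER_OP("ExampleScaleDim")
    .Input("x: float")
    .Output("y: float")
    .Attr("scale: float = 2.0")
    .SetShapeFn([](InferenceContext* c) {
      ShapeHandle input;
      // Require a rank-2 input, mirroring the WithRank checks used above.
      TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 2, &input));
      float scale;
      TF_RETURN_IF_ERROR(c->GetAttr("scale", &scale));
      std::vector<DimensionHandle> dims;
      for (int i = 0; i < 2; ++i) {
        DimensionHandle d = c->Dim(input, i);
        if (c->ValueKnown(d)) {
          // Known dim: emit a concrete scaled size.
          dims.push_back(c->MakeDim(
              static_cast<int64_t>(std::floor(c->Value(d) / scale))));
        } else {
          // Unknown dim: propagate the unknown through to the output.
          dims.push_back(c->UnknownDim());
        }
      }
      c->set_output(0, c->MakeShape(dims));
      return absl::OkStatus();
    });

}  // namespace tensorflow

A test for this sketch would look exactly like the FractionalPool cases above: known input dims yield concrete output dims, and any unknown input dim yields "?" in the corresponding output position.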
1,062
cpp
tensorflow/tensorflow
determinism
third_party/xla/xla/tsl/util/determinism.cc
third_party/xla/xla/service/gpu/determinism_test.cc
#ifndef XLA_TSL_UTIL_DETERMINISM_H_ #define XLA_TSL_UTIL_DETERMINISM_H_ namespace tsl { bool OpDeterminismRequired(); bool OpOrderDeterminismRequired(); void EnableOpDeterminism(bool enabled); } #endif #include "xla/tsl/util/determinism.h" #include "absl/strings/string_view.h" #include "xla/tsl/util/env_var.h" #include "tsl/platform/mutex.h" namespace tsl { namespace { class DeterminismState { public: explicit DeterminismState(absl::string_view env_var) : env_var_(env_var) {} bool Required() { mutex_lock l(*mutex_); if (state_ == Value::NOT_SET) { bool env_var_set = false; TF_CHECK_OK(tsl::ReadBoolFromEnvVar(env_var_, false, &env_var_set)); state_ = env_var_set ? Value::ENABLED : Value::DISABLED; } return state_ == Value::ENABLED; } void Enable(bool enabled) { mutex_lock l(*mutex_); state_ = enabled ? Value::ENABLED : Value::DISABLED; } private: absl::string_view env_var_; enum class Value { DISABLED, ENABLED, NOT_SET }; mutex* mutex_ = new mutex; Value state_ = Value::NOT_SET; }; } DeterminismState OpDeterminismState = DeterminismState("TF_DETERMINISTIC_OPS"); DeterminismState OpOrderDeterminismState = DeterminismState("TF_DETERMINISTIC_ORDER"); bool OpDeterminismRequired() { return OpDeterminismState.Required(); } void EnableOpDeterminism(bool enabled) { OpDeterminismState.Enable(enabled); } bool OpOrderDeterminismRequired() { return OpOrderDeterminismState.Required(); } }
#include <memory> #include <optional> #include <utility> #include <vector> #include <gtest/gtest.h> #include "absl/strings/string_view.h" #include "xla/hlo/ir/hlo_module.h" #include "xla/literal.h" #include "xla/service/gpu/autotuner_util.h" #include "xla/service/gpu/tests/gpu_codegen_test.h" #include "xla/stream_executor/device_description.h" #include "xla/stream_executor/gpu/gpu_timer.h" #include "xla/tests/hlo_test_base.h" #include "xla/tests/literal_test_util.h" #include "xla/tests/test_utils.h" #include "xla/xla.pb.h" #include "tsl/platform/statusor.h" namespace xla { namespace gpu { class DeterminismTest : public GpuCodegenTest { public: DeterminismTest() : debug_options_(HloTestBase::GetDebugOptionsForTest()) { debug_options_.set_xla_gpu_exclude_nondeterministic_ops(true); se::gpu::GpuTimer::ReturnRandomDurationsForTesting(); } void AssertDeterminism(absl::string_view hlo_string, int num_runs = 10) { std::vector<Literal> fake_arguments; std::vector<Literal*> fake_arguments_ptrs; std::optional<Literal> canonical_output; for (int i = 0; i < num_runs; ++i) { AutotunerUtil::ClearAutotuneResults(); TF_ASSERT_OK_AND_ASSIGN(std::unique_ptr<HloModule> module, ParseAndReturnVerifiedModule(hlo_string)); if (i == 0) { fake_arguments = MakeFakeArguments(module.get()).value(); for (Literal& literal : fake_arguments) { fake_arguments_ptrs.push_back(&literal); } } TF_ASSERT_OK_AND_ASSIGN(Literal output, Execute(std::move(module), fake_arguments_ptrs)); if (!canonical_output.has_value()) { canonical_output = std::move(output); } else { ASSERT_TRUE(LiteralTestUtil::Equal(*canonical_output, output)); } } } DebugOptions GetDebugOptionsForTest() override { return debug_options_; } DebugOptions debug_options_; }; TEST_F(DeterminismTest, CublasDot) { constexpr absl::string_view kHloText = R"( ENTRY e { p0 = f32[128,128] parameter(0) p1 = f32[128,128] parameter(1) ROOT d = f32[128,128] dot(p0, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} })"; #if TENSORFLOW_USE_ROCM auto rocm = backend() .default_stream_executor() ->GetDeviceDescription() .rocm_compute_capability(); if (!rocm.has_hipblaslt()) { GTEST_SKIP() << "No hipblas-lt support on this architecture!"; } #endif debug_options_.set_xla_gpu_triton_fusion_level(0); MatchOptimizedHlo(kHloText, R"(; CHECK: custom_call_target="__cublas$gemm")"); AssertDeterminism(kHloText); debug_options_.set_xla_gpu_enable_cublaslt(true); MatchOptimizedHlo(kHloText, R"(; CHECK: custom_call_target="__cublas$lt$matmul")"); AssertDeterminism(kHloText); } TEST_F(DeterminismTest, DeterministicTritonGemmUsesDefaultConfig) { #if GOOGLE_CUDA auto comp = backend() .default_stream_executor() ->GetDeviceDescription() .cuda_compute_capability(); if (!comp.IsAtLeast(se::CudaComputeCapability::VOLTA)) { GTEST_SKIP() << "Triton not used on pre-Volta GPUs"; } #elif TENSORFLOW_USE_ROCM GTEST_SKIP() << "Triton Gemm rewriter is not yet supported on ROCM"; #endif constexpr absl::string_view kHloText = R"( ENTRY e { p0 = bf16[128,128] parameter(0) p0_convert = f32[128,128] convert(p0) p1 = f32[128,128] parameter(1) ROOT d = f32[128,128] dot(p0_convert, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} })"; debug_options_.set_xla_gpu_deterministic_ops(true); AutotunerUtil::ClearAutotuneResults(); MatchOptimizedHlo(kHloText, R"( CHECK: __triton_gemm CHECK: {"block_m":"32","block_n":"32","block_k":"32","split_k":"1","num_stages":"1","num_warps":"4","num_ctas":"1"} )"); AssertDeterminism(kHloText, 3); } TEST_F(DeterminismTest, ExcludingNonDeterministicOpsDoesNotDisableAutotuning) { 
#if GOOGLE_CUDA auto comp = backend() .default_stream_executor() ->GetDeviceDescription() .cuda_compute_capability(); if (!comp.IsAtLeast(se::CudaComputeCapability::VOLTA)) { GTEST_SKIP() << "Triton not used on pre-Volta GPUs"; } #elif TENSORFLOW_USE_ROCM GTEST_SKIP() << "Triton Gemm rewriter is not yet supported on ROCM"; #endif debug_options_.set_xla_gpu_cublas_fallback(false); ASSERT_FALSE(debug_options_.xla_gpu_deterministic_ops()); AutotunerUtil::ClearAutotuneResults(); MatchOptimizedHlo(R"( ENTRY e { p0 = bf16[128,128] parameter(0) p0_convert = f32[128,128] convert(p0) p1 = f32[128,128] parameter(1) ROOT d = f32[128,128] dot(p0_convert, p1), lhs_contracting_dims={1}, rhs_contracting_dims={0} })", R"( CHECK: __triton_gemm CHECK-NOT: {"block_m":"32","block_n":"32","block_k":"32","split_k":"1","num_stages":"1","num_warps":"4","num_ctas":"1"} )"); } TEST_F(DeterminismTest, Conv) { constexpr absl::string_view kHloText = R"( ENTRY e { input = f32[16,3,64,64] parameter(0) filter = f32[3,3,3,64] parameter(1) conv = f32[16,64,64,64] convolution(input, filter), window={size=3x3 pad=1_1x1_1}, dim_labels=bf01_01io->bf01, feature_group_count=1 })"; AssertDeterminism(kHloText); } } }
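Illustrative only: the AssertDeterminism helper above boils down to "run the same computation several times and require every result to equal the first one". Below is a minimal, framework-free C++ sketch of that pattern; the Computation typedef, the float-vector result type, and the exact-equality comparison are assumptions for this sketch, not part of the XLA test API.

#include <cassert>
#include <functional>
#include <vector>

using Computation = std::function<std::vector<float>()>;

// Returns true if `run` produces an identical result on every invocation.
bool IsDeterministic(const Computation& run, int num_runs = 10) {
  const std::vector<float> canonical = run();  // first run is the reference
  for (int i = 1; i < num_runs; ++i) {
    if (run() != canonical) return false;      // any mismatch means nondeterminism
  }
  return true;
}

int main() {
  Computation constant_fn = [] { return std::vector<float>{1.0f, 2.0f}; };
  assert(IsDeterministic(constant_fn));  // a pure function is trivially deterministic
  return 0;
}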
1,063
cpp
tensorflow/tensorflow
resource_operation_safety_analysis
tensorflow/compiler/jit/resource_operation_safety_analysis.cc
tensorflow/compiler/jit/resource_operation_safety_analysis_test.cc
#ifndef TENSORFLOW_COMPILER_JIT_RESOURCE_OPERATION_SAFETY_ANALYSIS_H_ #define TENSORFLOW_COMPILER_JIT_RESOURCE_OPERATION_SAFETY_ANALYSIS_H_ #include "xla/service/graphcycles/graphcycles.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/graph/graph.h" namespace tensorflow { Status ComputeIncompatibleResourceOperationPairs( const Graph& g, const FunctionLibraryDefinition* flib_def, const std::function<Status(const Node&, bool*)>& resource_ops_to_ignore, std::vector<std::pair<int, int>>* result); } #endif #include "tensorflow/compiler/jit/resource_operation_safety_analysis.h" #include "absl/container/flat_hash_set.h" #include "absl/memory/memory.h" #include "absl/strings/str_join.h" #include "absl/types/optional.h" #include "tensorflow/compiler/jit/xla_cluster_util.h" #include "tensorflow/compiler/tf2xla/resource_operation_table.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/graph/algorithm.h" #include "tensorflow/core/graph/tensor_id.h" #include "tensorflow/core/lib/hash/hash.h" namespace tensorflow { namespace { Status XlaResourceOpKindForNode( const Node& n, const FunctionLibraryDefinition* flib_def, const std::function<Status(const Node&, bool*)>& resource_ops_to_ignore, std::optional<XlaResourceOpKind>* out_resource_op_kind) { bool should_ignore = false; if (resource_ops_to_ignore) { TF_RETURN_IF_ERROR(resource_ops_to_ignore(n, &should_ignore)); } if (should_ignore) { *out_resource_op_kind = std::nullopt; return absl::OkStatus(); } const XlaResourceOpInfo* op_info = GetResourceOpInfoForOp(n.type_string()); if (op_info) { *out_resource_op_kind = op_info->kind(); return absl::OkStatus(); } if (MayCallFunction(n, flib_def)) { *out_resource_op_kind = XlaResourceOpKind::kReadWrite; } else { *out_resource_op_kind = std::nullopt; } return absl::OkStatus(); } bool IsEdgeSafe(XlaResourceOpKind from, XlaResourceOpKind to) { return from == XlaResourceOpKind::kRead || to == XlaResourceOpKind::kWrite; } using ResourceOp = std::pair<int, XlaResourceOpKind>; string ResourceOpToString(const ResourceOp& resource_op) { return absl::StrCat( resource_op.first, ": ", XlaResourceOpInfo::XlaResourceOpKindToString(resource_op.second)); } class ResourceOpSet { private: using Impl = absl::flat_hash_set<ResourceOp>; public: ResourceOpSet() = default; void Add(const ResourceOpSet& other) { CHECK(!frozen_); if (other.impl_ == impl_) { other.frozen_ = true; return; } if (!impl_) { other.frozen_ = true; impl_ = other.impl_; return; } for (ResourceOp resource_op : other) { Add(resource_op); } } void Add(const ResourceOp& resource_op) { CHECK(!frozen_); if (!IsCopy() && Contains(resource_op)) { return; } EnsureIsCopied(); impl_->insert(resource_op); } Impl::const_iterator begin() const { return impl_ ? impl_->begin() : GetEmptyImpl()->begin(); } Impl::const_iterator end() const { return impl_ ? 
impl_->end() : GetEmptyImpl()->end(); } bool Contains(const ResourceOp& resource_op) const { return impl_ != nullptr && impl_->count(resource_op); } private: bool IsCopy() const { return storage_ != nullptr; } void EnsureIsCopied() { if (storage_ == nullptr) { storage_ = std::make_unique<Impl>(); for (ResourceOp op : *this) { storage_->insert(op); } impl_ = storage_.get(); } } static Impl* GetEmptyImpl() { static Impl* empty_impl = new Impl; return empty_impl; } Impl* impl_ = nullptr; std::unique_ptr<Impl> storage_; mutable bool frozen_ = false; ResourceOpSet(const ResourceOpSet&) = delete; void operator=(const ResourceOpSet&) = delete; }; string ResourceOpSetToString(const ResourceOpSet& resource_op_set) { std::vector<string> elements_debug_string; std::transform(resource_op_set.begin(), resource_op_set.end(), std::back_inserter(elements_debug_string), ResourceOpToString); return absl::StrCat("{", absl::StrJoin(elements_debug_string, ","), "}"); } string NodeToString(const Node& n, XlaResourceOpKind resource_op_kind) { return absl::StrCat( "[", n.name(), ": ", n.type_string(), "(", XlaResourceOpInfo::XlaResourceOpKindToString(resource_op_kind), ")", "]"); } } Status ComputeIncompatibleResourceOperationPairs( const Graph& g, const FunctionLibraryDefinition* flib_def, const std::function<Status(const Node&, bool*)>& resource_ops_to_ignore, std::vector<std::pair<int, int>>* result) { CHECK(result->empty()); std::vector<Node*> rpo; GetReversePostOrder(g, &rpo, NodeComparatorName(), [](const Edge& edge) { return !edge.src()->IsNextIteration(); }); auto resource_op_set_for_node = std::make_unique<ResourceOpSet[]>(g.num_node_ids()); const bool vlog = VLOG_IS_ON(2); for (Node* n : rpo) { std::optional<XlaResourceOpKind> op_kind; TF_RETURN_IF_ERROR(XlaResourceOpKindForNode( *n, flib_def, resource_ops_to_ignore, &op_kind)); ResourceOpSet* resource_op_set = &resource_op_set_for_node[n->id()]; for (const Edge* e : n->in_edges()) { if (n->IsMerge() && e->src()->IsNextIteration()) { continue; } const ResourceOpSet& incoming_op_set = resource_op_set_for_node[e->src()->id()]; resource_op_set->Add(incoming_op_set); } if (op_kind) { for (ResourceOp incoming_op : *resource_op_set) { if (IsEdgeSafe(incoming_op.second, *op_kind)) { continue; } if (vlog) { VLOG(2) << "Unsafe edge: " << NodeToString(*g.FindNodeId(incoming_op.first), incoming_op.second) << " -> " << NodeToString(*n, *op_kind); } result->push_back({incoming_op.first, n->id()}); } if (op_kind != XlaResourceOpKind::kRead) { resource_op_set->Add({n->id(), *op_kind}); } } if (vlog) { VLOG(3) << n->name() << " -> " << ResourceOpSetToString(*resource_op_set); } } std::sort(result->begin(), result->end()); CHECK(std::unique(result->begin(), result->end()) == result->end()); return absl::OkStatus(); } }
#include "tensorflow/compiler/jit/resource_operation_safety_analysis.h" #include "tensorflow/cc/framework/ops.h" #include "tensorflow/cc/ops/array_ops.h" #include "tensorflow/cc/ops/control_flow_ops_internal.h" #include "tensorflow/cc/ops/function_ops.h" #include "tensorflow/cc/ops/functional_ops.h" #include "tensorflow/cc/ops/resource_variable_ops.h" #include "tensorflow/cc/ops/sendrecv_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/compiler/jit/defs.h" #include "tensorflow/compiler/tf2xla/xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/framework/node_def_util.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/graph/algorithm.h" #include "tensorflow/core/graph/graph_def_builder.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace { Node* MakeRead(const Scope& scope, const string& id) { Output var_handle = ops::VarHandleOp(scope.WithOpName("Var" + id), DT_FLOAT, TensorShape({})); Output read = ops::ReadVariableOp(scope.WithOpName("Read" + id), var_handle, DT_FLOAT); return read.node(); } Node* MakeWrite(const Scope& scope, const string& id) { Output var_handle = ops::VarHandleOp(scope.WithOpName("Var" + id), DT_FLOAT, TensorShape({})); Output value_to_write = ops::Const(scope.WithOpName("ValueToAssign" + id), 1.0f); ops::AssignVariableOp assign_op(scope.WithOpName("Assignee" + id), var_handle, value_to_write); return assign_op.operation.node(); } Node* MakeModify(const Scope& scope, const string& id) { Output var_handle = ops::VarHandleOp(scope.WithOpName("Var" + id), DT_FLOAT, TensorShape({})); Output value_to_write = ops::Const(scope.WithOpName("Increment" + id), 1.0f); ops::AssignAddVariableOp assign_add_op(scope.WithOpName("Increment" + id), var_handle, value_to_write); return assign_add_op.operation.node(); } Node* MakeNeutral(const Scope& scope, const string& id) { return ops::Const(scope.WithOpName("Const" + id), 42.0f).node(); } Status ComputeIncompatiblePairs(Graph* g, std::vector<std::pair<int, int>>* result) { FixupSourceAndSinkEdges(g); return ComputeIncompatibleResourceOperationPairs(*g, &g->flib_def(), {}, result); } TEST(ResourceOperationSafetyAnalysisTest, WriteRead) { Scope root = Scope::NewRootScope().ExitOnError(); Node* read = MakeRead(root, "R"); Node* write = MakeWrite(root, "W"); root.graph()->AddControlEdge(write, read); std::vector<std::pair<int, int>> incompatible_pairs; TF_ASSERT_OK(ComputeIncompatiblePairs(root.graph(), &incompatible_pairs)); ASSERT_EQ(incompatible_pairs.size(), 1); std::pair<int, int> write_read_pair = {write->id(), read->id()}; EXPECT_EQ(incompatible_pairs[0], write_read_pair); } TEST(ResourceOperationSafetyAnalysisTest, ReadWrite) { Scope root = Scope::NewRootScope().ExitOnError(); Node* read = MakeRead(root, "R"); Node* write = MakeWrite(root, "W"); root.graph()->AddControlEdge(read, write); std::vector<std::pair<int, int>> incompatible_pairs; TF_ASSERT_OK(ComputeIncompatiblePairs(root.graph(), &incompatible_pairs)); EXPECT_EQ(incompatible_pairs.size(), 0); } TEST(ResourceOperationSafetyAnalysisTest, ReadWriteNoEdges) { Scope root = Scope::NewRootScope().ExitOnError(); MakeRead(root, "R"); MakeWrite(root, "W"); std::vector<std::pair<int, int>> incompatible_pairs; TF_ASSERT_OK(ComputeIncompatiblePairs(root.graph(), &incompatible_pairs)); 
EXPECT_EQ(incompatible_pairs.size(), 0); } TEST(ResourceOperationSafetyAnalysisTest, ReadModify) { Scope root = Scope::NewRootScope().ExitOnError(); Node* read = MakeRead(root, "R"); Node* modify = MakeModify(root, "M"); root.graph()->AddControlEdge(read, modify); std::vector<std::pair<int, int>> incompatible_pairs; TF_ASSERT_OK(ComputeIncompatiblePairs(root.graph(), &incompatible_pairs)); EXPECT_EQ(incompatible_pairs.size(), 0); } TEST(ResourceOperationSafetyAnalysisTest, ModifyRead) { Scope root = Scope::NewRootScope().ExitOnError(); Node* read = MakeRead(root, "R"); Node* modify = MakeModify(root, "M"); root.graph()->AddControlEdge(modify, read); std::vector<std::pair<int, int>> incompatible_pairs; TF_ASSERT_OK(ComputeIncompatiblePairs(root.graph(), &incompatible_pairs)); ASSERT_EQ(incompatible_pairs.size(), 1); std::pair<int, int> modify_read_pair = {modify->id(), read->id()}; EXPECT_EQ(incompatible_pairs[0], modify_read_pair); } TEST(ResourceOperationSafetyAnalysisTest, ModifyWrite) { Scope root = Scope::NewRootScope().ExitOnError(); Node* modify = MakeModify(root, "M"); Node* write = MakeWrite(root, "W"); root.graph()->AddControlEdge(modify, write); std::vector<std::pair<int, int>> incompatible_pairs; TF_ASSERT_OK(ComputeIncompatiblePairs(root.graph(), &incompatible_pairs)); EXPECT_EQ(incompatible_pairs.size(), 0); } TEST(ResourceOperationSafetyAnalysisTest, WriteModify) { Scope root = Scope::NewRootScope().ExitOnError(); Node* modify = MakeModify(root, "M"); Node* write = MakeWrite(root, "W"); root.graph()->AddControlEdge(write, modify); std::vector<std::pair<int, int>> incompatible_pairs; TF_ASSERT_OK(ComputeIncompatiblePairs(root.graph(), &incompatible_pairs)); ASSERT_EQ(incompatible_pairs.size(), 1); std::pair<int, int> write_modify_pair = {write->id(), modify->id()}; EXPECT_EQ(incompatible_pairs[0], write_modify_pair); } TEST(ResourceOperationSafetyAnalysisTest, ReadModifyWrite) { Scope root = Scope::NewRootScope().ExitOnError(); Node* read = MakeRead(root, "R"); Node* modify = MakeModify(root, "M"); Node* write = MakeWrite(root, "W"); root.graph()->AddControlEdge(read, modify); root.graph()->AddControlEdge(modify, write); std::vector<std::pair<int, int>> incompatible_pairs; TF_ASSERT_OK(ComputeIncompatiblePairs(root.graph(), &incompatible_pairs)); EXPECT_EQ(incompatible_pairs.size(), 0); } TEST(ResourceOperationSafetyAnalysisTest, WriteModifyRead) { Scope root = Scope::NewRootScope().ExitOnError(); Node* read = MakeRead(root, "R"); Node* modify = MakeModify(root, "M"); Node* write = MakeWrite(root, "W"); root.graph()->AddControlEdge(write, modify); root.graph()->AddControlEdge(modify, read); std::vector<std::pair<int, int>> incompatible_pairs; TF_ASSERT_OK(ComputeIncompatiblePairs(root.graph(), &incompatible_pairs)); ASSERT_EQ(incompatible_pairs.size(), 3); std::pair<int, int> write_modify_pair = {write->id(), modify->id()}; std::pair<int, int> modify_read_pair = {modify->id(), read->id()}; std::pair<int, int> write_read_pair = {write->id(), read->id()}; EXPECT_EQ(incompatible_pairs[0], modify_read_pair); EXPECT_EQ(incompatible_pairs[1], write_read_pair); EXPECT_EQ(incompatible_pairs[2], write_modify_pair); } TEST(ResourceOperationSafetyAnalysisTest, WriteReadModify) { Scope root = Scope::NewRootScope().ExitOnError(); Node* read = MakeRead(root, "R"); Node* modify = MakeModify(root, "M"); Node* write = MakeWrite(root, "W"); root.graph()->AddControlEdge(write, read); root.graph()->AddControlEdge(read, modify); std::vector<std::pair<int, int>> incompatible_pairs; 
TF_ASSERT_OK(ComputeIncompatiblePairs(root.graph(), &incompatible_pairs)); ASSERT_EQ(incompatible_pairs.size(), 2); std::pair<int, int> write_modify_pair = {write->id(), modify->id()}; std::pair<int, int> write_read_pair = {write->id(), read->id()}; EXPECT_EQ(incompatible_pairs[0], write_read_pair); EXPECT_EQ(incompatible_pairs[1], write_modify_pair); } FunctionDefLibrary CreateFunctionDefLibWithConstFunction(const string& name) { FunctionDefLibrary flib_def; FunctionDef func = FunctionDefHelper::Create( name, {}, {"out: float"}, {}, {FunctionDefHelper::Const("one", 1.0f)}, {{"out", "out:output:0"}}); *flib_def.add_function() = std::move(func); return flib_def; } Node* MakeCall(Graph* graph, const string& callee_name, const string& node_name, Status* status) { NodeDef call_node; call_node.set_name(node_name); call_node.set_op(callee_name); return graph->AddNode(call_node, status); } TEST(ResourceOperationSafetyAnalysisTest, CallRead) { Scope root = Scope::NewRootScope().ExitOnError(); FunctionDefLibrary flib_def = CreateFunctionDefLibWithConstFunction("Const_func"); TF_ASSERT_OK(root.graph()->AddFunctionLibrary(flib_def)); Node* read = MakeRead(root, "R"); Status status; Node* call = MakeCall(root.graph(), "Const_func", "C", &status); TF_ASSERT_OK(status); root.graph()->AddControlEdge(call, read); std::vector<std::pair<int, int>> incompatible_pairs; TF_ASSERT_OK(ComputeIncompatiblePairs(root.graph(), &incompatible_pairs)); ASSERT_EQ(incompatible_pairs.size(), 1); std::pair<int, int> call_read_edge = {call->id(), read->id()}; EXPECT_EQ(incompatible_pairs[0], call_read_edge); } TEST(ResourceOperationSafetyAnalysisTest, ReadCall) { Scope root = Scope::NewRootScope().ExitOnError(); FunctionDefLibrary flib_def = CreateFunctionDefLibWithConstFunction("Const_func"); TF_ASSERT_OK(root.graph()->AddFunctionLibrary(flib_def)); Node* read = MakeRead(root, "R"); Status status; Node* call = MakeCall(root.graph(), "Const_func", "C", &status); TF_ASSERT_OK(status); root.graph()->AddControlEdge(read, call); std::vector<std::pair<int, int>> incompatible_pairs; TF_ASSERT_OK(ComputeIncompatiblePairs(root.graph(), &incompatible_pairs)); EXPECT_EQ(incompatible_pairs.size(), 0); } TEST(ResourceOperationSafetyAnalysisTest, CallWrite) { Scope root = Scope::NewRootScope().ExitOnError(); FunctionDefLibrary flib_def = CreateFunctionDefLibWithConstFunction("Const_func"); TF_ASSERT_OK(root.graph()->AddFunctionLibrary(flib_def)); Node* write = MakeWrite(root, "W"); Status status; Node* call = MakeCall(root.graph(), "Const_func", "C", &status); TF_ASSERT_OK(status); root.graph()->AddControlEdge(call, write); std::vector<std::pair<int, int>> incompatible_pairs; TF_ASSERT_OK(ComputeIncompatiblePairs(root.graph(), &incompatible_pairs)); EXPECT_EQ(incompatible_pairs.size(), 0); } TEST(ResourceOperationSafetyAnalysisTest, WriteCall) { Scope root = Scope::NewRootScope().ExitOnError(); FunctionDefLibrary flib_def = CreateFunctionDefLibWithConstFunction("Const_func"); TF_ASSERT_OK(root.graph()->AddFunctionLibrary(flib_def)); Node* write = MakeWrite(root, "W"); Status status; Node* call = MakeCall(root.graph(), "Const_func", "C", &status); TF_ASSERT_OK(status); root.graph()->AddControlEdge(write, call); std::vector<std::pair<int, int>> incompatible_pairs; TF_ASSERT_OK(ComputeIncompatiblePairs(root.graph(), &incompatible_pairs)); ASSERT_EQ(incompatible_pairs.size(), 1); std::pair<int, int> write_call_edge = {write->id(), call->id()}; EXPECT_EQ(incompatible_pairs[0], write_call_edge); } TEST(ResourceOperationSafetyAnalysisTest, 
SymbolicGradientRead) { Scope root = Scope::NewRootScope().ExitOnError(); FunctionDefLibrary flib_def = CreateFunctionDefLibWithConstFunction("Const_func"); TF_ASSERT_OK(root.graph()->AddFunctionLibrary(flib_def)); Node* read = MakeRead(root, "R"); NameAttrList fn; fn.set_name("Const_func"); Node* symbolic_gradient = ops::SymbolicGradient(root, {ops::Const(root, 1.0f)}, {DT_FLOAT}, fn) .output[0] .node(); root.graph()->AddControlEdge(symbolic_gradient, read); std::vector<std::pair<int, int>> incompatible_pairs; TF_ASSERT_OK(ComputeIncompatiblePairs(root.graph(), &incompatible_pairs)); ASSERT_EQ(incompatible_pairs.size(), 1); std::pair<int, int> symbolic_gradient_read_edge = {symbolic_gradient->id(), read->id()}; EXPECT_EQ(incompatible_pairs[0], symbolic_gradient_read_edge); } TEST(ResourceOperationSafetyAnalysisTest, WriteSymbolicGradient) { Scope root = Scope::NewRootScope().ExitOnError(); FunctionDefLibrary flib_def = CreateFunctionDefLibWithConstFunction("Const_func"); TF_ASSERT_OK(root.graph()->AddFunctionLibrary(flib_def)); Node* write = MakeWrite(root, "W"); NameAttrList fn; fn.set_name("Const_func"); Node* symbolic_gradient = ops::SymbolicGradient(root, {ops::Const(root, 1.0f)}, {DT_FLOAT}, fn) .output[0] .node(); root.graph()->AddControlEdge(write, symbolic_gradient); std::vector<std::pair<int, int>> incompatible_pairs; TF_ASSERT_OK(ComputeIncompatiblePairs(root.graph(), &incompatible_pairs)); ASSERT_EQ(incompatible_pairs.size(), 1); std::pair<int, int> write_symbolic_gradient_edge = {write->id(), symbolic_gradient->id()}; EXPECT_EQ(incompatible_pairs[0], write_symbolic_gradient_edge); } TEST(ResourceOperationSafetyAnalysisTest, ChainOfOps) { Scope root = Scope::NewRootScope().ExitOnError(); Node* write_0 = MakeWrite(root, "W0"); Node* neutral_0 = MakeNeutral(root, "N0"); Node* read_0 = MakeRead(root, "R0"); Node* write_1 = MakeWrite(root, "W1"); Node* neutral_1 = MakeNeutral(root, "N1"); Node* read_1 = MakeRead(root, "R1"); root.graph()->AddControlEdge(write_0, neutral_0); root.graph()->AddControlEdge(neutral_0, read_0); root.graph()->AddControlEdge(read_0, write_1); root.graph()->AddControlEdge(write_1, neutral_1); root.graph()->AddControlEdge(neutral_1, read_1); std::vector<std::pair<int, int>> incompatible_pairs; TF_ASSERT_OK(ComputeIncompatiblePairs(root.graph(), &incompatible_pairs)); ASSERT_EQ(incompatible_pairs.size(), 3); std::pair<int, int> write_0_read_0_pair = {write_0->id(), read_0->id()}; std::pair<int, int> write_0_read_1_pair = {write_0->id(), read_1->id()}; std::pair<int, int> write_1_read_1_pair = {write_1->id(), read_1->id()}; EXPECT_EQ(incompatible_pairs[0], write_0_read_0_pair); EXPECT_EQ(incompatible_pairs[1], write_0_read_1_pair); EXPECT_EQ(incompatible_pairs[2], write_1_read_1_pair); } TEST(ResourceOperationSafetyAnalysisTest, DagOfOps) { Scope root = Scope::NewRootScope().ExitOnError(); Node* write_0 = MakeWrite(root, "W0"); Node* write_1 = MakeWrite(root, "W1"); Node* neutral = MakeNeutral(root, "N"); Node* read_0 = MakeRead(root, "R0"); Node* read_1 = MakeRead(root, "R1"); root.graph()->AddControlEdge(write_0, neutral); root.graph()->AddControlEdge(write_1, neutral); root.graph()->AddControlEdge(neutral, read_0); root.graph()->AddControlEdge(neutral, read_1); std::vector<std::pair<int, int>> incompatible_pairs; TF_ASSERT_OK(ComputeIncompatiblePairs(root.graph(), &incompatible_pairs)); ASSERT_EQ(incompatible_pairs.size(), 4); std::pair<int, int> write_0_read_0_pair = {write_0->id(), read_0->id()}; std::pair<int, int> write_0_read_1_pair = {write_0->id(), 
read_1->id()}; std::pair<int, int> write_1_read_0_pair = {write_1->id(), read_0->id()}; std::pair<int, int> write_1_read_1_pair = {write_1->id(), read_1->id()}; EXPECT_EQ(incompatible_pairs[0], write_0_read_0_pair); EXPECT_EQ(incompatible_pairs[1], write_0_read_1_pair); EXPECT_EQ(incompatible_pairs[2], write_1_read_0_pair); EXPECT_EQ(incompatible_pairs[3], write_1_read_1_pair); } TEST(ResourceOperationSafetyAnalysisTest, DagOfOpsWithRepeatedPaths) { Scope root = Scope::NewRootScope().ExitOnError(); Node* write_0 = MakeWrite(root, "W0"); Node* write_1 = MakeWrite(root, "W1"); Node* neutral = MakeNeutral(root, "N"); Node* read_0 = MakeRead(root, "R0"); Node* read_1 = MakeRead(root, "R1"); root.graph()->AddControlEdge(write_0, neutral); root.graph()->AddControlEdge(write_1, neutral); root.graph()->AddControlEdge(neutral, read_0); root.graph()->AddControlEdge(neutral, read_1); root.graph()->AddControlEdge(write_1, read_1); std::vector<std::pair<int, int>> incompatible_pairs; TF_ASSERT_OK(ComputeIncompatiblePairs(root.graph(), &incompatible_pairs)); ASSERT_EQ(incompatible_pairs.size(), 4); std::pair<int, int> write_0_read_0_pair = {write_0->id(), read_0->id()}; std::pair<int, int> write_0_read_1_pair = {write_0->id(), read_1->id()}; std::pair<int, int> write_1_read_0_pair = {write_1->id(), read_0->id()}; std::pair<int, int> write_1_read_1_pair = {write_1->id(), read_1->id()}; EXPECT_EQ(incompatible_pairs[0], write_0_read_0_pair); EXPECT_EQ(incompatible_pairs[1], write_0_read_1_pair); EXPECT_EQ(incompatible_pairs[2], write_1_read_0_pair); EXPECT_EQ(incompatible_pairs[3], write_1_read_1_pair); } TEST(ResourceOperationSafetyAnalysisTest, Loop) { Scope root = Scope::NewRootScope().ExitOnError(); Output init_value = ops::Placeholder(root.WithOpName("init"), DT_FLOAT); Output loop_cond = ops::Placeholder(root.WithOpName("init"), DT_BOOL); Output enter_value = ops::internal::Enter(root.WithOpName("enter"), init_value, "fr"); ops::Merge iv(root.WithOpName("iv"), {enter_value, enter_value}); ops::Switch latch(root.WithOpName("latch"), iv.output, loop_cond); ops::internal::Exit exit(root.WithOpName("exit"), iv.output); Output next_iteration = ops::NextIteration(root.WithOpName("next_iteration"), latch.output_true); TF_ASSERT_OK( root.graph()->UpdateEdge(next_iteration.node(), 0, iv.output.node(), 1)); Node* write = MakeWrite(root, "W"); Node* read = MakeRead(root, "R"); root.graph()->AddControlEdge(iv.output.node(), write); root.graph()->AddControlEdge(write, read); root.graph()->AddControlEdge(read, next_iteration.node()); std::vector<std::pair<int, int>> incompatible_pairs; TF_ASSERT_OK(ComputeIncompatiblePairs(root.graph(), &incompatible_pairs)); ASSERT_EQ(incompatible_pairs.size(), 1); std::pair<int, int> write_read_pair = {write->id(), read->id()}; EXPECT_EQ(incompatible_pairs[0], write_read_pair); } bool IsResourceArgDef(const OpDef::ArgDef& arg_def) { return arg_def.type() == DT_RESOURCE; } } }
1,064
cpp
tensorflow/tensorflow
shape_inference
third_party/xla/xla/service/shape_inference.cc
third_party/xla/xla/service/shape_inference_test.cc
#ifndef XLA_SERVICE_SHAPE_INFERENCE_H_ #define XLA_SERVICE_SHAPE_INFERENCE_H_ #include <cstdint> #include <optional> #include <utility> #include <vector> #include "absl/status/statusor.h" #include "absl/types/span.h" #include "xla/hlo/ir/hlo_instruction.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/shape.h" #include "xla/xla_data.pb.h" namespace xla { class ShapeInference { public: static absl::StatusOr<Shape> InferUnaryOpShape(HloOpcode opcode, const Shape& shape); static absl::StatusOr<Shape> InferUnaryOpShape(HloOpcode opcode, const HloInstruction* operand); static absl::StatusOr<std::optional<Shape>> InferScalarBroadcastShape( absl::Span<const Shape> shapes); static absl::StatusOr<Shape> InferBinaryOpShape( HloOpcode opcode, const Shape& lhs, const Shape& rhs, absl::Span<const int64_t> broadcast_dimensions); static absl::StatusOr<Shape> InferBinaryOpShape(HloOpcode opcode, const HloInstruction* lhs, const HloInstruction* rhs); static absl::StatusOr<Shape> InferTernaryOpShape(HloOpcode opcode, const Shape& lhs, const Shape& rhs, const Shape& ehs); static absl::StatusOr<Shape> InferTernaryOpShape(HloOpcode opcode, const HloInstruction* lhs, const HloInstruction* rhs, const HloInstruction* ehs); static absl::StatusOr<Shape> InferVariadicOpShape( HloOpcode opcode, absl::Span<const Shape* const> operand_shapes); static absl::StatusOr<Shape> InferVariadicOpShape( HloOpcode opcode, absl::Span<const HloInstruction* const> operands); static absl::StatusOr<Shape> InferMapShape( absl::Span<const Shape* const> arg_shapes, const ProgramShape& to_apply, absl::Span<const int64_t> dimensions); static absl::StatusOr<Shape> InferBatchNormTrainingShape( const Shape& operand_shape, const Shape& scale_shape, const Shape& offset_shape, int64_t feature_index); static absl::StatusOr<Shape> InferBatchNormInferenceShape( const Shape& operand_shape, const Shape& scale_shape, const Shape& offset_shape, const Shape& mean_shape, const Shape& variance_shape, int64_t feature_index); static absl::StatusOr<Shape> InferBatchNormGradShape( const Shape& operand_shape, const Shape& scale_shape, const Shape& mean_shape, const Shape& var_shape, const Shape& output_grad_shape, int64_t feature_index); static absl::StatusOr<Shape> InferConvolveShape( const Shape& lhs, const Shape& rhs, int64_t feature_group_count, int64_t batch_group_count, const Window& window, const ConvolutionDimensionNumbers& dimension_numbers, std::optional<PrimitiveType> preferred_element_type); static absl::StatusOr<Shape> InferFftShape( const Shape& in, FftType fft_type, absl::Span<const int64_t> fft_length); static absl::StatusOr<Shape> InferTriangularSolveShape( const Shape& a, const Shape& b, const TriangularSolveOptions& options); static absl::StatusOr<Shape> InferCholeskyShape(const Shape& a); static absl::StatusOr<Shape> InferAllGatherShape( absl::Span<const Shape* const> operand_shapes, int64_t all_gather_dimension, int64_t shard_count); static absl::StatusOr<Shape> InferAllGatherStartShape( absl::Span<const Shape* const> operand_shapes, int64_t all_gather_dimension, int64_t shard_count); static absl::StatusOr<Shape> InferAllGatherDoneShape( const Shape& all_gather_start_shape); static absl::StatusOr<Shape> InferAllReduceShape( absl::Span<const Shape* const> operand_shapes); static absl::StatusOr<Shape> InferReduceScatterShape( absl::Span<const Shape* const> operand_shapes, int64_t scatter_dimension, int64_t shard_count); static absl::StatusOr<Shape> InferAllReduceStartShape( absl::Span<const Shape* const> operand_shapes); static 
absl::StatusOr<Shape> InferAllReduceDoneShape( const Shape& operand_shape); static absl::StatusOr<Shape> InferAllToAllShape(const Shape& shape, int64_t split_dimension, int64_t concat_dimension, int64_t split_count); static absl::StatusOr<Shape> InferAllToAllTupleShape( absl::Span<const Shape* const> operand_shapes); static absl::StatusOr<Shape> InferCollectiveBroadcastShape( absl::Span<const Shape* const> operand_shapes); static absl::StatusOr<Shape> InferCollectivePermuteShape( absl::Span<const Shape* const> operand_shapes); static absl::StatusOr<Shape> InferCollectivePermuteStartShape( absl::Span<const Shape* const> operand_shapes, absl::Span<const Shape> context_shapes); static absl::StatusOr<Shape> InferCollectivePermuteDoneShape( const Shape& operand_shape); static absl::StatusOr<Shape> InferReduceShape( absl::Span<const Shape* const> arg_shapes, absl::Span<const int64_t> dimensions_to_reduce, const ProgramShape& to_apply); static absl::StatusOr<Shape> InferReduceWindowShape( const Shape& operand_shape, const Shape& init_value, const Window& window, const ProgramShape& to_apply_shape); static absl::StatusOr<Shape> InferReduceWindowShape( const Shape& operand_shape, const Shape& init_value, const Window& window); static absl::StatusOr<Shape> InferReduceWindowShape( absl::Span<const Shape* const> operands, absl::Span<const Shape* const> init_values, const Window& window, const ProgramShape& to_apply_shape); static absl::StatusOr<Shape> InferReduceWindowShape( absl::Span<const Shape*> operands, absl::Span<const Shape*> init_values, const Window& window); static absl::StatusOr<Shape> InferSelectAndScatterShape( const Shape& operand_shape, const ProgramShape& select_shape, const Window& window, const Shape& source_shape, const Shape& init_value_shape, const ProgramShape& scatter_shape); static absl::StatusOr<Shape> InferReverseShape( const Shape& operand_shape, absl::Span<const int64_t> dimensions); static absl::StatusOr<Shape> InferSliceShape( const Shape& arg, absl::Span<const int64_t> starts, absl::Span<const int64_t> limits, absl::Span<const int64_t> strides); static absl::StatusOr<Shape> InferDynamicSliceShape( const Shape& operand_shape, absl::Span<const Shape> start_index_shapes, absl::Span<const int64_t> slice_sizes, bool allow_scalar_indices = true); static absl::StatusOr<Shape> InferDynamicUpdateSliceShape( const Shape& operand_shape, const Shape& update_shape, absl::Span<const Shape> start_index_shapes, bool allow_scalar_indices = true); static absl::StatusOr<Shape> InferGetTupleElementShape(const Shape& arg, int64_t index); static absl::StatusOr<Shape> InferWhileShape(const ProgramShape& condition, const ProgramShape& body, const Shape& init); static absl::StatusOr<Shape> InferConditionalShape( const Shape& branch_index, absl::Span<const ProgramShape> branch_computations, absl::Span<const Shape> branch_operands); static absl::StatusOr<Shape> InferBroadcastShape( const Shape& operand, absl::Span<const int64_t> broadcast_sizes); static absl::StatusOr<Shape> InferBroadcastShape( const Shape& operand_shape, const Shape& output_shape, absl::Span<const int64_t> broadcast_dimensions); static absl::StatusOr<Shape> InferReshapeShape( const Shape& operand, absl::Span<const int64_t> dimensions, absl::Span<const int64_t> new_sizes, int64_t inferred_dimension); static absl::StatusOr<Shape> InferDynamicReshapeShape( const Shape& operand, absl::Span<const Shape* const> dim_size_shapes, absl::Span<const int64_t> new_size_bounds, const std::vector<bool>& dims_are_dynamic); static 
absl::StatusOr<Shape> InferTransposeShape( const Shape& operand, absl::Span<const int64_t> dimensions); static absl::StatusOr<Shape> InferConcatOpShape( absl::Span<const Shape* const> arg_shapes, int64_t dimension); static absl::StatusOr<Shape> InferConvertShape( const Shape& operand_shape, PrimitiveType new_element_type); static absl::StatusOr<Shape> InferBitcastConvertShape( const Shape& operand_shape, PrimitiveType new_element_type); static absl::StatusOr<Shape> InferStochasticConvertShape( const Shape& operand_shape, const Shape& random_shape, PrimitiveType new_element_type); static absl::StatusOr<Shape> InferReducePrecisionShape( const Shape& operand_shape, const int exponent_bits, const int mantissa_bits); static absl::StatusOr<Shape> InferPadShape( const Shape& operand_shape, const Shape& padding_value_shape, const PaddingConfig& padding_config); static absl::StatusOr<Shape> InferCallShape( absl::Span<const Shape* const> arg_shapes, const ProgramShape& to_apply); static absl::StatusOr<Shape> InferDotOpShape( const Shape& lhs, const Shape& rhs, const DotDimensionNumbers& dimension_numbers, std::optional<PrimitiveType> preferred_element_type, absl::Span<const SparsityDescriptor> sparsity = {}); static absl::StatusOr<Shape> InferSparseDotMetadataShape( const Shape& operand_shape, const DotDimensionNumbers& dimension_numbers, const SparsityDescriptor& sparsity, PrimitiveType element_type = U16); static absl::StatusOr<Shape> InferGatherShape( const Shape& input_shape, const Shape& start_indices_shape, const GatherDimensionNumbers& gather_dim_numbers, absl::Span<const int64_t> slice_sizes); static absl::StatusOr<Shape> InferScatterShape( absl::Span<const Shape* const> arg_shapes, const ProgramShape& to_apply_shape, const ScatterDimensionNumbers& scatter_dim_numbers); static absl::StatusOr<Shape> InferGetDimensionSizeShape(const Shape& shape, int64_t dimension); static absl::StatusOr<Shape> InferSetDimensionSizeShape( const Shape& operand_shape, const Shape& val_shape, int64_t dimension); static absl::StatusOr<Shape> InferTopKShape(const Shape& operand_shape, int64_t k); static absl::StatusOr<Window> InferWindowFromDimensions( absl::Span<const int64_t> window_dimensions, absl::Span<const int64_t> window_strides, absl::Span<const std::pair<int64_t, int64_t>> padding, absl::Span<const int64_t> lhs_dilation, absl::Span<const int64_t> rhs_dilation, std::optional<std::vector<bool>> window_reversal = std::nullopt); private: static absl::StatusOr<Shape> InferElementwiseBinaryOpShape( HloOpcode operation, const Shape& lhs, const Shape& rhs, absl::Span<const int64_t> broadcast_dimensions); static absl::StatusOr<Shape> InferClampShape(const Shape& min, const Shape& operand, const Shape& max); static absl::StatusOr<Shape> InferSelectShape(const Shape& pred, const Shape& on_true, const Shape& on_false); static absl::StatusOr<Shape> InferDegenerateDimensionBroadcastShape( const Shape& lhs, const Shape& rhs); static absl::StatusOr<Shape> InferInDimBroadcastShape( const Shape& smaller_shape, const Shape& larger_shape, absl::Span<const int64_t> broadcast_dimensions); ShapeInference(const ShapeInference&) = delete; ShapeInference& operator=(const ShapeInference&) = delete; }; } #endif #include "xla/service/shape_inference.h" #include <algorithm> #include <array> #include <cstddef> #include <cstdint> #include <iterator> #include <limits> #include <numeric> #include <optional> #include <set> #include <string> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include 
"absl/container/flat_hash_set.h" #include "absl/container/inlined_vector.h" #include "absl/log/check.h" #include "absl/status/status.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" #include "absl/strings/str_join.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/permutation_util.h" #include "xla/primitive_util.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/status_macros.h" #include "xla/util.h" #include "xla/window_util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/logging.h" #include "tsl/platform/status.h" #include "tsl/platform/statusor.h" namespace xla { namespace { using absl::InvalidArgumentError; using absl::StrFormat; using absl::StrJoin; bool AllUnique(absl::Span<const int64_t> slice) { return std::set<int64_t>(slice.begin(), slice.end()).size() == slice.size(); } bool IsUnboundedDynamicSize(int64_t size) { return size == Shape::kUnboundedSize; } bool CompatibleDimensionSizes(int64_t size_a, int64_t size_b) { return IsUnboundedDynamicSize(size_a) || IsUnboundedDynamicSize(size_b) || size_a == size_b; } absl::Status ExpectArray(const Shape& shape, absl::string_view op_type) { if (!shape.IsArray()) { return InvalidArgument("Expected array argument for %s, but got %s.", std::string(op_type), ShapeUtil::HumanString(shape)); } return absl::OkStatus(); } absl::Status VerifyReducerShape( const ProgramShape& reducer_shape, absl::Span<const Shape* const> init_value_shapes, absl::Span<const PrimitiveType> input_element_types, int64_t inputs) { if (reducer_shape.parameters_size() != inputs * 2) { return InvalidArgument( "Reduction function must take %d parameters, but " "takes %d parameter(s).", inputs * 2, reducer_shape.parameters_size()); } const Shape& accumulator_shape = reducer_shape.result(); std::vector<const Shape*> accumulator_subshapes; if (accumulator_shape.IsArray()) { if (inputs != 1) { return InvalidArgument( "Reduction function must produce a tuple with %d elements, but " "produces a scalar", inputs); } accumulator_subshapes.push_back(&accumulator_shape); } else if (accumulator_shape.IsTuple()) { if (ShapeUtil::TupleElementCount(accumulator_shape) != inputs) { return InvalidArgument( "Reduction function must produce a tuple with %d elements, but has " "%d elements", inputs, ShapeUtil::TupleElementCount(accumulator_shape)); } for (const Shape& element_shape : accumulator_shape.tuple_shapes()) { accumulator_subshapes.push_back(&element_shape); } } else { return InvalidArgument( "Reduction function must produce a scalar or tuple of scalars, but has " "shape: %s", ShapeUtil::HumanString(accumulator_shape)); } for (const Shape* element_shape : accumulator_subshapes) { if (element_shape->rank() != 0) { return InvalidArgument( "Reduction function must return a scalar or tuple of scalars but " "returns shape: %s", ShapeUtil::HumanString(accumulator_shape)); } } for (int64_t i = 0; i < inputs; ++i) { if (!ShapeUtil::Compatible(*accumulator_subshapes[i], reducer_shape.parameters(i))) { return InvalidArgument( "Reduction function's %d-th parameter shape differs from the " "result shape: %s vs %s", i, ShapeUtil::HumanString(reducer_shape.parameters(i)), ShapeUtil::HumanString(*accumulator_subshapes[i])); } if (!ShapeUtil::CompatibleIgnoringFpPrecision(*accumulator_subshapes[i], *init_value_shapes[i])) { return InvalidArgument( "Reduction function's accumulator shape at index %d differs from " 
"the init_value shape: %s vs %s", i, ShapeUtil::HumanString(*accumulator_subshapes[i]), ShapeUtil::HumanString(*init_value_shapes[i])); } const Shape input_element_shape = ShapeUtil::MakeShape(input_element_types[i], {}); if (!ShapeUtil::CompatibleIgnoringFpPrecision( input_element_shape, reducer_shape.parameters(inputs + i))) { return InvalidArgument( "Reduction function's %d-th parameter shape differs from the " "input type element type: %s vs %s", inputs + i, ShapeUtil::HumanString(reducer_shape.parameters(inputs + i)), ShapeUtil::HumanString(input_element_shape)); } if (!ShapeUtil::CompatibleIgnoringFpPrecision( *accumulator_subshapes[i], reducer_shape.parameters(inputs + i))) { return InvalidArgument( "Reduction function's %d-th parameter shape must " "match the result shape, but got %s vs %s.", inputs + i, ShapeUtil::HumanString(reducer_shape.parameters(inputs + i)), ShapeUtil::HumanString(*accumulator_subshapes[i])); } } return absl::OkStatus(); } absl::StatusOr<Shape> InferWindowOutputShape(const Shape& base_shape, const Window& window, PrimitiveType element_type) { if (window.dimensions_size() != base_shape.rank()) { return InvalidArgument( "Window has dimension %d but base shape has dimension %d.", window.dimensions_size(), base_shape.rank()); } std::vector<int64_t> output_dimensions(window.dimensions_size()); std::vector<bool> output_is_dynamic(window.dimensions_size()); for (int64_t i = 0; i < window.dimensions_size(); ++i) { const auto& dim = window.dimensions(i); if (dim.size() <= 0) { return InvalidArgument("Window %s has a non-positive dimension.", window.DebugString()); } if (dim.stride() <= 0) { return InvalidArgument("Window %s has a non-positive stride.", window.DebugString()); } if (dim.base_dilation() < 1) { return InvalidArgument( "Window %s has a non-positive base area dilation factor.", window.DebugString()); } if (dim.window_dilation() < 1) { return InvalidArgument( "Window %s has a non-positive window dilation factor.", window.DebugString()); } if (IsUnboundedDynamicSize(ShapeUtil::GetDimension(base_shape, i))) { output_dimensions[i] = Shape::kUnboundedSize; } else { const int64_t dilated_base = window_util::DilatedBound( ShapeUtil::GetDimension(base_shape, i), dim.base_dilation()); const int64_t padded_dilated_base = dim.padding_low() + dilated_base + dim.padding_high(); const int64_t dilated_window = window_util::DilatedBound(dim.size(), dim.window_dilation()); output_dimensions[i] = window_util::StridedBound( padded_dilated_base, dilated_window, dim.stride()); } output_is_dynamic[i] = base_shape.is_dynamic_dimension(i); } return ShapeUtil::MakeValidatedShape(element_type, output_dimensions, output_is_dynamic); } struct DimAndBound { int64_t dimension, bound; }; DimAndBound InferConcatenatedDimAndBound(int64_t left_size, int64_t right_size, int64_t left_bound, int64_t right_bound) { bool is_left_static_dim = !IsUnboundedDynamicSize(left_size); bool is_right_static_dim = !IsUnboundedDynamicSize(right_size); bool is_left_static_bound = !IsUnboundedDynamicSize(left_bound); bool is_right_static_bound = !IsUnboundedDynamicSize(right_bound); int64_t inferred_size = Shape::kUnboundedSize; int64_t inferred_bound = Shape::kUnboundedSize; if (is_left_static_dim && is_right_static_dim) { inferred_size = left_size + right_size; } if (is_left_static_bound || is_right_static_bound) { int64_t leftBoundOrSize = is_left_static_bound ?
#include "xla/service/shape_inference.h" #include <array> #include <cstddef> #include <cstdint> #include <optional> #include <string> #include <utility> #include <vector> #include <gtest/gtest.h> #include "absl/log/check.h" #include "absl/log/log.h" #include "absl/strings/str_join.h" #include "absl/strings/string_view.h" #include "absl/strings/substitute.h" #include "absl/types/span.h" #include "xla/client/padding.h" #include "xla/hlo/ir/hlo_instructions.h" #include "xla/hlo/ir/hlo_opcode.h" #include "xla/service/hlo_parser.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "xla/test.h" #include "xla/test_helpers.h" #include "xla/xla_data.pb.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace xla { namespace { using ::testing::ContainsRegex; using ::testing::HasSubstr; constexpr absl::string_view kBroadcastDimensionMismatchErrorMessage = "Broadcast dimension 0 mismatch"; constexpr absl::string_view kIncompatibleBinaryOpShapeErrorMessage = "Binary op with incompatible shapes"; std::array<const int64_t, 1> zero_array = {0}; class ShapeInferenceTest : public ::testing::Test { protected: const Shape s32_ = ShapeUtil::MakeShape(S32, {}); const Shape f16_ = ShapeUtil::MakeShape(F16, {}); const Shape f32_ = ShapeUtil::MakeShape(F32, {}); const Shape f64_ = ShapeUtil::MakeShape(F64, {}); const Shape pred_ = ShapeUtil::MakeShape(PRED, {}); const Shape vector_32_ = ShapeUtil::MakeShape(F32, {32}); const Shape vector_64_ = ShapeUtil::MakeShape(F32, {64}); const Shape matrix_32_48_ = ShapeUtil::MakeShape(F32, {32, 48}); const Shape matrix_32_64_ = ShapeUtil::MakeShape(F32, {32, 64}); const Shape matrix_64_48_ = ShapeUtil::MakeShape(F32, {64, 48}); const Shape s32matrix_64_64_ = ShapeUtil::MakeShape(S32, {64, 64}); }; class ReduceShapeInferenceTest : public ShapeInferenceTest { protected: void ExpectInferredReduceShape( const Shape& expected_inferred_shape, const Shape& arg, absl::Span<const int64_t> dimensions_to_reduce) { ProgramShape to_apply = ShapeUtil::MakeProgramShape({f32_, f32_}, f32_); const absl::StatusOr<Shape> inferred_shape = ShapeInference::InferReduceShape({&arg, &f32_}, dimensions_to_reduce, to_apply); EXPECT_IS_OK(inferred_shape.status()); EXPECT_TRUE(ShapeUtil::Equal(expected_inferred_shape, *inferred_shape)); } }; class SelectAndScatterShapeInferenceTest : public ShapeInferenceTest { protected: SelectAndScatterShapeInferenceTest() { operand_shape_ = ShapeUtil::MakeShape(F32, {8, 16}); source_shape_ = ShapeUtil::MakeShape(F32, {4, 8}); WindowDimension dim; dim.set_size(2); dim.set_stride(2); dim.set_padding_low(0); dim.set_padding_high(0); dim.set_window_dilation(1); dim.set_base_dilation(1); *window_.add_dimensions() = dim; *window_.add_dimensions() = dim; init_value_shape_ = ShapeUtil::MakeShape(F32, {}); select_program_shape_ = ShapeUtil::MakeProgramShape( {ShapeUtil::MakeShape(F32, {}), ShapeUtil::MakeShape(F32, {})}, pred_); scatter_program_shape_ = ShapeUtil::MakeProgramShape( {ShapeUtil::MakeShape(F32, {}), ShapeUtil::MakeShape(F32, {})}, f32_); } Shape operand_shape_; Shape source_shape_; Window window_; Shape init_value_shape_; ProgramShape select_program_shape_; ProgramShape scatter_program_shape_; }; struct BinaryOpTestCase { std::string lhs; std::string rhs; absl::Span<const int64_t> broadcast_dimensions; std::string expected; std::optional<std::string_view> error_message; }; class UnboundedLogicalOpShapeInferenceTest : public ::testing::TestWithParam<BinaryOpTestCase> {}; class UnboundedBinaryOpShapeInferenceTest : public 
::testing::TestWithParam<BinaryOpTestCase> {}; class UnboundedCompareOpShapeInferenceTest : public ::testing::TestWithParam<BinaryOpTestCase> {}; class UnboundedComplexOpShapeInferenceTest : public ::testing::TestWithParam<BinaryOpTestCase> {}; class UnboundedConcatenateOpShapeInferenceTest : public ::testing::TestWithParam<std::vector<std::string>> {}; struct UnaryOpTestCase { std::string operand; std::string expected; HloOpcode opcode; }; class UnboundedUnaryOpShapeInferenceTest : public ::testing::TestWithParam<UnaryOpTestCase> {}; class UnboundedClampOpShapeInferenceTest : public ::testing::TestWithParam<std::vector<std::string>> {}; class UnboundedSelectOpShapeInferenceTest : public ::testing::TestWithParam<std::vector<std::string>> {}; TEST_F(ShapeInferenceTest, UnaryNegateMatrix) { const Shape matrix_shape = ShapeUtil::MakeShape(F32, {128, 64}); const absl::StatusOr<Shape> inferred_shape = ShapeInference::InferUnaryOpShape(HloOpcode::kNegate, matrix_shape); ASSERT_IS_OK(inferred_shape.status()); ASSERT_TRUE(ShapeUtil::Equal(matrix_shape, *inferred_shape)); } TEST_F(ShapeInferenceTest, SelectScalarPredBetweenTuples) { const Shape tuple = ShapeUtil::MakeTupleShape({s32_, f32_}); const absl::StatusOr<Shape> inferred_shape = ShapeInference::InferTernaryOpShape(HloOpcode::kSelect, pred_, tuple, tuple); ASSERT_FALSE(inferred_shape.ok()); ASSERT_THAT(inferred_shape.status().message(), HasSubstr("Expected array argument for select")); } TEST_F(ShapeInferenceTest, SelectScalarPredBetweenArrays) { const absl::StatusOr<Shape> inferred_shape = ShapeInference::InferTernaryOpShape(HloOpcode::kSelect, pred_, matrix_64_48_, matrix_64_48_); ASSERT_IS_OK(inferred_shape.status()); ASSERT_TRUE(ShapeUtil::Equal(matrix_64_48_, *inferred_shape)); } TEST_F(ShapeInferenceTest, SelectArrayPredBetweenArrays) { const Shape predarray = ShapeUtil::MakeShape(PRED, {64, 48}); const absl::StatusOr<Shape> inferred_shape = ShapeInference::InferTernaryOpShape(HloOpcode::kSelect, predarray, matrix_64_48_, matrix_64_48_); ASSERT_IS_OK(inferred_shape.status()); ASSERT_TRUE(ShapeUtil::Equal(matrix_64_48_, *inferred_shape)); } TEST_F(ShapeInferenceTest, SelectBadShapes) { const absl::StatusOr<Shape> inferred_shape_error1 = ShapeInference::InferTernaryOpShape(HloOpcode::kSelect, pred_, matrix_64_48_, matrix_32_64_); ASSERT_FALSE(inferred_shape_error1.ok()); ASSERT_THAT(inferred_shape_error1.status().message(), HasSubstr("Operands to select must be the same shape")); const absl::StatusOr<Shape> inferred_shape_error2 = ShapeInference::InferTernaryOpShape(HloOpcode::kSelect, s32_, matrix_64_48_, matrix_64_48_); ASSERT_FALSE(inferred_shape_error2.ok()); ASSERT_THAT(inferred_shape_error2.status().message(), HasSubstr("pred operand must have PRED")); const absl::StatusOr<Shape> inferred_shape_error3 = ShapeInference::InferTernaryOpShape(HloOpcode::kSelect, ShapeUtil::MakeShape(PRED, {64}), matrix_64_48_, matrix_64_48_); ASSERT_FALSE(inferred_shape_error3.ok()); ASSERT_THAT( inferred_shape_error3.status().message(), HasSubstr("Operands to select and predicate must be the same shape")); const absl::StatusOr<Shape> inferred_shape_error4 = ShapeInference::InferTernaryOpShape( HloOpcode::kSelect, ShapeUtil::MakeTupleShape({pred_, pred_}), ShapeUtil::MakeTupleShape({f32_, f32_}), ShapeUtil::MakeTupleShape({f32_, f32_})); ASSERT_FALSE(inferred_shape_error4.ok()); ASSERT_THAT(inferred_shape_error4.status().message(), HasSubstr("Expected array argument for select pred")); } TEST_F(ShapeInferenceTest, ClampAllMatrix) { const 
absl::StatusOr<Shape> inferred_shape = ShapeInference::InferTernaryOpShape(HloOpcode::kClamp, matrix_64_48_, matrix_64_48_, matrix_64_48_); ASSERT_IS_OK(inferred_shape.status()); ASSERT_TRUE(ShapeUtil::Equal(matrix_64_48_, *inferred_shape)); } TEST_F(ShapeInferenceTest, ClampAllScalar) { const absl::StatusOr<Shape> inferred_shape = ShapeInference::InferTernaryOpShape(HloOpcode::kClamp, f32_, f32_, f32_); ASSERT_IS_OK(inferred_shape.status()); ASSERT_TRUE(ShapeUtil::Equal(f32_, *inferred_shape)); } TEST_F(ShapeInferenceTest, ClampMinScalar) { const absl::StatusOr<Shape> inferred_shape = ShapeInference::InferTernaryOpShape(HloOpcode::kClamp, f32_, matrix_64_48_, matrix_64_48_); ASSERT_IS_OK(inferred_shape.status()); ASSERT_TRUE(ShapeUtil::Equal(matrix_64_48_, *inferred_shape)); } TEST_F(ShapeInferenceTest, ClampMaxScalar) { const absl::StatusOr<Shape> inferred_shape = ShapeInference::InferTernaryOpShape(HloOpcode::kClamp, matrix_64_48_, matrix_64_48_, f32_); ASSERT_IS_OK(inferred_shape.status()); ASSERT_TRUE(ShapeUtil::Equal(matrix_64_48_, *inferred_shape)); } TEST_F(ShapeInferenceTest, ClampOperandScalar) { const absl::StatusOr<Shape> inferred_shape = ShapeInference::InferTernaryOpShape(HloOpcode::kClamp, matrix_64_48_, f32_, matrix_64_48_); ASSERT_FALSE(inferred_shape.ok()); ASSERT_THAT(inferred_shape.status().message(), HasSubstr("Clamp with incompatible shapes")); } TEST_F(ShapeInferenceTest, ClampMinMatrix) { const absl::StatusOr<Shape> inferred_shape = ShapeInference::InferTernaryOpShape(HloOpcode::kClamp, matrix_64_48_, f32_, f32_); ASSERT_FALSE(inferred_shape.ok()); ASSERT_THAT(inferred_shape.status().message(), HasSubstr("Clamp with incompatible shapes")); } TEST_F(ShapeInferenceTest, ClampMaxMatrix) { const absl::StatusOr<Shape> inferred_shape = ShapeInference::InferTernaryOpShape(HloOpcode::kClamp, f32_, f32_, matrix_64_48_); ASSERT_FALSE(inferred_shape.ok()); ASSERT_THAT(inferred_shape.status().message(), HasSubstr("Clamp with incompatible shapes")); } TEST_F(ShapeInferenceTest, ClampOperandMatrix) { const absl::StatusOr<Shape> inferred_shape = ShapeInference::InferTernaryOpShape(HloOpcode::kClamp, f32_, matrix_64_48_, f32_); ASSERT_IS_OK(inferred_shape.status()); ASSERT_TRUE(ShapeUtil::Equal(matrix_64_48_, *inferred_shape)); } TEST_F(ShapeInferenceTest, ClampBadShapes) { ASSERT_FALSE( ShapeInference::InferTernaryOpShape(HloOpcode::kClamp, s32_, f32_, f32_) .ok()); ASSERT_FALSE( ShapeInference::InferTernaryOpShape(HloOpcode::kClamp, f32_, s32_, f32_) .ok()); ASSERT_FALSE( ShapeInference::InferTernaryOpShape(HloOpcode::kClamp, f32_, f32_, s32_) .ok()); ASSERT_FALSE(ShapeInference::InferTernaryOpShape( HloOpcode::kClamp, vector_64_, vector_32_, vector_32_) .ok()); ASSERT_FALSE(ShapeInference::InferTernaryOpShape( HloOpcode::kClamp, vector_32_, vector_64_, vector_32_) .ok()); ASSERT_FALSE(ShapeInference::InferTernaryOpShape( HloOpcode::kClamp, vector_32_, vector_32_, vector_64_) .ok()); ASSERT_FALSE(ShapeInference::InferTernaryOpShape(HloOpcode::kClamp, vector_64_, vector_32_, f32_) .ok()); ASSERT_FALSE(ShapeInference::InferTernaryOpShape(HloOpcode::kClamp, vector_64_, f32_, vector_32_) .ok()); ASSERT_FALSE(ShapeInference::InferTernaryOpShape(HloOpcode::kClamp, f32_, vector_64_, vector_32_) .ok()); } TEST_F(ShapeInferenceTest, Complex) { const auto complex_shape = [&](const Shape& lhs, const Shape& rhs, absl::Span<const int64_t> bcast) { return ShapeInference::InferBinaryOpShape(HloOpcode::kComplex, lhs, rhs, bcast); }; ASSERT_FALSE(complex_shape(s32_, s32_, {}).ok()); 
ASSERT_FALSE(complex_shape(pred_, pred_, {}).ok()); ASSERT_FALSE(complex_shape(f32_, f64_, {}).ok()); ASSERT_FALSE(complex_shape(f16_, f16_, {}).ok()); const Shape c64_32 = ShapeUtil::MakeShape(C64, {32}); TF_ASSERT_OK_AND_ASSIGN(Shape result, complex_shape(f32_, f32_, {})); ASSERT_TRUE(ShapeUtil::Equal(result, ShapeUtil::MakeShape(C64, {}))); TF_ASSERT_OK_AND_ASSIGN(result, complex_shape(vector_32_, f32_, {})); ASSERT_TRUE(ShapeUtil::Equal(result, c64_32)); TF_ASSERT_OK_AND_ASSIGN(result, complex_shape(f32_, vector_32_, {})); ASSERT_TRUE(ShapeUtil::Equal(result, c64_32)); TF_ASSERT_OK_AND_ASSIGN(result, complex_shape(vector_32_, f32_, {})); ASSERT_TRUE(ShapeUtil::Equal(result, c64_32)); const Shape c64_32_64 = ShapeUtil::MakeShape(C64, {32, 64}); TF_ASSERT_OK_AND_ASSIGN(result, complex_shape(vector_64_, matrix_32_64_, {1})); ASSERT_TRUE(ShapeUtil::Equal(result, c64_32_64)); TF_ASSERT_OK_AND_ASSIGN(result, complex_shape(matrix_32_64_, vector_64_, {1})); ASSERT_TRUE(ShapeUtil::Equal(result, c64_32_64)); TF_ASSERT_OK_AND_ASSIGN(result, complex_shape(matrix_32_64_, matrix_32_64_, {})); ASSERT_TRUE(ShapeUtil::Equal(result, c64_32_64)); TF_ASSERT_OK_AND_ASSIGN(result, complex_shape(matrix_32_64_, f32_, {})); ASSERT_TRUE(ShapeUtil::Equal(result, c64_32_64)); TF_ASSERT_OK_AND_ASSIGN(result, complex_shape(f64_, f64_, {})); ASSERT_TRUE(ShapeUtil::Equal(result, ShapeUtil::MakeShape(C128, {}))); } TEST_F(ShapeInferenceTest, VariadicOpTuplify) { const absl::StatusOr<Shape> result = ShapeInference::InferVariadicOpShape(HloOpcode::kTuple, {&s32_, &f32_}); ASSERT_IS_OK(result.status()); ASSERT_TRUE( ShapeUtil::Equal(*result, ShapeUtil::MakeTupleShape({s32_, f32_}))); } TEST_F(ShapeInferenceTest, ReduceWindowInHalf) { const Shape matrix_shape = ShapeUtil::MakeShape(F32, {8, 8}); Window window; WindowDimension dim; dim.set_size(2); dim.set_stride(2); dim.set_padding_low(0); dim.set_padding_high(0); dim.set_window_dilation(1); dim.set_base_dilation(1); *window.add_dimensions() = dim; *window.add_dimensions() = dim; const Shape window_shape = ShapeUtil::MakeShape(F32, {2, 2}); const Shape init_value_shape = ShapeUtil::MakeShape(F32, {}); const Shape float_scalar = ShapeUtil::MakeShape(F32, {}); ProgramShape to_apply = ShapeUtil::MakeProgramShape( {ShapeUtil::MakeShape(F32, {}), ShapeUtil::MakeShape(F32, {})}, f32_); const absl::StatusOr<Shape> inferred_shape = ShapeInference::InferReduceWindowShape(matrix_shape, init_value_shape, window, to_apply); ASSERT_IS_OK(inferred_shape.status()); ASSERT_TRUE( ShapeUtil::Equal(ShapeUtil::MakeShape(F32, {4, 4}), *inferred_shape)); } TEST_F(SelectAndScatterShapeInferenceTest, SelectAndScatterProperShapes) { const absl::StatusOr<Shape> inferred_shape_ok = ShapeInference::InferSelectAndScatterShape( operand_shape_, select_program_shape_, window_, source_shape_, init_value_shape_, scatter_program_shape_); ASSERT_IS_OK(inferred_shape_ok.status()); ASSERT_TRUE(ShapeUtil::Equal(operand_shape_, *inferred_shape_ok)); } TEST_F(SelectAndScatterShapeInferenceTest, SelectAndScatterWrongSourceShape) { const Shape source_shape_fail = ShapeUtil::MakeShape(F32, {4, 6}); const absl::StatusOr<Shape> inferred_shape_fail = ShapeInference::InferSelectAndScatterShape( operand_shape_, select_program_shape_, window_, source_shape_fail, init_value_shape_, scatter_program_shape_); ASSERT_FALSE(inferred_shape_fail.ok()); ASSERT_THAT(inferred_shape_fail.status().message(), HasSubstr("Source shape does not match")); } TEST_F(SelectAndScatterShapeInferenceTest, SelectAndScatterWrongSelectShape1) { 
ProgramShape select_program_shape_fail = ShapeUtil::MakeProgramShape({ShapeUtil::MakeShape(F32, {})}, pred_); const absl::StatusOr<Shape> inferred_shape_fail = ShapeInference::InferSelectAndScatterShape( operand_shape_, select_program_shape_fail, window_, source_shape_, init_value_shape_, scatter_program_shape_); ASSERT_FALSE(inferred_shape_fail.ok()); ASSERT_THAT(inferred_shape_fail.status().message(), HasSubstr("Select function must take 2 parameters")); } TEST_F(SelectAndScatterShapeInferenceTest, SelectAndScatterWrongSelectShape2) { ProgramShape select_program_shape_fail = ShapeUtil::MakeProgramShape( {ShapeUtil::MakeShape(F32, {}), ShapeUtil::MakeShape(F32, {})}, f32_); const absl::StatusOr<Shape> inferred_shape_fail = ShapeInference::InferSelectAndScatterShape( operand_shape_, select_program_shape_fail, window_, source_shape_, init_value_shape_, scatter_program_shape_); ASSERT_FALSE(inferred_shape_fail.ok()); ASSERT_THAT(inferred_shape_fail.status().message(), HasSubstr("Select function must have rank-0 PRED")); } TEST_F(SelectAndScatterShapeInferenceTest, SelectAndScatterWrongSelectShape3) { ProgramShape select_program_shape_fail = ShapeUtil::MakeProgramShape( {ShapeUtil::MakeShape(S32, {}), ShapeUtil::MakeShape(F32, {})}, pred_); const absl::StatusOr<Shape> inferred_shape_fail = ShapeInference::InferSelectAndScatterShape( operand_shape_, select_program_shape_fail, window_, source_shape_, init_value_shape_, scatter_program_shape_); ASSERT_FALSE(inferred_shape_fail.ok()); ASSERT_THAT(inferred_shape_fail.status().message(), HasSubstr("Select function's first parameter")); } TEST_F(SelectAndScatterShapeInferenceTest, SelectAndScatterWrongSelectShape4) { ProgramShape select_program_shape_fail = ShapeUtil::MakeProgramShape( {ShapeUtil::MakeShape(F32, {}), ShapeUtil::MakeShape(U32, {})}, pred_); const absl::StatusOr<Shape> inferred_shape_fail = ShapeInference::InferSelectAndScatterShape( operand_shape_, select_program_shape_fail, window_, source_shape_, init_value_shape_, scatter_program_shape_); ASSERT_FALSE(inferred_shape_fail.ok()); ASSERT_THAT(inferred_shape_fail.status().message(), HasSubstr("Select function's second parameter")); } TEST_F(ShapeInferenceTest, AllGatherStart) { const Shape operand = ShapeUtil::MakeShape(F32, {1, 8, 4}); const Shape expected_shape = ShapeUtil::MakeTupleShape( {operand, ShapeUtil::MakeShape(F32, {8, 8, 4})}); const absl::StatusOr<Shape> inferred_ag_shape = ShapeInference::InferAllGatherStartShape( {&operand}, 0, 8); EXPECT_TRUE(inferred_ag_shape.ok()); EXPECT_TRUE(ShapeUtil::Equal(*inferred_ag_shape, expected_shape)); } TEST_F(ShapeInferenceTest, AllGatherStartMultiOperand) { const Shape operand0 = ShapeUtil::MakeShape(F32, {1, 8, 4}); const Shape operand1 = ShapeUtil::MakeShape(BF16, {1, 5}); const Shape expected_output0_shape = ShapeUtil::MakeShape(F32, {8, 8, 4}); const Shape expected_output1_shape = ShapeUtil::MakeShape(BF16, {8, 5}); const Shape expected_shape = ShapeUtil::MakeTupleShape( { ShapeUtil::MakeTupleShape({operand0, operand1}), ShapeUtil::MakeTupleShape( {expected_output0_shape, expected_output1_shape})}); const absl::StatusOr<Shape> inferred_ag_shape = ShapeInference::InferAllGatherStartShape({&operand0, &operand1}, 0, 8); EXPECT_TRUE(inferred_ag_shape.ok()); EXPECT_TRUE(ShapeUtil::Equal(*inferred_ag_shape, expected_shape)); } TEST_F(ShapeInferenceTest, AllGatherDone) { const Shape input_shape = ShapeUtil::MakeTupleShape({ShapeUtil::MakeShape(F32, {1, 8, 4}), ShapeUtil::MakeShape(F32, {8, 8, 4})}); const Shape expected_shape = 
ShapeUtil::MakeShape(F32, {8, 8, 4}); const absl::StatusOr<Shape> inferred_ag_done_shape = ShapeInference::InferAllGatherDoneShape(input_shape); EXPECT_TRUE(inferred_ag_done_shape.ok()); EXPECT_TRUE(ShapeUtil::Equal(*inferred_ag_done_shape, expected_shape)); } TEST_F(ShapeInferenceTest, AllGatherDoneMultiOperand) { const Shape operand0 = ShapeUtil::MakeShape(F32, {1, 8, 4}); const Shape operand1 = ShapeUtil::MakeShape(BF16, {1, 5}); const Shape expected_output0_shape = ShapeUtil::MakeShape(F32, {8, 8, 4}); const Shape expected_output1_shape = ShapeUtil::MakeShape(BF16, {8, 5}); const Shape input_shape = ShapeUtil::MakeTupleShape( { ShapeUtil::MakeTupleShape({operand0, operand1}), ShapeUtil::MakeTupleShape( {expected_output0_shape, expected_output1_shape})}); const Shape expected_shape = ShapeUtil::MakeTupleShape( {expected_output0_shape, expected_output1_shape}); const absl::StatusOr<Shape> inferred_ag_done_shape = ShapeInference::InferAllGatherDoneShape(input_shape); EXPECT_TRUE(inferred_ag_done_shape.ok()); EXPECT_TRUE(ShapeUtil::Equal(*inferred_ag_done_shape, expected_shape)); } TEST_F(ShapeInferenceTest, Convolve) { ConvolutionDimensionNumbers dnums; const Shape lhs_shape = ShapeUtil::MakeShape(F32, {10, 11, 3, 4}); dnums.set_input_batch_dimension(0); dnums.set_output_batch_dimension(0); dnums.set_input_feature_dimension(1); dnums.set_output_feature_dimension(1); dnums.add_input_spatial_dimensions(2); dnums.add_output_spatial_dimensions(2); dnums.add_input_spatial_dimensions(3); dnums.add_output_spatial_dimensions(3); const Shape rhs_shape = ShapeUtil::MakeShape(F32, {2, 12, 11, 3}); dnums.set_kernel_input_feature_dimension(2); dnums.set_kernel_output_feature_dimension(1); dnums.add_kernel_spatial_dimensions(3); dnums.add_kernel_spatial_dimensions(0); Window window; const auto dim0 = window.add_dimensions(); const auto dim1 = window.add_dimensions(); dim0->set_size(3); dim0->set_stride(2); dim0->set_padding_low(1); dim0->set_padding_high(1); dim0->set_window_dilation(1); dim0->set_base_dilation(1); dim1->set_size(2); dim1->set_stride(1); dim1->set_padding_low(0); dim1->set_padding_high(0); dim1->set_window_dilation(1); dim1->set_base_dilation(1); const absl::StatusOr<Shape> inferred_shape = ShapeInference::InferConvolveShape( lhs_shape, rhs_shape, 1, 1, window, dnums, std::nullopt); ASSERT_IS_OK(inferred_shape.status()); ASSERT_TRUE(ShapeUtil::Equal(ShapeUtil::MakeShape(F32, {10, 12, 2, 3}), *inferred_shape)); } TEST_F(ShapeInferenceTest, ConvolveWithWindowDilation) { ConvolutionDimensionNumbers dnums; const Shape lhs_shape = ShapeUtil::MakeShape(F32, {10, 11, 103, 4}); dnums.set_input_batch_dimension(0); dnums.set_output_batch_dimension(0); dnums.set_input_feature_dimension(1); dnums.set_output_feature_dimension(1); dnums.add_input_spatial_dimensions(2); dnums.add_output_spatial_dimensions(2); dnums.add_input_spatial_dimensions(3); dnums.add_output_spatial_dimensions(3); const Shape rhs_shape = ShapeUtil::MakeShape(F32, {2, 12, 11, 3}); dnums.set_kernel_input_feature_dimension(2); dnums.set_kernel_output_feature_dimension(1); dnums.add_kernel_spatial_dimensions(3); dnums.add_kernel_spatial_dimensions(0); Window window; const auto dim0 = window.add_dimensions(); dim0->set_size(3); dim0->set_stride(3); dim0->set_padding_low(0); dim0->set_padding_high(0); dim0->set_window_dilation(6); dim0->set_base_dilation(1); const auto dim1 = window.add_dimensions(); dim1->set_size(2); dim1->set_stride(1); dim1->set_padding_low(2); dim1->set_padding_high(1); dim1->set_window_dilation(2); 
dim1->set_base_dilation(1); const absl::StatusOr<Shape> inferred_shape = ShapeInference::InferConvolveShape( lhs_shape, rhs_shape, 1, 1, window, dnums, std::nullopt); ASSERT_IS_OK(inferred_shape.status()); ASSERT_TRUE(ShapeUtil::Equal(ShapeUtil::MakeShape(F32, {10, 12, 31, 5}), *inferred_shape)); } TEST_F(ShapeInferenceTest, ConvolveWithBaseDilation) { ConvolutionDimensionNumbers dnums; const Shape lhs_shape = ShapeUtil::MakeShape(F32, {10, 11, 3, 4}); dnums.set_input_batch_dimension(0); dnums.set_output_batch_dimension(0); dnums.set_input_feature_dimension(1); dnums.set_output_feature_dimension(1); dnums.add_input_spatial_dimensions(2); dnums.add_output_spatial_dimensions(2); dnums.add_input_spatial_dimensions(3); dnums.add_output_spatial_dimensions(3); const Shape rhs_shape = ShapeUtil::MakeShape(F32, {2, 12, 11, 4}); dnums.set_kernel_input_feature_dimension(2); dnums.set_kernel_output_feature_dimension(1); dnums.add_kernel_spatial_dimensions(3); dnums.add_kernel_spatial_dimensions(0); Window window; const auto dim0 = window.add_dimensions(); dim0->set_size(4); dim0->set_stride(3); dim0->set_padding_low(0); dim0->set_padding_high(0); dim0->set_window_dilation(1); dim0->set_base_dilation(6); const auto dim1 = window.add_dimensions(); dim1->set_size(2); dim1->set_stride(1); dim1->set_padding_low(2); dim1->set_padding_high(1); dim1->set_window_dilation(1); dim1->set_base_dilation(2); const absl::StatusOr<Shape> inferred_shape = ShapeInference::InferConvolveShape( lhs_shape, rhs_shape, 1, 1, window, dnums, std::nullopt); ASSERT_IS_OK(inferred_shape.status()); ASSERT_TRUE(ShapeUtil::Equal(ShapeUtil::MakeShape(F32, {10, 12, 4, 9}), *inferred_shape)); } TEST_F(ShapeInferenceTest, ConvolveDimensionNumbersOverlapError) { const Shape lhs_shape = ShapeUtil::MakeShape(F32, {10, 11, 3, 4}); const Shape rhs_shape = ShapeUtil::MakeShape(F32, {12, 11, 3, 2}); ConvolutionDimensionNumbers dnums; dnums.set_input_batch_dimension(3); dnums.set_output_batch_dimension(3); dnums.set_input_feature_dimension(2); dnums.set_output_feature_dimension(2); dnums.add_input_spatial_dimensions(0); dnums.add_output_spatial_dimensions(0); dnums.add_input_spatial_dimensions(1); dnums.add_output_spatial_dimensions(1); dnums.set_kernel_input_feature_dimension(0); dnums.set_kernel_output_feature_dimension(3); dnums.add_kernel_spatial_dimensions(0); dnums.add_kernel_spatial_dimensions(1); Window window; const auto dim0 = window.add_dimensions(); const auto dim1 = window.add_dimensions(); dim0->set_size(2); dim0->set_stride(1); dim0->set_padding_low(0); dim0->set_padding_high(0); dim1->set_size(3); dim1->set_stride(2); dim1->set_padding_low(1); dim1->set_padding_high(1); const absl::StatusOr<Shape> inferred_shape = ShapeInference::InferConvolveShape( lhs_shape, rhs_shape, 1, 1, window, dnums, std::nullopt); ASSERT_FALSE(inferred_shape.ok()); ASSERT_THAT(inferred_shape.status().message(), HasSubstr("each dimension exactly once")); } TEST_F(ShapeInferenceTest, ConvolveBatchGroupCountUnequalOutputFeature) { ConvolutionDimensionNumbers dnums; dnums.set_input_batch_dimension(0); dnums.set_input_feature_dimension(1); dnums.add_input_spatial_dimensions(2); dnums.add_input_spatial_dimensions(3); dnums.set_kernel_input_feature_dimension(0); dnums.set_kernel_output_feature_dimension(1); dnums.add_kernel_spatial_dimensions(2); dnums.add_kernel_spatial_dimensions(3); dnums.set_output_batch_dimension(0); dnums.set_output_feature_dimension(1); dnums.add_output_spatial_dimensions(2); dnums.add_output_spatial_dimensions(3); const Shape lhs_shape 
= ShapeUtil::MakeShape(F32, {60, 38, 17, 13}); const Shape rhs_shape = ShapeUtil::MakeShape(F32, {38, 10, 4, 4}); Window window; const auto dim0 = window.add_dimensions(); const auto dim1 = window.add_dimensions(); dim0->set_size(4); dim1->set_size(4); dim0->set_padding_low(0); dim0->set_padding_high(2); dim1->set_padding_low(2); dim1->set_padding_high(1); dim0->set_stride(1); dim1->set_stride(1); dim0->set_window_dilation(3); dim1->set_window_dilation(2); const absl::StatusOr<Shape> inferred_shape = ShapeInference::InferConvolveShape( lhs_shape, rhs_shape, 1, 6, window, dnums, std::nullopt); ASSERT_FALSE(inferred_shape.ok()); ASSERT_THAT(inferred_shape.status().message(), HasSubstr("to be a multiple of batch group count")); } struct ConvolveArgs { Shape lhs_shape; Shape rhs_shape; ConvolutionDimensionNumbers dnums; Window window; }; ConvolveArgs MakeConvolveArgs(PrimitiveType lhs_type, PrimitiveType rhs_type) { ConvolveArgs args; ConvolutionDimensionNumbers& dnums = args.dnums; args.lhs_shape = ShapeUtil::MakeShape(lhs_type, {10, 11, 3, 4}); dnums.set_input_batch_dimension(0); dnums.set_output_batch_dimension(0); dnums.set_input_feature_dimension(1); dnums.set_output_feature_dimension(1); dnums.add_input_spatial_dimensions(2); dnums.add_output_spatial_dimensions(2); dnums.add_input_spatial_dimensions(3); dnums.add_output_spatial_dimensions(3); args.rhs_shape = ShapeUtil::MakeShape(rhs_type, {2, 12, 11, 3}); dnums.set_kernel_input_feature_dimension(2); dnums.set_kernel_output_feature_dimension(1); dnums.add_kernel_spatial_dimensions(3); dnums.add_kernel_spatial_dimensions(0); auto dim0 = args.window.add_dimensions(); auto dim1 = args.window.add_dimensions(); dim0->set_size(3); dim0->set_stride(2); dim0->set_padding_low(1); dim0->set_padding_high(1); dim0->set_window_dilation(1); dim0->set_base_dilation(1); dim1
1065
cpp
tensorflow/tensorflow
pjrt_tensor_buffer_util
tensorflow/compiler/jit/pjrt_tensor_buffer_util.cc
tensorflow/compiler/jit/pjrt_tensor_buffer_util_test.cc
#ifndef TENSORFLOW_COMPILER_JIT_PJRT_TENSOR_BUFFER_UTIL_H_ #define TENSORFLOW_COMPILER_JIT_PJRT_TENSOR_BUFFER_UTIL_H_ #include <memory> #include "absl/status/statusor.h" #include "tensorflow/compiler/jit/pjrt_tensor_buffer.h" #include "xla/pjrt/pjrt_client.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" namespace tensorflow { absl::StatusOr<Tensor> MakeTensorFromPjRtBuffer( DataType dtype, const TensorShape& shape, std::unique_ptr<xla::PjRtBuffer> pjrt_buffer); class PjRtTensorBufferUtil { public: static absl::Status UpdateOrMakeTensorWithPjRtBuffer( DataType dtype, const TensorShape& shape, std::unique_ptr<xla::PjRtBuffer> pjrt_buffer, Tensor* output_tensor); }; } #endif #include "tensorflow/compiler/jit/pjrt_tensor_buffer_util.h" #include <cstddef> #include <memory> #include <utility> #include "absl/status/status.h" #include "absl/status/statusor.h" #include "tensorflow/compiler/jit/pjrt_tensor_buffer.h" #include "xla/pjrt/pjrt_client.h" #include "tensorflow/core/common_runtime/dma_helper.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/types.h" #include "tsl/platform/statusor.h" namespace tensorflow { static size_t GetTensorSize(const TensorShape& shape, const DataType dtype) { return shape.num_elements() * DataTypeSize(dtype); } absl::StatusOr<Tensor> MakeTensorFromPjRtBuffer( const DataType dtype, const TensorShape& shape, std::unique_ptr<xla::PjRtBuffer> pjrt_buffer) { TF_ASSIGN_OR_RETURN(std::unique_ptr<xla::PjRtBuffer::ExternalReference> ref, pjrt_buffer->AcquireExternalReference()); auto* tensor_buffer = new PjRtTensorBuffer(ref->OpaqueDeviceMemoryDataPointer(), GetTensorSize(shape, dtype), std::move(pjrt_buffer)); Tensor result(dtype, shape, tensor_buffer); tensor_buffer->Unref(); return result; } static bool ShouldReuseTensor(void* opaque_device_memory, const size_t expected_size, const Tensor* existing_tensor) { const PjRtTensorBuffer* input_pjrt_tensor_buffer = dynamic_cast<const PjRtTensorBuffer*>(DMAHelper::buffer(existing_tensor)); if (input_pjrt_tensor_buffer != nullptr) { return false; } const size_t current_size = GetTensorSize(existing_tensor->shape(), existing_tensor->dtype()); return existing_tensor->tensor_data().data() == opaque_device_memory && current_size == expected_size; } absl::Status PjRtTensorBufferUtil::UpdateOrMakeTensorWithPjRtBuffer( const DataType dtype, const TensorShape& shape, std::unique_ptr<xla::PjRtBuffer> pjrt_buffer, Tensor* output_tensor) { TF_ASSIGN_OR_RETURN(std::unique_ptr<xla::PjRtBuffer::ExternalReference> ref, pjrt_buffer->AcquireExternalReference()); const size_t expected_size = GetTensorSize(shape, dtype); void* opaque_device_memory = ref->OpaqueDeviceMemoryDataPointer(); auto* tensor_buffer = new PjRtTensorBuffer( opaque_device_memory, expected_size, std::move(pjrt_buffer)); if (ShouldReuseTensor(opaque_device_memory, expected_size, output_tensor)) { output_tensor->buf_ = tensor_buffer; return absl::OkStatus(); } Tensor result(dtype, shape, tensor_buffer); tensor_buffer->Unref(); *output_tensor = result; return absl::OkStatus(); } }
#include "tensorflow/compiler/jit/pjrt_tensor_buffer_util.h" #include <cstdint> #include <optional> #include <utility> #include <vector> #include <gtest/gtest.h> #include "tensorflow/compiler/jit/test_util.h" #include "xla/pjrt/pjrt_client.h" #include "xla/shape.h" #include "xla/shape_util.h" #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/device.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/tfrt/common/pjrt_util.h" #include "tsl/platform/statusor.h" namespace tensorflow { namespace { TEST(PjRtTensorBufferUtilTest, MakeTensorFromPjRtBuffer) { DeviceSetup device_setup; device_setup.AddDevicesAndSetUp({DEVICE_GPU}); Device* device = device_setup.GetDevice(DEVICE_GPU); std::vector<int64_t> dimensions = {2, 3}; Tensor dest_cpu_tensor(cpu_allocator(), tensorflow::DT_INT32, tensorflow::TensorShape(dimensions)); TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client, GetPjRtClient(DEVICE_GPU)); std::vector<int32_t> data{1, 2, 3, 4, 5, 6}; xla::Shape xla_shape = xla::ShapeUtil::MakeShape(xla::S32, dimensions); TF_ASSERT_OK_AND_ASSIGN( auto pjrt_buffer, pjrt_client->BufferFromHostBuffer( data.data(), xla_shape.element_type(), xla_shape.dimensions(), std::nullopt, xla::PjRtClient::HostBufferSemantics::kImmutableOnlyDuringCall, nullptr, pjrt_client->addressable_devices()[0])); TF_ASSERT_OK_AND_ASSIGN( Tensor tensor, MakeTensorFromPjRtBuffer(DT_INT32, TensorShape(dimensions), std::move(pjrt_buffer))); auto s = device->tensorflow_accelerator_device_info() ->pjrt_context->CopyDeviceTensorToCPUSync(&tensor, "", device, &dest_cpu_tensor); for (int i = 0; i < tensor.NumElements(); ++i) { EXPECT_EQ(dest_cpu_tensor.flat<int32_t>().data()[i], data[i]); } } } }
1066
cpp
tensorflow/tensorflow
encapsulate_xla_computations_pass
tensorflow/compiler/jit/encapsulate_xla_computations_pass.cc
tensorflow/compiler/jit/encapsulate_xla_computations_pass_test.cc
#ifndef TENSORFLOW_COMPILER_JIT_ENCAPSULATE_XLA_COMPUTATIONS_PASS_H_ #define TENSORFLOW_COMPILER_JIT_ENCAPSULATE_XLA_COMPUTATIONS_PASS_H_ #include <functional> #include <string> #include "tensorflow/core/common_runtime/optimization_registry.h" #include "tensorflow/core/graph/graph.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/statusor.h" namespace tensorflow { class EncapsulateXlaComputationsPass : public GraphOptimizationPass { public: Status Run(const GraphOptimizationPassOptions& options) override; static Status Encapsulate(std::unique_ptr<Graph>* graph, FunctionLibraryDefinition* flib_def); static Status BuildXlaLaunchOps(Graph* graph); struct XlaFunctionInfo { int variable_start_index = -1; std::string function_name; }; static Status BuildXlaLaunchOps( Graph* graph, const std::function<absl::StatusOr<bool>(const Node&)>& is_xla_launch_node, const std::function<absl::StatusOr<XlaFunctionInfo>(const Node&)>& get_xla_function_info, bool add_edges_to_output_of_downstream_nodes); }; } #endif #include "tensorflow/compiler/jit/encapsulate_xla_computations_pass.h" #include <functional> #include <string> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_set.h" #include "absl/memory/memory.h" #include "absl/strings/ascii.h" #include "absl/strings/str_cat.h" #include "tensorflow/compiler/jit/defs.h" #include "tensorflow/compiler/jit/encapsulate_subgraphs_pass.h" #include "tensorflow/compiler/jit/xla_cluster_util.h" #include "xla/status_macros.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/graph/graph_node_util.h" #include "tensorflow/core/lib/core/stringpiece.h" #include "tensorflow/core/lib/hash/hash.h" #include "tensorflow/core/lib/strings/proto_serialization.h" #include "tensorflow/core/lib/strings/str_util.h" #include "tensorflow/core/platform/fingerprint.h" #include "tensorflow/core/platform/statusor.h" #include "tensorflow/core/util/dump_graph.h" namespace tensorflow { namespace { const char* const kXlaClusterOutput = "XlaClusterOutput"; bool IsCpuGpuCompile(const Graph* graph) { for (Node* n : graph->nodes()) { string name; if (!TryGetNodeAttr(n->attrs(), kXlaClusterIdAttr, &name)) continue; DeviceNameUtils::ParsedName parsed; if (DeviceNameUtils::ParseFullName(n->requested_device(), &parsed)) { if (parsed.type != DEVICE_CPU && parsed.type != DEVICE_GPU) { return false; } } } return true; } bool is_guaranteed_constant(const Node& n) { bool guaranteed_constant = false; if (!TryGetNodeAttr(n.attrs(), "_is_guaranteed_constant", &guaranteed_constant)) { return false; } return guaranteed_constant; } Status GetIndexAttr(const Node& n, int num_args, int* index) { TF_RETURN_IF_ERROR(GetNodeAttr(n.attrs(), "index", index)); if (*index < 0 || *index >= num_args) { return errors::InvalidArgument("Invalid ", n.type_string(), " number ", *index); } return absl::OkStatus(); } DataType EdgeType(const Edge* edge) { return edge->dst()->input_type(edge->dst_input()); } void AddControlInputs(const Node& node, absl::flat_hash_set<Node*>* deps) { for (const Edge* edge : node.in_edges()) { if (edge->IsControlEdge()) { deps->insert(edge->src()); } } } void AddControlOutputs(const Node& node, absl::flat_hash_set<Node*>* deps) { for (const Edge* edge : node.out_edges()) { if (edge->IsControlEdge()) { deps->insert(edge->dst()); } } } Status RewriteSubgraph(const std::vector<OutputTensor>& arg_source_tensors, std::unique_ptr<Graph>* graph_ptr, std::vector<int>* input_permutation, 
std::vector<int>* output_permutation, NodeDef* call_def) { Graph* graph = graph_ptr->get(); const int num_args = input_permutation->size(); const int num_retvals = output_permutation->size(); std::vector<Node*> args; std::vector<Node*> retvals; args.reserve(num_args); retvals.reserve(num_retvals); for (Node* n : graph->nodes()) { if (n->type_string() == "_Arg") { if (is_guaranteed_constant(*n)) { return errors::InvalidArgument( "Guaranteed constants are not supported (", n->name(), ")"); } args.push_back(n); } else if (n->type_string() == "_Retval") { retvals.push_back(n); } } if (std::find(args.begin(), args.end(), nullptr) != args.end()) { return errors::InvalidArgument("Missing or non-consecutive arguments"); } std::sort(args.begin(), args.end(), [&](Node* a, Node* b) { bool a_is_resource = (a->output_type(0) == DT_RESOURCE); bool b_is_resource = (b->output_type(0) == DT_RESOURCE); StringPiece a_name(a->name()); StringPiece b_name(b->name()); return std::tie(a_is_resource, a_name) < std::tie(b_is_resource, b_name); }); std::sort(retvals.begin(), retvals.end(), [](Node* a, Node* b) { return a->name() < b->name(); }); int variable_start_index = num_args; for (int i = 0; i < num_args; ++i) { int index; TF_RETURN_IF_ERROR(GetIndexAttr(*args[i], num_args, &index)); if (args[i]->output_type(0) == DT_RESOURCE && variable_start_index == num_args) { variable_start_index = i; } (*input_permutation)[index] = i; args[i]->AddAttr("index", i); } VLOG(4) << "variable_start_index: " << variable_start_index; for (int i = 0; i < num_retvals; ++i) { int index; TF_RETURN_IF_ERROR(GetIndexAttr(*retvals[i], num_retvals, &index)); (*output_permutation)[index] = i; retvals[i]->AddAttr("index", i); } AddNodeAttr(kXlaClusterIdAttr, call_def->name(), call_def); AddNodeAttr("_variable_start_index", variable_start_index, call_def); TF_ASSIGN_OR_RETURN(uint64 fingerprint, FingerprintGraph(*graph)); VLOG(1) << "Subgraph fingerprint:" << fingerprint; call_def->set_op(absl::StrCat(call_def->op(), "_", fingerprint)); return absl::OkStatus(); } } Status EncapsulateXlaComputationsPass::Encapsulate( std::unique_ptr<Graph>* graph, FunctionLibraryDefinition* flib_def) { for (const Edge* e : (*graph)->edges()) { if (!e->IsControlEdge() && e->src()->attrs().Find(kXlaClusterIdAttr) != nullptr && e->dst()->attrs().Find(kXlaClusterIdAttr) == nullptr && e->dst()->type_string() != kXlaClusterOutput) { return errors::InvalidArgument( "Undeclared output of XLA computation. Some common causes of this " "error are: 1) variable initializers that depend on the XLA " "computation; 2) gradient computations that depend on the XLA " "computation, which can be mitigated by moving gradient computations " "inside XLA computation. 
Offending edge: ", e->src()->name(), ":", e->src_output(), " -> ", e->dst()->name(), ":", e->dst_input()); } } auto output = std::make_unique<Graph>((*graph)->op_registry()); TF_RETURN_WITH_CONTEXT_IF_ERROR( EncapsulateSubgraphsInFunctions( kXlaClusterIdAttr, **graph, RewriteSubgraph, true, &output, flib_def), "EncapsulateXlaComputationsPass failed"); graph->swap(output); return absl::OkStatus(); } Status EncapsulateXlaComputationsPass::BuildXlaLaunchOps( Graph* graph, const std::function<absl::StatusOr<bool>(const Node&)>& is_xla_launch_node, const std::function<absl::StatusOr<XlaFunctionInfo>(const Node&)>& get_xla_function_info, const bool add_edges_to_output_of_downstream_nodes) { std::vector<Node*> launch_nodes; for (Node* n : graph->nodes()) { TF_ASSIGN_OR_RETURN(const bool is_xla_launch_node, is_xla_launch_node(*n)); if (is_xla_launch_node) launch_nodes.push_back(n); } for (Node* launch : launch_nodes) { TF_ASSIGN_OR_RETURN(const XlaFunctionInfo xla_function_info, get_xla_function_info(*launch)); std::vector<const Edge*> in_edges; TF_RETURN_IF_ERROR(launch->input_edges(&in_edges)); const int num_inputs = in_edges.size(); const int variable_start_index = xla_function_info.variable_start_index; const int num_variables = num_inputs - variable_start_index; const int num_args = variable_start_index; VLOG(4) << "Launch node '" << launch->name() << "'" << " input edges: " << in_edges.size() << " num_args: " << num_args << " num_variables: " << num_variables; std::vector<Node*> nodes_to_remove = {launch}; std::vector<std::pair<Node*, int>> data_inputs(num_inputs); absl::flat_hash_set<Node*> control_inputs; DataTypeVector arg_types(num_args); AddControlInputs(*launch, &control_inputs); for (int i = 0; i < num_args; ++i) { const Edge* edge = in_edges[i]; data_inputs[i] = {edge->src(), edge->src_output()}; arg_types[i] = EdgeType(edge); } for (int i = 0; i < num_variables; ++i) { int pos = variable_start_index + i; const Edge* edge = in_edges[pos]; data_inputs[pos] = {edge->src(), edge->src_output()}; } const int num_outputs = launch->output_types().size(); absl::flat_hash_set<Node*> control_outputs; std::vector<std::vector<std::pair<Node*, int>>> data_outputs(num_outputs); const DataTypeVector& output_types(launch->output_types()); for (const Edge* le : launch->out_edges()) { if (le->IsControlEdge()) { control_outputs.insert(le->dst()); } else { TF_RET_CHECK(le->src_output() < num_outputs); Node* output_node = le->dst(); if (add_edges_to_output_of_downstream_nodes) { TF_RET_CHECK(output_node->type_string() == kXlaClusterOutput) << le->DebugString(); nodes_to_remove.push_back(output_node); for (const Edge* oe : output_node->out_edges()) { TF_RET_CHECK(!oe->IsControlEdge()); data_outputs[le->src_output()].push_back( {oe->dst(), oe->dst_input()}); } AddControlOutputs(*output_node, &control_outputs); } else { data_outputs[le->src_output()].push_back( {le->dst(), le->dst_input()}); } } } NodeDef def; def.set_name(launch->name()); MergeDebugInfo(NodeDebugInfo(launch->def()), &def); VLOG(2) << "Replacing with XlaLaunch"; VLOG(2) << "Device is " << launch->requested_device(); def.set_op("XlaLaunch"); def.set_device(launch->requested_device()); AddNodeAttr("Tconstants", DataTypeVector{}, &def); AddNodeAttr("Targs", arg_types, &def); AddNodeAttr("Nresources", num_variables, &def); AddNodeAttr("Tresults", output_types, &def); NameAttrList function; function.set_name(xla_function_info.function_name); AddNodeAttr("function", function, &def); for (Node* node : nodes_to_remove) { VLOG(2) << "Deleting node " 
<< node->DebugString(); control_inputs.erase(node); control_outputs.erase(node); graph->RemoveNode(node); } TF_ASSIGN_OR_RETURN(Node * xla_launch, graph->AddNode(def)); for (int i = 0, end = data_inputs.size(); i < end; ++i) { graph->AddEdge(data_inputs[i].first, data_inputs[i].second, xla_launch, i); } for (Node* n : control_inputs) { graph->AddControlEdge(n, xla_launch); } for (int i = 0, end = data_outputs.size(); i < end; ++i) { for (const auto& successor : data_outputs[i]) { graph->AddEdge(xla_launch, i, successor.first, successor.second); } } for (Node* n : control_outputs) { graph->AddControlEdge(xla_launch, n); } } return absl::OkStatus(); } Status EncapsulateXlaComputationsPass::BuildXlaLaunchOps( Graph* graph) { const auto is_xla_launch_node = [](const Node& node) -> absl::StatusOr<bool> { const string& name = GetNodeAttrString(node.attrs(), kXlaClusterIdAttr); return !name.empty(); }; const auto get_xla_function_info = [](const Node& node) -> absl::StatusOr<XlaFunctionInfo> { XlaFunctionInfo result; TF_RETURN_IF_ERROR(GetNodeAttr(node.attrs(), "_variable_start_index", &result.variable_start_index)); result.function_name = node.type_string(); return result; }; return BuildXlaLaunchOps(graph, is_xla_launch_node, get_xla_function_info, true); } Status EncapsulateXlaComputationsPass::Run( const GraphOptimizationPassOptions& options) { VLOG(1) << "EncapsulateXlaComputations(): " << DumpGraphToFile("encapsulate_xla_computations_before", **options.graph, options.flib_def); const char* additional_help = IsCpuGpuCompile(options.graph->get()) ? xla::status_macros::kPossibleAutoJitAlternative : ""; TF_RETURN_WITH_CONTEXT_IF_ERROR(Encapsulate(options.graph, options.flib_def), additional_help); VLOG(1) << "EncapsulateXlaComputations() half-way: " << DumpGraphToFile("encapsulate_xla_computations_halfway", **options.graph, options.flib_def); TF_RETURN_WITH_CONTEXT_IF_ERROR(BuildXlaLaunchOps(options.graph->get()), additional_help); VLOG(1) << "EncapsulateXlaComputations() finished: " << DumpGraphToFile("encapsulate_xla_computations_after", **options.graph, options.flib_def); return absl::OkStatus(); } }
#include "tensorflow/compiler/jit/encapsulate_xla_computations_pass.h" #include "tensorflow/cc/ops/function_ops.h" #include "tensorflow/cc/ops/resource_variable_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/compiler/jit/defs.h" #include "tensorflow/compiler/jit/encapsulate_subgraphs_pass.h" #include "tensorflow/compiler/jit/xla_cluster_util.h" #include "tensorflow/compiler/tf2xla/cc/ops/xla_jit_ops.h" #include "tensorflow/compiler/tf2xla/test_util.h" #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/framework/graph_to_functiondef.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/lib/hash/hash.h" #include "tensorflow/core/lib/strings/proto_serialization.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/util/equal_graph_def.h" namespace tensorflow { static std::unique_ptr<Graph> MakeOuterGraph( const FunctionLibraryDefinition& flib_def, const string& function) { Scope scope = Scope::NewRootScope().ExitOnError(); TF_EXPECT_OK(scope.graph()->AddFunctionLibrary(flib_def.ToProto())); auto a = ops::Placeholder(scope.WithOpName("A"), DT_INT32); auto b = ops::Placeholder(scope.WithOpName("B"), DT_FLOAT); auto c = ops::Placeholder(scope.WithOpName("C"), DT_INT32); auto d = ops::Placeholder(scope.WithOpName("D"), DT_FLOAT); auto u = ops::Placeholder(scope.WithOpName("U"), DT_RESOURCE); auto v = ops::Placeholder(scope.WithOpName("V"), DT_RESOURCE); auto w = ops::Placeholder(scope.WithOpName("W"), DT_RESOURCE); NodeDef def; TF_CHECK_OK(NodeDefBuilder("launch0", function, &flib_def) .Input(a.node()->name(), 0, DT_INT32) .Input(b.node()->name(), 0, DT_FLOAT) .Input(c.node()->name(), 0, DT_INT32) .Input(d.node()->name(), 0, DT_FLOAT) .Input(u.node()->name(), 0, DT_RESOURCE) .Input(v.node()->name(), 0, DT_RESOURCE) .Input(w.node()->name(), 0, DT_RESOURCE) .Device("/gpu:0") .Attr(kXlaClusterIdAttr, "launch0") .Attr("_variable_start_index", 4) .Finalize(&def)); Status status; Node* launch = scope.graph()->AddNode(def, &status); TF_CHECK_OK(status); TF_CHECK_OK(scope.DoShapeInference(launch)); scope.graph()->AddEdge(a.node(), 0, launch, 0); scope.graph()->AddEdge(b.node(), 0, launch, 1); scope.graph()->AddEdge(c.node(), 0, launch, 2); scope.graph()->AddEdge(d.node(), 0, launch, 3); scope.graph()->AddEdge(u.node(), 0, launch, 4); scope.graph()->AddEdge(v.node(), 0, launch, 5); scope.graph()->AddEdge(w.node(), 0, launch, 6); auto out0 = ops::XlaClusterOutput(scope.WithOpName("Out0"), Output(launch, 0)); auto out1 = ops::XlaClusterOutput(scope.WithOpName("Out1"), Output(launch, 1)); auto out2 = ops::XlaClusterOutput(scope.WithOpName("Out2"), Output(launch, 2)); auto out3 = ops::XlaClusterOutput(scope.WithOpName("Out3"), Output(launch, 3)); auto consumer0_a = ops::Identity(scope.WithOpName("consumer0_a"), out0); auto consumer0_b = ops::Identity(scope.WithOpName("consumer0_b"), out0); auto consumer0_c = ops::Identity(scope.WithOpName("consumer0_c"), out0); auto consumer1 = ops::Identity(scope.WithOpName("consumer1"), out1); auto consumer2 = ops::Identity(scope.WithOpName("consumer2"), out2); auto consumer3 = ops::Identity(scope.WithOpName("consumer3"), out3); std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); TF_CHECK_OK(scope.ToGraph(graph.get())); return graph; } static std::unique_ptr<Graph> MakeBodyGraph() { Scope scope = Scope::NewRootScope().ExitOnError(); auto arg0 = ops::_Arg(scope.WithOpName("a_0_arg"), DT_INT32, 0); auto arg1 = ops::_Arg(scope.WithOpName("b_0_arg"), 
DT_FLOAT, 1); auto arg2 = ops::_Arg(scope.WithOpName("c_0_arg"), DT_INT32, 2); auto arg3 = ops::_Arg(scope.WithOpName("d_0_arg"), DT_FLOAT, 3); auto arg4 = ops::_Arg(scope.WithOpName("u_0_arg"), DT_RESOURCE, 4); auto arg5 = ops::_Arg(scope.WithOpName("v_0_arg"), DT_RESOURCE, 5); auto arg6 = ops::_Arg(scope.WithOpName("w_0_arg"), DT_RESOURCE, 6); auto add_attrs = [](Node* node) { node->AddAttr(kXlaClusterIdAttr, "launch0"); node->set_requested_device("/gpu:0"); }; auto b_identity = ops::Identity(scope.WithOpName("B_identity"), arg1); add_attrs(b_identity.node()); auto read_u = ops::ReadVariableOp(scope.WithOpName("ReadU"), arg4, DT_FLOAT); add_attrs(read_u.node()); auto read_v = ops::ReadVariableOp(scope.WithOpName("ReadV"), arg5, DT_FLOAT); add_attrs(read_v.node()); auto read_w = ops::ReadVariableOp(scope.WithOpName("ReadW"), arg6, DT_FLOAT); add_attrs(read_w.node()); auto e = ops::Add(scope.WithOpName("E"), arg0, arg2); add_attrs(e.node()); auto f = ops::Add(scope.WithOpName("F"), read_v, read_w); add_attrs(f.node()); auto g = ops::Add(scope.WithOpName("G"), f, arg3); add_attrs(g.node()); auto out0 = ops::_Retval(scope.WithOpName("b_identity_0_retval_RetVal"), b_identity, 0); auto out1 = ops::_Retval(scope.WithOpName("e_0_retval_RetVal"), e, 1); auto out2 = ops::_Retval(scope.WithOpName("g_0_retval_RetVal"), g, 2); auto out3 = ops::_Retval(scope.WithOpName("readu_0_retval_RetVal"), read_u, 3); std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); TF_CHECK_OK(scope.ToGraph(graph.get())); return graph; } TEST(EncapsulateXlaComputations, DeterministicEncapsulate) { auto get_serialized_graph = [](bool control_input_reversed, bool operand_reversed) -> string { FunctionLibraryDefinition flib_def(OpRegistry::Global(), FunctionDefLibrary()); std::unique_ptr<Graph> graph(new Graph(&flib_def)); { Scope scope = Scope::NewRootScope().ExitOnError(); auto a0 = ops::Placeholder(scope.WithOpName("A0"), DT_INT32); auto a1 = ops::Placeholder(scope.WithOpName("A1"), DT_INT32); ops::Add e = operand_reversed ? 
ops::Add(scope.WithOpName("E"), a0, a1) : ops::Add(scope.WithOpName("E"), a1, a0); auto add_attrs = [](Node* node) { node->AddAttr(kXlaClusterIdAttr, "launch0"); }; add_attrs(e.node()); TF_CHECK_OK(scope.ToGraph(graph.get())); auto get_node_in_graph = [&graph](Node* node) { return graph->FindNodeId(node->id()); }; if (!control_input_reversed) { graph->AddControlEdge(get_node_in_graph(a0.node()), get_node_in_graph(e.node()), true); graph->AddControlEdge(get_node_in_graph(a1.node()), get_node_in_graph(e.node()), true); } else { graph->AddControlEdge(get_node_in_graph(a1.node()), get_node_in_graph(e.node()), true); graph->AddControlEdge(get_node_in_graph(a0.node()), get_node_in_graph(e.node()), true); } } TF_CHECK_OK(EncapsulateXlaComputationsPass::Encapsulate(&graph, &flib_def)); return SerializeGraphDeterministic(*graph).value(); }; EXPECT_EQ(get_serialized_graph(true, false), get_serialized_graph(false, false)); EXPECT_NE(get_serialized_graph(false, true), get_serialized_graph(false, false)); } TEST(EncapsulateXlaComputations, Encapsulate) { FunctionLibraryDefinition flib_def(OpRegistry::Global(), FunctionDefLibrary()); std::unique_ptr<Graph> graph(new Graph(&flib_def)); { Scope scope = Scope::NewRootScope().ExitOnError(); auto a = ops::Placeholder(scope.WithOpName("A"), DT_INT32); auto b = ops::Placeholder(scope.WithOpName("B"), DT_FLOAT); auto c = ops::Placeholder(scope.WithOpName("C"), DT_INT32); auto d = ops::Placeholder(scope.WithOpName("D"), DT_FLOAT); auto u = ops::Placeholder(scope.WithOpName("U"), DT_RESOURCE); auto v = ops::Placeholder(scope.WithOpName("V"), DT_RESOURCE); auto w = ops::Placeholder(scope.WithOpName("W"), DT_RESOURCE); auto add_attrs = [](Node* node) { node->AddAttr(kXlaClusterIdAttr, "launch0"); node->set_requested_device("/gpu:0"); }; auto b_identity = ops::Identity(scope.WithOpName("B_identity"), b); add_attrs(b_identity.node()); auto read_u = ops::ReadVariableOp(scope.WithOpName("ReadU"), u, DT_FLOAT); add_attrs(read_u.node()); auto read_v = ops::ReadVariableOp(scope.WithOpName("ReadV"), v, DT_FLOAT); add_attrs(read_v.node()); auto read_w = ops::ReadVariableOp(scope.WithOpName("ReadW"), w, DT_FLOAT); add_attrs(read_w.node()); auto e = ops::Add(scope.WithOpName("E"), a, c); add_attrs(e.node()); auto f = ops::Add(scope.WithOpName("F"), read_v, read_w); add_attrs(f.node()); auto g = ops::Add(scope.WithOpName("G"), f, d); add_attrs(g.node()); auto out0 = ops::XlaClusterOutput(scope.WithOpName("Out0"), b_identity); auto out1 = ops::XlaClusterOutput(scope.WithOpName("Out1"), e); auto out2 = ops::XlaClusterOutput(scope.WithOpName("Out2"), g); auto out3 = ops::XlaClusterOutput(scope.WithOpName("Out3"), read_u); auto consumer0_a = ops::Identity(scope.WithOpName("consumer0_a"), out0); auto consumer0_b = ops::Identity(scope.WithOpName("consumer0_b"), out0); auto consumer0_c = ops::Identity(scope.WithOpName("consumer0_c"), out0); auto consumer1 = ops::Identity(scope.WithOpName("consumer1"), out1); auto consumer2 = ops::Identity(scope.WithOpName("consumer2"), out2); auto consumer3 = ops::Identity(scope.WithOpName("consumer3"), out3); TF_ASSERT_OK(scope.ToGraph(graph.get())); } std::unique_ptr<Graph> graph_copy(new Graph(&flib_def)); CopyGraph(*graph, graph_copy.get()); TF_ASSERT_OK(EncapsulateXlaComputationsPass::Encapsulate(&graph, &flib_def)); std::unordered_map<string, Node*> index = graph->BuildNodeNameIndex(); string function = index.at("launch0")->type_string(); { std::unique_ptr<Graph> outer = MakeOuterGraph(flib_def, function); GraphDef expected_def; 
outer->ToGraphDef(&expected_def); GraphDef actual_def; graph->ToGraphDef(&actual_def); TF_EXPECT_GRAPH_EQ_INTERNAL(expected_def, actual_def); } { std::unique_ptr<Graph> body = MakeBodyGraph(); GraphDef expected_body_def; body->ToGraphDef(&expected_body_def); InstantiationResultForTest result; TF_EXPECT_OK(InstantiateFunctionForTest(function, flib_def, &result)); EXPECT_EQ((DataTypeVector{DT_INT32, DT_FLOAT, DT_INT32, DT_FLOAT, DT_RESOURCE, DT_RESOURCE, DT_RESOURCE}), result.arg_types); EXPECT_EQ((DataTypeVector{DT_FLOAT, DT_INT32, DT_FLOAT, DT_FLOAT}), result.ret_types); TF_EXPECT_GRAPH_EQ(expected_body_def, result.gdef); } TF_ASSERT_OK( EncapsulateXlaComputationsPass::Encapsulate(&graph_copy, &flib_def)); std::unordered_map<string, Node*> index_copy = graph_copy->BuildNodeNameIndex(); string function_copy = index_copy.at("launch0")->type_string(); EXPECT_EQ(function, function_copy); } TEST(EncapsulateXlaComputations, BuildXlaLaunchOp) { std::unique_ptr<Graph> body_graph = MakeBodyGraph(); FunctionDefLibrary flib; TF_ASSERT_OK(GraphToFunctionDef(*body_graph, "launch0", flib.add_function())); FunctionLibraryDefinition flib_def(OpRegistry::Global(), flib); std::unique_ptr<Graph> graph = MakeOuterGraph(flib_def, "launch0"); TF_ASSERT_OK(EncapsulateXlaComputationsPass::BuildXlaLaunchOps(graph.get())); Scope scope = Scope::DisabledShapeInferenceScope().ExitOnError(); TF_EXPECT_OK(scope.graph()->AddFunctionLibrary(flib)); auto a = ops::Placeholder(scope.WithOpName("A"), DT_INT32); auto b = ops::Placeholder(scope.WithOpName("B"), DT_FLOAT); auto c = ops::Placeholder(scope.WithOpName("C"), DT_INT32); auto d = ops::Placeholder(scope.WithOpName("D"), DT_FLOAT); auto u = ops::Placeholder(scope.WithOpName("U"), DT_RESOURCE); auto v = ops::Placeholder(scope.WithOpName("V"), DT_RESOURCE); auto w = ops::Placeholder(scope.WithOpName("W"), DT_RESOURCE); NameAttrList function; function.set_name("launch0"); auto launch = ops::XlaLaunch( scope.WithOpName("launch0").WithDevice("/gpu:0"), std::initializer_list<Input>{}, std::initializer_list<Input>{a, b, c, d}, std::initializer_list<Input>{u, v, w}, DataTypeVector{DT_FLOAT, DT_INT32, DT_FLOAT, DT_FLOAT}, function); auto consumer0_a = ops::Identity(scope.WithOpName("consumer0_a"), launch.results[0]); auto consumer0_b = ops::Identity(scope.WithOpName("consumer0_b"), launch.results[0]); auto consumer0_c = ops::Identity(scope.WithOpName("consumer0_c"), launch.results[0]); auto consumer1 = ops::Identity(scope.WithOpName("consumer1"), launch.results[1]); auto consumer2 = ops::Identity(scope.WithOpName("consumer2"), launch.results[2]); auto consumer3 = ops::Identity(scope.WithOpName("consumer3"), launch.results[3]); GraphDef expected_def; TF_ASSERT_OK(scope.ToGraphDef(&expected_def)); GraphDef actual_def; graph->ToGraphDef(&actual_def); TF_EXPECT_GRAPH_EQ(expected_def, actual_def); } }
1067
cpp
tensorflow/tensorflow
extract_outside_compilation_pass
tensorflow/compiler/jit/extract_outside_compilation_pass.cc
tensorflow/compiler/jit/extract_outside_compilation_pass_test.cc
#ifndef TENSORFLOW_COMPILER_JIT_EXTRACT_OUTSIDE_COMPILATION_PASS_H_ #define TENSORFLOW_COMPILER_JIT_EXTRACT_OUTSIDE_COMPILATION_PASS_H_ #include "absl/types/optional.h" #include "tensorflow/compiler/jit/encapsulate_util.h" #include "xla/status_macros.h" #include "tensorflow/core/graph/graph.h" namespace tensorflow { class RewriteOutsideCompilationSubgraphFn { public: RewriteOutsideCompilationSubgraphFn( const string& xla_cluster_attr_name, const string& outside_compilation_attr_name, const string& xla_cluster_name, const string& new_function_name) : xla_cluster_attr_name_(xla_cluster_attr_name), outside_compilation_attr_name_(outside_compilation_attr_name), xla_cluster_name_(xla_cluster_name), new_function_name_(new_function_name) {} Status operator()(const std::vector<OutputTensor>&, std::unique_ptr<Graph>* graph, std::vector<int>* input_permutation, std::vector<int>* output_permutation, NodeDef* node_def); private: string xla_cluster_attr_name_; string outside_compilation_attr_name_; string xla_cluster_name_; string new_function_name_; }; Status ExtractOutsideCompilationForFunction( const string& xla_cluster_attr_name, const string& outside_compilation_attr_name, const string& xla_cluster_name, const NameAttrList& func_name_attrs, const string& new_func_name, const string& host_graph_func_name, const std::map<string, int>& host_compute_core, FunctionLibraryRuntime* flr, FunctionLibraryDefinition* fld, std::vector<string>* shape_inference_graphs, bool* has_outside_compilation); Status ExtractOutsideCompilation( const string& xla_cluster_attr_name, const string& outside_compilation_attr_name, const std::unordered_map<string, XlaClusterInfo>& clusters, Graph* g, FunctionLibraryRuntime* flr, FunctionLibraryDefinition* fld, bool* modified); } #endif #include "tensorflow/compiler/jit/extract_outside_compilation_pass.h" #include "absl/container/flat_hash_map.h" #include "absl/strings/match.h" #include "absl/strings/str_cat.h" #include "tensorflow/compiler/jit/encapsulate_subgraphs_pass.h" #include "tensorflow/compiler/jit/encapsulate_util.h" #include "tensorflow/compiler/tf2xla/side_effect_util.h" #include "tensorflow/compiler/tf2xla/tf2xla_util.h" #include "xla/status_macros.h" #include "xla/xla_data.pb.h" #include "tensorflow/core/common_runtime/function.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/graph_to_functiondef.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/node_def_util.h" #include "tensorflow/core/framework/tensor_shape.pb.h" #include "tensorflow/core/graph/algorithm.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/gtl/cleanup.h" #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/util/dump_graph.h" namespace tensorflow { namespace { std::optional<string> HostGraphControlRetMapping(const Node* n) { if (HasNodeAttr(n->def(), kXlaHasHostTransferAttrName)) { return n->name(); } return std::nullopt; } absl::StatusOr<Node*> AddHostComputeKeyPlaceholder( const string& xla_cluster_name, Graph* g) { NodeDef key_def; NodeDefBuilder builder(absl::StrCat(xla_cluster_name, "_key_placeholder"), "Placeholder"); builder.Attr("dtype", DT_STRING); builder.Attr("shape", PartialTensorShape({2})); builder.Attr("_host_compute_call_node", xla_cluster_name); Status s = builder.Finalize(&key_def); if (!s.ok()) return s; Node* n = g->AddNode(key_def, &s); if (!s.ok()) return s; return n; } bool IsKeyPlaceholderNode(const Node& n) { return n.type_string() == "Placeholder" 
&& absl::EndsWith(n.name(), "_key_placeholder"); } std::vector<Node*> GatherNodesWithType(const Graph& g, const string& type) { std::vector<Node*> result; for (Node* n : g.nodes()) { if (n->type_string() == type) { result.push_back(n); } } return result; } Status GetArgDataTypes(const std::vector<Node*>& arg_nodes, std::vector<DataType>* recv_at_host_dtypes) { recv_at_host_dtypes->resize(arg_nodes.size(), DT_INVALID); for (auto* n : arg_nodes) { int index; TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "index", &index)); DataType dtype; TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "T", &dtype)); (*recv_at_host_dtypes)[index] = dtype; } for (int i = 0, end = recv_at_host_dtypes->size(); i < end; i++) { if ((*recv_at_host_dtypes)[i] == DT_INVALID) { return errors::Internal("Cannot get datatype for input ", i); } } return absl::OkStatus(); } absl::StatusOr<Node*> BuildRecvAtHostNode( Graph* g, const string& oc_cluster_name, const std::vector<DataType>& recv_at_host_dtypes, Node* key_placeholder) { NodeDefBuilder recv_at_host_builder( absl::StrCat("outside_compilation_", oc_cluster_name, "_recv"), "_XlaRecvAtHost"); NodeDef recv_at_host_def; recv_at_host_builder.Attr("Toutputs", recv_at_host_dtypes); AttrValue device_ordinal_value; device_ordinal_value.set_placeholder("_device_ordinal"); recv_at_host_builder.Attr("device_ordinal", device_ordinal_value); recv_at_host_builder.Attr( "key", absl::StrCat("host_compute_channel_", oc_cluster_name)); recv_at_host_builder.Attr(kXlaHasHostTransferAttrName, true); recv_at_host_builder.Input(key_placeholder->name(), 0, DT_STRING); TF_RETURN_IF_ERROR(recv_at_host_builder.Finalize(&recv_at_host_def)); TF_ASSIGN_OR_RETURN(Node * recv_at_host_node, g->AddNode(recv_at_host_def)); return recv_at_host_node; } absl::StatusOr<Node*> ReplaceArgNodesWithRecvAtHostNode( Graph* g, const string& oc_cluster_name, std::vector<DataType>* recv_at_host_dtypes, Node* key_placeholder) { std::vector<Node*> arg_nodes = GatherNodesWithType(*g, "_Arg"); TF_RETURN_IF_ERROR(GetArgDataTypes(arg_nodes, recv_at_host_dtypes)); TF_ASSIGN_OR_RETURN( Node * recv_at_host_node, BuildRecvAtHostNode(g, oc_cluster_name, *recv_at_host_dtypes, key_placeholder)); for (auto* n : arg_nodes) { int index; TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "index", &index)); std::vector<OutEdgeInfo> out_edge_info; out_edge_info.reserve(n->out_edges().size()); for (auto edge : n->out_edges()) { out_edge_info.push_back( {edge->dst(), edge->src_output(), edge->dst_input()}); } g->RemoveNode(n); for (const OutEdgeInfo& edge : out_edge_info) { if (edge.dst_input == Graph::kControlSlot) { g->AddControlEdge(recv_at_host_node, edge.dst); } else { g->AddEdge(recv_at_host_node, index, edge.dst, edge.dst_input); } } for (int i = 0, end = out_edge_info.size(); i < end; i++) { const OutEdgeInfo edge = out_edge_info[i]; if (edge.dst_input == Graph::kControlSlot) { continue; } Node* dst = edge.dst; NodeDef new_def = dst->def(); *new_def.mutable_input(edge.dst_input) = absl::StrCat(recv_at_host_node->name(), ":", index); TF_ASSIGN_OR_RETURN(Node * dst_replace, ReplaceNode(g, dst, new_def)); for (int j = i + 1, end = out_edge_info.size(); j < end; j++) { if (out_edge_info[j].dst == dst) { out_edge_info[j].dst = dst_replace; } } } } g->AddEdge(key_placeholder, 0, recv_at_host_node, 0); return recv_at_host_node; } Status GetRetDataTypes(const std::vector<Node*>& ret_nodes, std::vector<DataType>* send_from_host_dtypes) { send_from_host_dtypes->resize(ret_nodes.size(), DT_INVALID); for (auto* n : ret_nodes) { int index; 
TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "index", &index)); DataType dtype; TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "T", &dtype)); (*send_from_host_dtypes)[index] = dtype; } for (int i = 0, end = send_from_host_dtypes->size(); i < end; i++) { if ((*send_from_host_dtypes)[i] == DT_INVALID) { return errors::Internal("Cannot get datatype for output ", i); } } return absl::OkStatus(); } absl::StatusOr<Node*> BuildSendFromHostNode( Graph* g, const string& oc_cluster_name, const std::vector<Node*>& ret_nodes, const std::vector<DataType>& send_from_host_dtypes, Node* key_placeholder) { NodeDefBuilder send_from_host_builder( absl::StrCat("outside_compilation_", oc_cluster_name, "_send"), "_XlaSendFromHost"); NodeDef send_from_host_def; send_from_host_builder.Attr("Tinputs", send_from_host_dtypes); AttrValue device_ordinal_value; device_ordinal_value.set_placeholder("_device_ordinal"); send_from_host_builder.Attr("device_ordinal", device_ordinal_value); send_from_host_builder.Attr( "key", absl::StrCat("host_compute_channel_", oc_cluster_name)); send_from_host_builder.Attr(kXlaHasHostTransferAttrName, true); std::vector<NodeDefBuilder::NodeOut> inputs(send_from_host_dtypes.size()); for (auto* n : ret_nodes) { int index; TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "index", &index)); const int num_dtypes = send_from_host_dtypes.size(); if (index < 0 || index >= num_dtypes) { return errors::Internal("Invalid _Retval index: ", index); } for (auto edge : n->in_edges()) { inputs[index] = NodeDefBuilder::NodeOut{edge->src()->name(), edge->src_output(), edge->src()->output_type(edge->src_output())}; } } send_from_host_builder.Input(inputs); send_from_host_builder.Input(key_placeholder->name(), 0, DT_STRING); TF_RETURN_IF_ERROR(send_from_host_builder.Finalize(&send_from_host_def)); TF_ASSIGN_OR_RETURN(Node * send_from_host_node, g->AddNode(send_from_host_def)); return send_from_host_node; } absl::StatusOr<Node*> ReplaceRetNodesWithSendFromHostNode( Graph* g, const string& oc_cluster_name, std::vector<DataType>* send_from_host_dtypes, Node* key_placeholder) { std::vector<Node*> ret_nodes = GatherNodesWithType(*g, "_Retval"); TF_RETURN_IF_ERROR(GetRetDataTypes(ret_nodes, send_from_host_dtypes)); TF_ASSIGN_OR_RETURN( Node * send_from_host_node, BuildSendFromHostNode(g, oc_cluster_name, ret_nodes, *send_from_host_dtypes, key_placeholder)); for (auto* n : ret_nodes) { int index; TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "index", &index)); for (auto edge : n->in_edges()) { if (edge->src_output() == Graph::kControlSlot) { g->AddControlEdge(edge->src(), send_from_host_node); } else { g->AddEdge(edge->src(), edge->src_output(), send_from_host_node, index); } } g->RemoveNode(n); } g->AddEdge(key_placeholder, 0, send_from_host_node, send_from_host_dtypes->size()); return send_from_host_node; } std::optional<std::vector<PartialTensorShape>> GetInferredInputShapes( int num_inputs, Node* send_from_host_node) { std::vector<PartialTensorShape> results(num_inputs); for (int i = 0; i < num_inputs; i++) { const Edge* e; if (!send_from_host_node->input_edge(i, &e).ok()) { return std::nullopt; } std::vector<PartialTensorShape> shapes; if (!GetNodeAttr(e->src()->attrs(), kXlaInferredShapesAttrName, &shapes) .ok()) { return std::nullopt; } const PartialTensorShape shape = shapes[e->src_output()]; if (!shape.IsFullyDefined()) { return std::nullopt; } results[e->dst_input()] = shape; } return results; } string host_compute_node_name(const string& original_oc_name) { return absl::StrCat("outside_compilation_", original_oc_name, 
"_host_compute"); } absl::StatusOr<NodeDef> BuildXlaHostComputeNodeDef( const Node* call_node, const std::map<string, int>& host_compute_core, const absl::flat_hash_map<string, std::vector<string>>& cluster_deps) { string original_oc_name; TF_RETURN_IF_ERROR(GetNodeAttr( call_node->attrs(), "_outside_compilation_subgraph", &original_oc_name)); NodeDefBuilder host_compute_builder(host_compute_node_name(original_oc_name), "XlaHostCompute"); host_compute_builder.Attr(kXlaOriginalOutsideCompilationNodeName, host_compute_builder.node_name()); for (const auto& attr : call_node->attrs()) { host_compute_builder.Attr(attr.first, attr.second); } const auto iter = host_compute_core.find(original_oc_name); if (iter != host_compute_core.end()) { int core = iter->second; host_compute_builder.Attr("tpu_core", core); } std::vector<string> xla_token_input_nodes; xla_token_input_nodes.emplace_back(kXlaTokenArgNodeName); auto cluster_deps_it = cluster_deps.find(original_oc_name); if (cluster_deps_it != cluster_deps.end()) { for (const auto& dep : cluster_deps_it->second) { xla_token_input_nodes.emplace_back(host_compute_node_name(dep)); } } host_compute_builder.Attr(kXlaTokenInputNodesAttrName, xla_token_input_nodes); std::vector<DataType> input_dtypes; TF_RETURN_IF_ERROR(GetNodeAttr(call_node->attrs(), "Tinputs", &input_dtypes)); std::vector<NodeDefBuilder::NodeOut> inputs(input_dtypes.size()); for (auto e : call_node->in_edges()) { if (e->IsControlEdge()) { continue; } const int input_dtypes_size = input_dtypes.size(); if (e->dst_input() < 0 || e->dst_input() >= input_dtypes_size) { return errors::Internal("Invalid dst_input: ", e->dst_input()); } inputs[e->dst_input()] = NodeDefBuilder::NodeOut{ e->src()->name(), e->src_output(), input_dtypes[e->dst_input()]}; } host_compute_builder.Input(inputs); NodeDef new_def; TF_RETURN_IF_ERROR(host_compute_builder.Finalize(&new_def)); return new_def; } TF_ATTRIBUTE_NOINLINE absl::StatusOr<Node*> ReplaceOutsideCompilationCallNode( Graph* g, Node* call_node, const std::map<string, int>& host_compute_core, const absl::flat_hash_map<string, std::vector<string>>& cluster_deps) { TF_ASSIGN_OR_RETURN( NodeDef node_def, BuildXlaHostComputeNodeDef(call_node, host_compute_core, cluster_deps)); TF_ASSIGN_OR_RETURN(Node * host_compute_node, ReplaceNode(g, call_node, node_def)); VLOG(4) << "Added HostCompute node: " << host_compute_node->DebugString(); return host_compute_node; } Status ResetDeviceOrdinalToPlaceholderValue(Graph* g) { AttrValue device_ordinal_value; device_ordinal_value.set_placeholder("_device_ordinal"); for (Node* n : g->nodes()) { if (!HasNodeAttr(n->def(), kXlaHasHostTransferAttrName)) { continue; } if (n->type_string() == "_XlaRecvAtHost" || n->type_string() == "_XlaSendFromHost") { n->ClearAttr("device_ordinal"); n->AddAttr("device_ordinal", device_ordinal_value); } else if (n->IsIfNode()) { for (const string& attr_name : std::vector<string>{"then_branch", "else_branch"}) { NameAttrList branch_func; TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), attr_name, &branch_func)); (*branch_func.mutable_attr())["_device_ordinal"] = device_ordinal_value; n->ClearAttr(attr_name); n->AddAttr(attr_name, branch_func); } } else if (n->IsWhileNode()) { for (const string& attr_name : std::vector<string>{"cond", "body"}) { NameAttrList branch_func; TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), attr_name, &branch_func)); (*branch_func.mutable_attr())["_device_ordinal"] = device_ordinal_value; n->ClearAttr(attr_name); n->AddAttr(attr_name, branch_func); } } else if 
(HasNodeAttr(n->def(), "_device_ordinal")) { n->ClearAttr("_device_ordinal"); n->AddAttr("_device_ordinal", device_ordinal_value); } else { return errors::Internal("Unknown node marked with ", kXlaHasHostTransferAttrName, ": ", n->DebugString()); } } return absl::OkStatus(); } bool HasLiftedArgs(const FunctionDef& function_def) { return absl::c_any_of(function_def.node_def(), [](const NodeDef& node_def) { return (node_def.op() == "Placeholder" && node_def.attr().find(kXlaLiftedArgOutsideCompilationAttrName) != node_def.attr().end()); }); } absl::StatusOr<std::vector<std::pair<Node*, Node*>>> LiftedArgsAndOutsideCompilationNodesInFunctionBody( const FunctionBody& function_body, const std::unordered_map<string, Node*>& outside_compilation_attr_to_node) { std::vector<std::pair<Node*, Node*>> lifted_arg_nodes_and_outside_compilation_nodes; for (Node* n : function_body.graph->op_nodes()) { string oc_cluster; if (n->type_string() == "Placeholder" && GetNodeAttr(n->def(), kXlaLiftedArgOutsideCompilationAttrName, &oc_cluster) .ok()) { TF_RET_CHECK(outside_compilation_attr_to_node.find(oc_cluster) != outside_compilation_attr_to_node.end()); lifted_arg_nodes_and_outside_compilation_nodes.emplace_back( n, outside_compilation_attr_to_node.at(oc_cluster)); } } return lifted_arg_nodes_and_outside_compilation_nodes; } absl::StatusOr<std::vector<DataType>> UpdateTypesAttribute( const std::vector<std::pair<Node*, Node*>>& lifted_arg_nodes_and_outside_compilation_nodes, const string& type_attr_name, Node* n) { std::vector<DataType> data_types; data_types.reserve(lifted_arg_nodes_and_outside_compilation_nodes.size()); TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), type_attr_name, &data_types)); for (auto pair : lifted_arg_nodes_and_outside_compilation_nodes) { Node* outside_compilation_node = pair.second; DataType data_type; TF_RET_CHECK(outside_compilation_node->IsIdentity() || outside_compilation_node->type_string() == "Placeholder"); if (outside_compilation_node->IsIdentity()) { TF_RETURN_IF_ERROR( GetNodeAttr(outside_compilation_node->def(), "T", &data_type)); } else { TF_RETURN_IF_ERROR( GetNodeAttr(outside_compilation_node->def(), "dtype", &data_type)); } data_types.push_back(data_type); } n->ClearAttr(type_attr_name); n->AddAttr(type_attr_name, data_types); return data_types; } void AddEdgesFromOutsideCompilationNodes( const int original_arg_count, const int arg_to_input_edge_offset, const std::vector<DataType>& data_types, const std::vector<Node*>& outside_compilation_nodes, Graph* g, Node* n) { for (int i = original_arg_count, end = data_types.size(); i < end; i++) { Node* outside_compilation_node = outside_compilation_nodes[i - original_arg_count]; g->AddEdge(outside_compilation_node, 0, n, i + arg_to_input_edge_offset); } } absl::StatusOr<Node*> AddOutsideCompilationInputArgToFunctionBody( const FunctionBody& function_body, const int arg_idx, const DataType& data_type) { NodeDefBuilder arg_builder(absl::StrCat("arg_", arg_idx), "_Arg"); arg_builder.Attr("T", data_type); arg_builder.Attr("index", arg_idx); NodeDef arg_def; TF_RETURN_IF_ERROR(arg_builder.Finalize(&arg_def)); TF_ASSIGN_OR_RETURN(Node * arg_node, function_body.graph->AddNode(arg_def)); return arg_node; } Status AddMatchingRetvalNode(const FunctionBody& function_body, const int arg_idx, const DataType& data_type, Node* arg_node) { NodeDefBuilder ret_builder(absl::StrCat("ret_", arg_idx), "_Retval"); ret_builder.Attr("T", data_type); ret_builder.Attr("index", arg_idx); ret_builder.Input(arg_node->name(), 0, data_type); NodeDef ret_def; 
TF_RETURN_IF_ERROR(ret_builder.Finalize(&ret_def)); TF_ASSIGN_OR_RETURN(Node * ret_node, function_body.graph->AddNode(ret_def)); function_body.graph->AddEdge(arg_node, 0, ret_node, 0); return absl::OkStatus(); } void ReplaceLiftedArgNodePlaceholderWithArg( const FunctionBody& function_body, const int original_arg_count, const int arg_idx, const std::vector<Node*>& lifted_arg_nodes, Node* arg_node) { Node* lifted_arg_node = lifted_arg_nodes[arg_idx - original_arg_count]; if (!lifted_arg_node) { return; } for (const Edge* e : lifted_arg_node->out_edges()) { if (e->IsControlEdge()) { function_body.graph->AddControlEdge(arg_node, e->dst()); } else { function_body.graph->AddEdge(arg_node, 0, e->dst(), e->dst_input()); } } function_body.graph->RemoveNode(lifted_arg_node); } Status AddFunctionWithNewName(const std::string& new_name, const std::string& func_attr_name, const FunctionDef& function_def, NameAttrList* func_attr, Node* callsite_node, FunctionLibraryDefinition* fld) { TF_RETURN_IF_ERROR(fld->AddFunctionDef(function_def)); func_attr->set_name(new_name); callsite_node->ClearAttr(func_attr_name); callsite_node->AddAttr(func_attr_name, *func_attr); return absl::OkStatus(); } Status PostprocessLiftedArgsForWhile( const std::unordered_map<string, Node*>& outside_compilation_attr_to_node, Graph* g, Node* n, FunctionLibraryDefinition* fld) { TF_RET_CHECK(n->IsWhileNode()); NameAttrList body_func; TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "body", &body_func)); const FunctionDef* body_function_def = fld->Find(body_func.name()); TF_RET_CHECK(body_function_def); if (!HasLiftedArgs(*body_function_def)) { return absl::OkStatus(); } std::unique_ptr<FunctionBody> body_function_body; TF_RETURN_IF_ERROR(FunctionDefToBodyHelper(*body_function_def, AttrSlice(&body_func.attr()), fld, &body_function_body)); int original_arg_count = body_function_body->arg_nodes.size(); TF_ASSIGN_OR_RETURN( auto lifted_arg_nodes_and_outside_compilation_nodes, LiftedArgsAndOutsideCompilationNodesInFunctionBody( *body_function_body, outside_compilation_attr_to_node)); TF_ASSIGN_OR_RETURN( std::vector<DataType> data_types, UpdateTypesAttribute(lifted_arg_nodes_and_outside_compilation_nodes, "T", n)); std::vector<Node*> outside_compilation_nodes; outside_compilation_nodes.reserve( lifted_arg_nodes_and_outside_compilation_nodes.size()); std::transform( lifted_arg_nodes_and_outside_compilation_nodes.begin(), lifted_arg_nodes_and_outside_compilation_nodes.end(), std::back_inserter(outside_compilation_nodes), [](const std::pair<Node*, Node*>& pair) { return pair.second; }); AddEdgesFromOutsideCompilationNodes(original_arg_count, 0, data_types, outside_compilation_nodes, g, n); std::vector<Node*> lifted_arg_nodes; lifted_arg_nodes.reserve( lifted_arg_nodes_and_outside_compilation_nodes.size()); std::transform( lifted_arg_nodes_and_outside_compilation_nodes.begin(), lifted_arg_nodes_and_outside_compilation_nodes.end(), std::back_inserter(lifted_arg_nodes), [](const std::pair<Node*, Node*>& pair) { return pair.first; }); for (int i = original_arg_count, end = data_types.size(); i < end; i++) { TF_ASSIGN_OR_RETURN(Node * arg_node, AddOutsideCompilationInputArgToFunctionBody( *body_function_body, i, data_types[i])); TF_RETURN_IF_ERROR( AddMatchingRetvalNode(*body_function_body, i, data_types[i], arg_node)); ReplaceLiftedArgNodePlaceholderWithArg( *body_function_body, original_arg_count, i, lifted_arg_nodes, arg_node); } const auto new_body_function_name = fld->UniqueFunctionName(absl::StrCat(body_func.name(), "_lifted_arg_")); FunctionDef 
rewritten_body_function_def; TF_RETURN_IF_ERROR(GraphToFunctionDef( *body_function_body->graph, new_body_function_name, HostGraphControlRetMapping, &rewritten_body_function_def)); TF_RETURN_IF_ERROR(AddFunctionWithNewName(new_body_function_name, "body", rewritten_b
#include "tensorflow/compiler/jit/extract_outside_compilation_pass.h" #include "absl/strings/match.h" #include "tensorflow/cc/framework/scope.h" #include "tensorflow/cc/ops/array_ops.h" #include "tensorflow/cc/ops/function_ops.h" #include "tensorflow/cc/ops/functional_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/compiler/jit/encapsulate_util.h" #include "xla/test.h" #include "tensorflow/core/common_runtime/device_factory.h" #include "tensorflow/core/common_runtime/function.h" #include "tensorflow/core/framework/common_shape_fns.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/graph_to_functiondef.h" #include "tensorflow/core/framework/node_def_util.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_shape.pb.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/public/session_options.h" #include "tensorflow/core/public/version.h" namespace tensorflow { TEST(RewriteOutsideCompilationSubgraphFnTest, Basic) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output arg0 = ops::_Arg(s.WithOpName("arg0"), DT_INT32, 0); Output arg1 = ops::_Arg(s.WithOpName("arg1"), DT_FLOAT, 1); Output arg2 = ops::_Arg(s.WithOpName("arg2"), DT_INT32, 2); Output add = ops::Add(s.WithOpName("add"), arg0, arg0); auto ret0 = ops::_Retval(s.WithOpName("ret0"), add, 0); auto ret1 = ops::_Retval(s.WithOpName("ret1"), arg1, 1); std::unique_ptr<Graph> g(new Graph(OpRegistry::Global())); TF_CHECK_OK(s.ToGraph(g.get())); auto node_name_image = g->BuildNodeNameIndex(); Node *add_node = node_name_image["add"]; EXPECT_NE(add_node, nullptr); add_node->AddAttr(kXlaConnectedToXlaComputationAttrName, "cluster"); add_node->AddAttr(kXlaConnectedFromXlaComputationAttrName, "cluster"); RewriteOutsideCompilationSubgraphFn rewrite_fn("_xla", "_oc", "cluster", ""); std::vector<OutputTensor> arg_source_tensors; NodeDef call_node_def; call_node_def.set_op("0"); TF_CHECK_OK( rewrite_fn(arg_source_tensors, &g, nullptr, nullptr, &call_node_def)); node_name_image = g->BuildNodeNameIndex(); Node *key_placeholder = node_name_image["cluster_key_placeholder"]; EXPECT_NE(key_placeholder, nullptr); for (Node *n : g->nodes()) { EXPECT_NE(n->type_string(), "_Arg"); } Node *recv_at_host = node_name_image["outside_compilation_cluster__0_recv"]; EXPECT_NE(recv_at_host, nullptr); std::vector<DataType> recv_at_host_dtypes; TF_CHECK_OK( GetNodeAttr(recv_at_host->attrs(), "Toutputs", &recv_at_host_dtypes)); EXPECT_EQ(recv_at_host_dtypes.size(), 3); EXPECT_EQ(recv_at_host_dtypes[0], DT_INT32); EXPECT_EQ(recv_at_host_dtypes[1], DT_FLOAT); EXPECT_EQ(recv_at_host_dtypes[2], DT_INT32); for (Node *n : g->nodes()) { EXPECT_NE(n->type_string(), "_Retval"); } Node *send_from_host = node_name_image["outside_compilation_cluster__0_send"]; EXPECT_NE(send_from_host, nullptr); std::vector<DataType> send_from_host_dtypes; TF_CHECK_OK( GetNodeAttr(send_from_host->attrs(), "Tinputs", &send_from_host_dtypes)); EXPECT_EQ(send_from_host_dtypes.size(), 2); EXPECT_EQ(send_from_host_dtypes[0], DT_INT32); EXPECT_EQ(send_from_host_dtypes[1], DT_FLOAT); add_node = node_name_image["add"]; EXPECT_NE(add_node, nullptr); EXPECT_TRUE(HasNodeAttr(add_node->def(), "_xla")); EXPECT_TRUE(HasNodeAttr(add_node->def(), "_oc")); bool has_control_edge_from_recv_at_host = false; for (auto e : add_node->in_edges()) { if (e->IsControlEdge() && e->src() == recv_at_host) { has_control_edge_from_recv_at_host = true; } } EXPECT_TRUE(has_control_edge_from_recv_at_host); 
bool has_control_edge_to_send_from_host = false; for (auto e : add_node->out_edges()) { if (e->IsControlEdge() && e->dst() == send_from_host) { has_control_edge_to_send_from_host = true; } } EXPECT_TRUE(has_control_edge_to_send_from_host); NameAttrList shape_inference_graph; TF_CHECK_OK(GetNodeAttr(AttrSlice(&call_node_def.attr()), "shape_inference_graph", &shape_inference_graph)); EXPECT_EQ(shape_inference_graph.name(), "_outside_compilation_shape_inference_cluster__0"); } TEST(RewriteOutsideCompilationSubgraphFnTest, NoSendFromHost) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output arg0 = ops::_Arg(s.WithOpName("arg0"), DT_INT32, 0); std::unique_ptr<Graph> g(new Graph(OpRegistry::Global())); TF_CHECK_OK(s.ToGraph(g.get())); RewriteOutsideCompilationSubgraphFn rewrite_fn("_xla", "_oc", "cluster", ""); std::vector<OutputTensor> arg_source_tensors; NodeDef call_node_def; call_node_def.set_op("0"); TF_CHECK_OK( rewrite_fn(arg_source_tensors, &g, nullptr, nullptr, &call_node_def)); auto node_name_image = g->BuildNodeNameIndex(); Node *key_placeholder = node_name_image["cluster_key_placeholder"]; EXPECT_NE(key_placeholder, nullptr); Node *recv_at_host = node_name_image["outside_compilation_cluster__0_recv"]; EXPECT_NE(recv_at_host, nullptr); Node *send_from_host = node_name_image["outside_compilation_cluster__0_send"]; EXPECT_EQ(send_from_host, nullptr); } TEST(RewriteOutsideCompilationSubgraphFnTest, NoRecvAtHost) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output const0 = ops::Const(s.WithOpName("const0"), 1, {2}); auto ret = ops::_Retval(s.WithOpName("ret"), const0, 0); std::unique_ptr<Graph> g(new Graph(OpRegistry::Global())); TF_CHECK_OK(s.ToGraph(g.get())); RewriteOutsideCompilationSubgraphFn rewrite_fn("_xla", "_oc", "cluster", ""); std::vector<OutputTensor> arg_source_tensors; NodeDef call_node_def; call_node_def.set_op("0"); TF_CHECK_OK( rewrite_fn(arg_source_tensors, &g, nullptr, nullptr, &call_node_def)); auto node_name_image = g->BuildNodeNameIndex(); Node *key_placeholder = node_name_image["cluster_key_placeholder"]; EXPECT_NE(key_placeholder, nullptr); Node *recv_at_host = node_name_image["outside_compilation_cluster__0_recv"]; EXPECT_EQ(recv_at_host, nullptr); Node *send_from_host = node_name_image["outside_compilation_cluster__0_send"]; EXPECT_NE(send_from_host, nullptr); } TEST(RewriteOutsideCompilationSubgraphFnTest, NoKeyPlaceholder) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output const0 = ops::Const(s.WithOpName("const0"), 1, {2}); std::unique_ptr<Graph> g(new Graph(OpRegistry::Global())); TF_CHECK_OK(s.ToGraph(g.get())); RewriteOutsideCompilationSubgraphFn rewrite_fn("_xla", "_oc", "cluster", ""); std::vector<OutputTensor> arg_source_tensors; NodeDef call_node_def; call_node_def.set_op("0"); TF_CHECK_OK( rewrite_fn(arg_source_tensors, &g, nullptr, nullptr, &call_node_def)); auto node_name_image = g->BuildNodeNameIndex(); Node *key_placeholder = node_name_image["cluster_key_placeholder"]; EXPECT_EQ(key_placeholder, nullptr); Node *recv_at_host = node_name_image["outside_compilation_cluster__0_recv"]; EXPECT_EQ(recv_at_host, nullptr); Node *send_from_host = node_name_image["outside_compilation_cluster__0_send"]; EXPECT_EQ(send_from_host, nullptr); } TEST(RewriteOutsideCompilationSubgraphFnTest, ShapesInferred) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output const0 = ops::Const(s.WithOpName("const0"), 1, {2}); auto ret = ops::_Retval(s.WithOpName("ret"), const0, 0); std::unique_ptr<Graph> g(new 
Graph(OpRegistry::Global())); TF_CHECK_OK(s.ToGraph(g.get())); auto node_name_image = g->BuildNodeNameIndex(); Node *const0_node = node_name_image["const0"]; EXPECT_NE(const0_node, nullptr); PartialTensorShape shape({2}); const0_node->AddAttr(kXlaInferredShapesAttrName, std::vector<PartialTensorShape>{shape}); RewriteOutsideCompilationSubgraphFn rewrite_fn("_xla", "_oc", "cluster", ""); std::vector<OutputTensor> arg_source_tensors; NodeDef call_node_def; call_node_def.set_op("0"); TF_CHECK_OK( rewrite_fn(arg_source_tensors, &g, nullptr, nullptr, &call_node_def)); node_name_image = g->BuildNodeNameIndex(); std::vector<TensorShapeProto> shapes; TF_CHECK_OK(GetNodeAttr(AttrSlice(&call_node_def.attr()), "shapes", &shapes)); EXPECT_EQ(shapes.size(), 1); EXPECT_EQ(shapes[0].dim_size(), 1); } class ExtractOutsideCompilationForFunctionTest : public ::testing::Test { public: void SetUp() override { SessionOptions session_options; std::vector<std::unique_ptr<Device>> devices; TF_CHECK_OK(DeviceFactory::AddDevices( session_options, "/job:localhost/replica:0/task:0", &devices)); device_mgr_ = std::make_unique<StaticDeviceMgr>(std::move(devices)); } Status ExtractOutsideCompilationTest( const string &xla_cluster_attr_name, const string &outside_compilation_attr_name, const string &xla_cluster_name, const NameAttrList &func_name_attrs, const string &new_func_name, const string &host_graph_func_name, const std::map<string, int> &host_compute_core, FunctionLibraryDefinition *fld, std::vector<string> *shape_inference_graphs, bool *has_outside_compilation) { OptimizerOptions opts; pflr_ = std::make_unique<ProcessFunctionLibraryRuntime>( device_mgr_.get(), Env::Default(), nullptr, TF_GRAPH_DEF_VERSION, fld, opts, nullptr); auto flr = pflr_->GetFLR("/job:localhost/replica:0/task:0/cpu:0"); return ExtractOutsideCompilationForFunction( xla_cluster_attr_name, outside_compilation_attr_name, xla_cluster_name, func_name_attrs, new_func_name, host_graph_func_name, host_compute_core, flr, fld, shape_inference_graphs, has_outside_compilation); } private: std::unique_ptr<DeviceMgr> device_mgr_; std::unique_ptr<ProcessFunctionLibraryRuntime> pflr_; }; TEST_F(ExtractOutsideCompilationForFunctionTest, Basic) { FunctionDefLibrary fdl; { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output const0 = ops::Const(s.WithOpName("const0"), 1, {2}); Output identity0 = ops::Identity(s.WithOpName("identity0"), const0); Output identity1 = ops::Identity(s.WithOpName("identity1"), identity0); Output identity2 = ops::Identity(s.WithOpName("identity2"), identity1); std::unique_ptr<Graph> g(new Graph(OpRegistry::Global())); TF_CHECK_OK(s.ToGraph(g.get())); auto node_name_image = g->BuildNodeNameIndex(); node_name_image["identity0"]->AddAttr("_oc", "0"); node_name_image["identity1"]->AddAttr("_oc", "1"); PartialTensorShape shape({2}); node_name_image["identity1"]->AddAttr( kXlaInferredShapesAttrName, std::vector<PartialTensorShape>{shape}); FunctionDef *xla_fdef = fdl.add_function(); TF_CHECK_OK(GraphToFunctionDef(*g, "cluster", xla_fdef)); } FunctionLibraryDefinition fld(OpRegistry::Global(), fdl); protobuf::Map<string, tensorflow::AttrValue> attrs; std::map<string, int> host_compute_core = {{"0", 1}, {"1", 0}}; std::vector<string> shape_inference_graphs; bool has_outside_compilation; NameAttrList name_attrs; name_attrs.set_name("cluster"); *name_attrs.mutable_attr() = attrs; TF_CHECK_OK(ExtractOutsideCompilationTest( "_xla", "_oc", "cluster", name_attrs, "cluster_rewritten", "host_graph", host_compute_core, &fld, 
&shape_inference_graphs, &has_outside_compilation)); std::unique_ptr<FunctionBody> xla_fbody; TF_CHECK_OK(FunctionDefToBodyHelper(*fld.Find("cluster_rewritten"), AttrSlice(), &fld, &xla_fbody)); auto node_name_index = xla_fbody->graph->BuildNodeNameIndex(); Node *host_compute_0 = node_name_index["outside_compilation_0_host_compute"]; EXPECT_NE(host_compute_0, nullptr); Node *host_compute_1 = node_name_index["outside_compilation_1_host_compute"]; EXPECT_NE(host_compute_1, nullptr); int tpu_core; TF_CHECK_OK(GetNodeAttr(host_compute_0->attrs(), "tpu_core", &tpu_core)); EXPECT_EQ(tpu_core, 1); TF_CHECK_OK(GetNodeAttr(host_compute_1->attrs(), "tpu_core", &tpu_core)); EXPECT_EQ(tpu_core, 0); std::vector<TensorShapeProto> shapes; TF_CHECK_OK(GetNodeAttr(host_compute_0->attrs(), "shapes", &shapes)); EXPECT_EQ(shapes.size(), 0); TF_CHECK_OK(GetNodeAttr(host_compute_1->attrs(), "shapes", &shapes)); EXPECT_EQ(shapes.size(), 1); EXPECT_EQ(shapes[0].dim_size(), 1); NameAttrList shape_inference_graph; TF_CHECK_OK(GetNodeAttr(host_compute_0->attrs(), "shape_inference_graph", &shape_inference_graph)); EXPECT_EQ(shape_inference_graph.name(), ""); TF_CHECK_OK(GetNodeAttr(host_compute_1->attrs(), "shape_inference_graph", &shape_inference_graph)); EXPECT_EQ(shape_inference_graph.name(), ""); EXPECT_EQ(shape_inference_graphs.size(), 0); std::unique_ptr<FunctionBody> host_fbody; AttrValue device_ordinal_temp_value; device_ordinal_temp_value.set_i(0); protobuf::Map<string, AttrValue> host_func_attrs; host_func_attrs["_device_ordinal"] = device_ordinal_temp_value; TF_CHECK_OK(FunctionDefToBodyHelper( *fld.Find("host_graph"), AttrSlice(&host_func_attrs), &fld, &host_fbody)); Graph *host_graph = host_fbody->graph; Node *key_placeholder = nullptr, *sequencer = nullptr; for (Node *n : host_graph->nodes()) { if (n->type_string() == "Placeholder" && absl::EndsWith(n->name(), "_key_placeholder")) { EXPECT_EQ(key_placeholder, nullptr); key_placeholder = n; } else if (HasNodeAttr(n->def(), "_xla_host_transfer_sequencer")) { EXPECT_EQ(sequencer, nullptr); sequencer = n; } } EXPECT_NE(key_placeholder, nullptr); EXPECT_NE(sequencer, nullptr); int num_send_from_host = 0, num_recv_at_host = 0; std::vector<Node *> send_recv_nodes; for (Node *n : host_graph->nodes()) { if (n->type_string() == "_XlaSendFromHost") { num_send_from_host++; send_recv_nodes.push_back(n); } else if (n->type_string() == "_XlaRecvAtHost") { num_recv_at_host++; send_recv_nodes.push_back(n); } } EXPECT_EQ(num_send_from_host, 1); EXPECT_EQ(num_recv_at_host, 1); for (Node *n : send_recv_nodes) { Node *input_node; TF_CHECK_OK(n->input_node(n->num_inputs() - 1, &input_node)); EXPECT_EQ(input_node, key_placeholder); bool has_control_edge_to_sequencer = false; for (const Edge *e : n->out_edges()) { if (e->IsControlEdge() && e->dst() == sequencer) { has_control_edge_to_sequencer = true; break; } } EXPECT_TRUE(has_control_edge_to_sequencer); } } TEST_F(ExtractOutsideCompilationForFunctionTest, NoHostGraph) { FunctionDefLibrary fdl; { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output const0 = ops::Const(s.WithOpName("const0"), 1, {2}); std::unique_ptr<Graph> g(new Graph(OpRegistry::Global())); TF_CHECK_OK(s.ToGraph(g.get())); FunctionDef *xla_fdef = fdl.add_function(); TF_CHECK_OK(GraphToFunctionDef(*g, "cluster", xla_fdef)); } FunctionLibraryDefinition fld(OpRegistry::Global(), fdl); protobuf::Map<string, tensorflow::AttrValue> attrs; std::map<string, int> host_compute_core = {{"0", 1}, {"1", 0}}; std::vector<string> shape_inference_graphs; bool 
has_outside_compilation; NameAttrList name_attrs; name_attrs.set_name("cluster"); *name_attrs.mutable_attr() = attrs; TF_CHECK_OK(ExtractOutsideCompilationTest( "_xla", "_oc", "cluster", name_attrs, "cluster_rewritten", "host_graph", host_compute_core, &fld, &shape_inference_graphs, &has_outside_compilation)); EXPECT_EQ(fld.Find("host_graph"), nullptr); } TEST_F(ExtractOutsideCompilationForFunctionTest, OutsideCompilationInIf) { FunctionDefLibrary fdl; { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output arg = ops::_Arg(s.WithOpName("arg"), DT_INT32, 0); Output identity = ops::Identity(s.WithOpName("identity_true_fn"), arg); ops::_Retval retval(s.WithOpName("retval"), identity, 0); std::unique_ptr<Graph> g(new Graph(OpRegistry::Global())); TF_CHECK_OK(s.ToGraph(g.get())); auto node_name_image = g->BuildNodeNameIndex(); node_name_image["identity_true_fn"]->AddAttr("_oc", "0"); PartialTensorShape shape({2}); node_name_image["identity_true_fn"]->AddAttr( kXlaInferredShapesAttrName, std::vector<PartialTensorShape>{shape}); FunctionDef *true_fn_fdef = fdl.add_function(); TF_CHECK_OK(GraphToFunctionDef(*g, "true_fn", true_fn_fdef)); } { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output arg = ops::_Arg(s.WithOpName("arg"), DT_INT32, 0); Output identity = ops::Identity(s.WithOpName("identity_false_fn"), arg); ops::_Retval retval(s.WithOpName("retval"), identity, 0); std::unique_ptr<Graph> g(new Graph(OpRegistry::Global())); TF_CHECK_OK(s.ToGraph(g.get())); auto node_name_image = g->BuildNodeNameIndex(); node_name_image["identity_false_fn"]->AddAttr("_oc", "0"); PartialTensorShape shape({2}); node_name_image["identity_false_fn"]->AddAttr( kXlaInferredShapesAttrName, std::vector<PartialTensorShape>{shape}); FunctionDef *false_fn_fdef = fdl.add_function(); TF_CHECK_OK(GraphToFunctionDef(*g, "false_fn", false_fn_fdef)); } { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output cond = ops::Const(s.WithOpName("const0"), true, {2}); Output input = ops::Const(s.WithOpName("const1"), 1, {2}); NameAttrList true_fn; true_fn.set_name("true_fn"); NameAttrList false_fn; false_fn.set_name("false_fn"); auto if_op = ops::If(s.WithOpName("if"), cond, std::initializer_list<Input>{cond, input}, {DT_INT32}, true_fn, false_fn); ops::_Retval retval(s.WithOpName("retval"), if_op.output[0], 0); std::unique_ptr<Graph> g(new Graph(OpRegistry::Global())); TF_CHECK_OK(s.ToGraph(g.get())); FunctionDef *xla_fdef = fdl.add_function(); TF_CHECK_OK(GraphToFunctionDef(*g, "cluster", xla_fdef)); } FunctionLibraryDefinition fld(OpRegistry::Global(), fdl); protobuf::Map<string, tensorflow::AttrValue> attrs; std::map<string, int> host_compute_core; std::vector<string> shape_inference_graphs; bool has_outside_compilation; NameAttrList name_attrs; name_attrs.set_name("cluster"); *name_attrs.mutable_attr() = attrs; TF_CHECK_OK(ExtractOutsideCompilationTest( "_xla", "_oc", "cluster", name_attrs, "cluster_rewritten", "host_graph", host_compute_core, &fld, &shape_inference_graphs, &has_outside_compilation)); { std::unique_ptr<FunctionBody> host_fbody; AttrValue device_ordinal_temp_value; device_ordinal_temp_value.set_i(0); protobuf::Map<string, AttrValue> host_func_attrs; host_func_attrs["_device_ordinal"] = device_ordinal_temp_value; TF_CHECK_OK(FunctionDefToBodyHelper(*fld.Find("host_graph"), AttrSlice(&host_func_attrs), &fld, &host_fbody)); Graph *host_graph = host_fbody->graph; auto node_name_index = host_graph->BuildNodeNameIndex(); Node *recv_if_pred_node = node_name_index["recv_oc_if_pred_if"]; 
EXPECT_NE(recv_if_pred_node, nullptr); Node *if_oc_node = node_name_index["oc_if_if"]; EXPECT_NE(if_oc_node, nullptr); Node *if_oc_node_cond_input; TF_CHECK_OK(if_oc_node->input_node(0, &if_oc_node_cond_input)); EXPECT_EQ(if_oc_node_cond_input, recv_if_pred_node); const FunctionDef *true_def = fld.Find("oc_then_branch_host_if_true_fn"); EXPECT_NE(true_def, nullptr); bool has_identity_true_fn_node = false; for (const auto &node_def : true_def->node_def()) { if (node_def.name() == "identity_true_fn") { has_identity_true_fn_node = true; break; } } EXPECT_TRUE(has_identity_true_fn_node); const FunctionDef *false_def = fld.Find("oc_else_branch_host_if_false_fn"); EXPECT_NE(false_def, nullptr); bool has_identity_false_fn_node = false; for (const auto &node_def : false_def->node_def()) { if (node_def.name() == "identity_false_fn") { has_identity_false_fn_node = true; break; } } EXPECT_TRUE(has_identity_false_fn_node); } { std::unique_ptr<FunctionBody> xla_fbody; TF_CHECK_OK(FunctionDefToBodyHelper(*fld.Find("cluster_rewritten"), AttrSlice(), &fld, &xla_fbody)); Graph *xla_graph = xla_fbody->graph; auto node_name_index = xla_graph->BuildNodeNameIndex(); Node *send_if_pred_node = node_name_index["send_oc_if_pred_if"]; EXPECT_NE(send_if_pred_node, nullptr); bool has_control_edge_to_if = false; for (const Edge *e : send_if_pred_node->out_edges()) { if (e->IsControlEdge() && e->dst()->name() == "if") { has_control_edge_to_if = true; break; } } EXPECT_TRUE(has_control_edge_to_if); Node *if_node = node_name_index["if"]; EXPECT_NE(if_node, nullptr); std::vector<string> token_inputs; TF_CHECK_OK( GetNodeAttr(if_node->def(), "_xla_token_input_nodes", &token_inputs)); EXPECT_THAT(token_inputs, ::testing::ElementsAre("send_oc_if_pred_if")); } } TEST_F(ExtractOutsideCompilationForFunctionTest, OutsideCompilationInWhile) { FunctionDefLibrary fdl; { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output arg = ops::_Arg(s.WithOpName("arg"), DT_BOOL, 0); Output identity = ops::Identity(s.WithOpName("identity_cond_fn"), arg); ops::_Retval retval(s.WithOpName("retval"), identity, 0); std::unique_ptr<Graph> g(new Graph(OpRegistry::Global())); TF_CHECK_OK(s.ToGraph(g.get())); auto node_name_image = g->BuildNodeNameIndex(); node_name_image["identity_cond_fn"]->AddAttr("_oc", "0"); PartialTensorShape shape({2}); node_name_image["identity_cond_fn"]->AddAttr( kXlaInferredShapesAttrName, std::vector<PartialTensorShape>{shape}); FunctionDef *cond_fn_fdef = fdl.add_function(); TF_CHECK_OK(GraphToFunctionDef(*g, "cond_fn", cond_fn_fdef)); } { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output arg = ops::_Arg(s.WithOpName("arg"), DT_BOOL, 0); Output identity = ops::Identity(s.WithOpName("identity_body_fn"), arg); ops::_Retval retval(s.WithOpName("retval"), identity, 0); std::unique_ptr<Graph> g(new Graph(OpRegistry::Global())); TF_CHECK_OK(s.ToGraph(g.get())); auto node_name_image = g->BuildNodeNameIndex(); node_name_image["identity_body_fn"]->AddAttr("_oc", "0"); PartialTensorShape shape({2}); node_name_image["identity_body_fn"]->AddAttr( kXlaInferredShapesAttrName, std::vector<PartialTensorShape>{shape}); FunctionDef *body_fn_fdef = fdl.add_function(); TF_CHECK_OK(GraphToFunctionDef(*g, "body_fn", body_fn_fdef)); } { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output input = ops::Const(s.WithOpName("const0"), true, {2}); NameAttrList cond_fn; cond_fn.set_name("cond_fn"); NameAttrList body_fn; body_fn.set_name("body_fn"); auto while_op = ops::While(s.WithOpName("while"), 
std::initializer_list<Input>{input}, cond_fn, body_fn); ops::_Retval retval(s.WithOpName("retval"), while_op.output[0], 0); std::unique_ptr<Graph> g(new Graph(OpRegistry::Global())); TF_CHECK_OK(s.ToGraph(g.get())); FunctionDef *xla_fdef = fdl.add_function(); TF_CHECK_OK(GraphToFunctionDef(*g, "cluster", xla_fdef)); } FunctionLibraryDefinition fld(OpRegistry::Global(), fdl); protobuf::Map<string, tensorflow::AttrValue> attrs; std::map<string, int> host_compute_core; std::vector<string> shape_inference_graphs; bool has_outside_compilation; NameAttrList name_attrs; name_attrs.set_name("cluster"); *name_attrs.mutable_attr() = attrs; TF_CHECK_OK(ExtractOutsideCompilationTest( "_xla", "_oc", "cluster", name_attrs, "cluster_rewritten", "host_graph", host_compute_core, &fld, &shape_inference_graphs, &has_outside_compilation)); { std::unique_ptr<FunctionBody> host_fbody; AttrValue device_ordinal_temp_value; device_ordinal_temp_value.set_i(0); protobuf::Map<string, AttrValue> host_func_attrs; host_func_attrs["_device_ordinal"] = device_ordinal_temp_value; TF_CHECK_OK(FunctionDefToBodyHelper(*fld.Find("host_graph"), AttrSlice(&host_func_attrs), &fld, &host_fbody)); Graph *host_graph = host_fbody->graph; auto node_name_index = host_graph->BuildNodeNameIndex(); Node *while_oc_node = node_name_index["oc_while_while"]; EXPECT_NE(while_oc_node, nullptr); const FunctionDef *cond_def = fld.Find("oc_cond_host_while_cond_fn"); EXPECT_NE(cond_def, nullptr); bool has_identity_cond_fn_node = false; for (const auto &node_def : cond_def->node_def()) { if (node_def.name() == "identity_cond_fn") { has_identity_cond_fn_node = true; break; } } EXPECT_TRUE(has_identity_cond_fn_node); const FunctionDef *body_def = fld.Find("oc_body_host_while_body_fn"); EXPECT_NE(body_def, nullptr); bool has_identity_body_fn_node = false; for (const auto &node_def : body_def->node_def()) { if (node_def.name() == "identity_body_fn") { has_identity_body_fn_node = true; break; } } EXPECT_TRUE(has_identity_body_fn_node); } { const FunctionDef *cond_def = fld.Find("cond_fn_oc"); EXPECT_NE(cond_def, nullptr); bool has_send_oc_while_cond_node = false; for (const auto &node_def : cond_def->node_def()) { if (node_def.name() == "send_oc_while_cond_while") { has_send_oc_while_cond_node = true; break; } } EXPECT_TRUE(has_send_oc_while_cond_node); } } TEST_F(ExtractOutsideCompilationForFunctionTest, OutsideCompilationInFunction) { FunctionDefLibrary fdl; { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output arg = ops::_Arg(s.WithOpName("arg"), DT_INT32, 0); Output identity = ops::Identity(s.WithOpName("identity"), arg); ops::_Retval retval(s.WithOpName("retval"), identity, 0); std::unique_ptr<Graph> g(new Graph(OpRegistry::Global())); TF_CHECK_OK(s.ToGraph(g.get())); auto node_name_image = g->BuildNodeNameIndex(); node_name_image["identity"]->AddAttr("_oc", "0"); PartialTensorShape shape({2}); node_name_image["identity"]->AddAttr( kXlaInferredShapesAttrName, std::vector<PartialTensorShape>{shape}); FunctionDef *true_fn_fdef = fdl.add_function(); TF_CHECK_OK(GraphToFunctionDef(*g, "fn", true_fn_fdef)); } FunctionLibraryDefinition fld(OpRegistry::Global(), fdl); { std::unique_ptr<Graph> g(new Graph(&fld)); tensorflow::TensorProto tensor_proto; tensor_proto.set_dtype(tensorflow::DT_INT32); tensorflow::TensorShapeProto shape; shape.add_dim()->set_size(2); *tensor_proto.mutable_tensor_shape() = shape; for (int i = 0; i < 2; ++i) { tensor_proto.add_int_val(1); } NodeDef const_def; TF_CHECK_OK(NodeDefBuilder("const", "Const") .Attr("dtype", 
DT_INT32) .Attr("value", tensor_proto) .Finalize(&const_def)); Status s; Node *const_node = g->AddNode(const_def, &s); TF_CHECK_OK(s); NodeDef fn_def; TF_CHECK_OK(NodeDefBuilder("fn", "fn", &fld) .Input("const", 0, DT_INT32) .Finalize(&fn_def)); Node *fn_node = g->AddNode(fn_def, &s); TF_CHECK_OK(s); g->AddEdge(const_node, 0, fn_node, 0); NodeDef ret_def; TF_CHECK_OK(NodeDefBuilder("ret", "_Retval") .Attr("index", 0) .Attr("T", DT_INT32) .Input("fn", 0, DT_INT32) .Finalize(&ret_def)); Node *ret_node = g->AddNode(ret_def, &s); TF_CHECK_OK(s); g->AddEdge(fn_node, 0, ret_node, 0); FunctionDef *xla_fdef = fdl.add_function(); TF_CHECK_OK(GraphToFunctionDef(*g, "cluster", xla_fdef)); TF_CHECK_OK(fld.AddFunctionDef(*xla_fdef)); } protobuf::Map<string, tensorflow::AttrValue> attrs; std::map<string, int> host_compute_core; std::vector<string> shape_inference_graphs; bool has_outside_compilation; NameAttrList name_attrs; name_attrs.set_name("cluster"); *name_attrs.mutable_attr() = attrs; TF_CHECK_OK(ExtractOutsideCompilationTest( "_xla", "_oc", "cluster", name_attrs, "cluster_rewritten", "host_graph", host_compute_core, &fld, &shape_inference_graphs, &has_outside_compilation)); { std::unique_ptr<FunctionBody> host_fbody; AttrValue device_ordinal_temp_value; device_ordinal_temp_value.set_i(0); protobuf::Map<string, AttrValue> host_func_attrs; host_func_attrs["_device_ordinal"] = dev
1068
cpp
tensorflow/tensorflow
device_compiler_client
tensorflow/compiler/jit/device_compiler_client.cc
tensorflow/compiler/jit/device_compiler_client_test.cc
#ifndef TENSORFLOW_COMPILER_JIT_DEVICE_COMPILER_CLIENT_H_ #define TENSORFLOW_COMPILER_JIT_DEVICE_COMPILER_CLIENT_H_ #include <optional> #include <string> #include <variant> #include "tensorflow/compiler/tf2xla/xla_compiler.h" #include "xla/client/executable_build_options.h" namespace tensorflow { template <typename ExecutableType, typename ClientType> class DeviceCompilerClient { public: DeviceCompilerClient() = default; virtual ~DeviceCompilerClient() = default; virtual StatusOr<std::unique_ptr<ExecutableType>> BuildExecutable( const XlaCompiler::Options& options, const XlaCompiler::CompilationResult& result) = 0; virtual absl::StatusOr<std::string> SerializeExecutable( const ExecutableType& executable) = 0; virtual absl::StatusOr<std::string> BuildSerializedExecutable( const XlaCompiler::Options& options, const XlaCompiler::CompilationResult& result) = 0; virtual StatusOr<std::unique_ptr<ExecutableType>> LoadExecutable( const XlaCompiler::Options& options, const XlaCompiler::CompilationResult& result, const std::string& serialized_executable) = 0; virtual void WaitForProgramsToFinish() = 0; virtual ClientType* client() const = 0; private: DeviceCompilerClient(const DeviceCompilerClient&) = delete; void operator=(const DeviceCompilerClient&) = delete; }; xla::ExecutableBuildOptions GetExecutableBuildOptions( const XlaCompiler::Options& options, const XlaCompiler::CompilationResult& result, int default_device_ordinal); } #endif #include "tensorflow/compiler/jit/device_compiler_client.h" #include "tensorflow/compiler/tf2xla/xla_compiler.h" #include "tensorflow/core/util/determinism.h" namespace tensorflow { xla::ExecutableBuildOptions GetExecutableBuildOptions( const XlaCompiler::Options& options, const XlaCompiler::CompilationResult& result, int default_device_ordinal) { xla::ExecutableBuildOptions build_options; if (result.collective_info) { build_options.set_num_replicas(result.collective_info->group_size); } if (options.device_ordinal != -1) { build_options.set_device_ordinal(options.device_ordinal); } else if (default_device_ordinal != -1) { build_options.set_device_ordinal(default_device_ordinal); } build_options.set_result_layout(result.xla_output_shape); build_options.set_device_allocator(options.device_allocator.get()); build_options.set_alias_passthrough_params(options.alias_passthrough_params); build_options.mutable_debug_options()->set_xla_detailed_logging( options.detailed_logging); if (tensorflow::OpDeterminismRequired()) { build_options.mutable_debug_options()->set_xla_gpu_deterministic_ops(true); } return build_options; } }
#include "tensorflow/compiler/jit/device_compiler_client.h" #include <gtest/gtest.h> namespace tensorflow { namespace { TEST(GetExecutableOptionTest, Basic) { XlaCompiler::Options options; options.device_ordinal = 0; options.alias_passthrough_params = true; options.detailed_logging = true; XlaCompiler::CompilationResult result; xla::Shape xla_output_shape; result.xla_output_shape = xla_output_shape; auto build_option = GetExecutableBuildOptions(options, result, -1); EXPECT_EQ(build_option.device_ordinal(), 0); EXPECT_EQ(build_option.result_layout()->ToString(), xla_output_shape.ToString()); EXPECT_EQ(build_option.alias_passthrough_params(), true); EXPECT_EQ(build_option.debug_options().xla_detailed_logging(), true); EXPECT_EQ(build_option.debug_options().xla_enable_dumping(), true); } TEST(GetExecutableOptionTest, DefaultDeviceOrdinal) { XlaCompiler::Options options; XlaCompiler::CompilationResult result; auto build_option = GetExecutableBuildOptions(options, result, 0); EXPECT_EQ(build_option.device_ordinal(), 0); } TEST(GetExecutableOptionTest, DeviceOrdinalNotSet) { XlaCompiler::Options options; XlaCompiler::CompilationResult result; auto build_option = GetExecutableBuildOptions(options, result, -1); EXPECT_EQ(build_option.device_ordinal(), -1); } TEST(GetExecutableOptionTest, DumpingWithoutDetailedLogging) { XlaCompiler::Options options; options.detailed_logging = false; XlaCompiler::CompilationResult result; auto build_option = GetExecutableBuildOptions(options, result, -1); EXPECT_FALSE(build_option.debug_options().xla_detailed_logging()); EXPECT_TRUE(build_option.debug_options().xla_enable_dumping()); } } }
1069
cpp
tensorflow/tensorflow
xla_activity_listener
tensorflow/compiler/jit/xla_activity_listener.cc
tensorflow/compiler/jit/xla_activity_listener_test.cc
#ifndef TENSORFLOW_COMPILER_JIT_XLA_ACTIVITY_LISTENER_H_ #define TENSORFLOW_COMPILER_JIT_XLA_ACTIVITY_LISTENER_H_ #include <memory> #include "tensorflow/compiler/jit/xla_activity.pb.h" #include "tensorflow/core/lib/core/status.h" namespace tensorflow { Status BroadcastXlaActivity(XlaAutoClusteringActivity auto_clustering_activity); Status BroadcastXlaActivity(XlaJitCompilationActivity jit_compilation_activity); Status BroadcastOptimizationRemark(XlaOptimizationRemark optimization_remark); Status BroadcastOptimizationRemark( XlaOptimizationRemark::Warning optimization_warning, string debug_information); class XlaActivityListener { public: virtual Status Listen( const XlaAutoClusteringActivity& auto_clustering_activity) = 0; virtual Status Listen( const XlaJitCompilationActivity& jit_compilation_activity) = 0; virtual Status Listen(const XlaOptimizationRemark& optimization_remark) = 0; virtual void Flush(); virtual ~XlaActivityListener(); }; void RegisterXlaActivityListener(std::unique_ptr<XlaActivityListener> listener); } #endif #include "tensorflow/compiler/jit/xla_activity_listener.h" #include "absl/synchronization/mutex.h" #include "tensorflow/compiler/jit/xla_activity.pb.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/platform/thread_annotations.h" namespace tensorflow { namespace { struct XlaActivityListenerList { absl::Mutex mutex; std::vector<std::unique_ptr<XlaActivityListener>> listeners TF_GUARDED_BY(mutex); }; void FlushAllListeners(); XlaActivityListenerList* GetXlaActivityListenerList() { static XlaActivityListenerList* listener_list = new XlaActivityListenerList; static int unused = std::atexit(FlushAllListeners); (void)unused; return listener_list; } template <typename FnTy> Status ForEachListener(FnTy fn) { XlaActivityListenerList* listener_list = GetXlaActivityListenerList(); absl::ReaderMutexLock reader_lock(&listener_list->mutex); for (const std::unique_ptr<XlaActivityListener>& listener : listener_list->listeners) { TF_RETURN_IF_ERROR(fn(listener.get())); } return absl::OkStatus(); } void FlushAllListeners() { Status s = ForEachListener([](XlaActivityListener* listener) { listener->Flush(); return absl::OkStatus(); }); CHECK(s.ok()); } } Status BroadcastXlaActivity( XlaAutoClusteringActivity auto_clustering_activity) { return ForEachListener([&](XlaActivityListener* listener) { return listener->Listen(auto_clustering_activity); }); } Status BroadcastXlaActivity( XlaJitCompilationActivity jit_compilation_activity) { return ForEachListener([&](XlaActivityListener* listener) { return listener->Listen(jit_compilation_activity); }); } Status BroadcastOptimizationRemark(XlaOptimizationRemark optimization_remark) { VLOG(2) << "OptimizationRemark: " << optimization_remark.DebugString(); return ForEachListener([&](XlaActivityListener* listener) { return listener->Listen(optimization_remark); }); } Status BroadcastOptimizationRemark( XlaOptimizationRemark::Warning optimization_warning, string debug_information) { XlaOptimizationRemark remark; remark.set_warning(optimization_warning); remark.set_debug_information(std::move(debug_information)); return BroadcastOptimizationRemark(std::move(remark)); } void RegisterXlaActivityListener( std::unique_ptr<XlaActivityListener> listener) { XlaActivityListenerList* listener_list = GetXlaActivityListenerList(); absl::WriterMutexLock writer_lock(&listener_list->mutex); listener_list->listeners.push_back(std::move(listener)); } void XlaActivityListener::Flush() {} XlaActivityListener::~XlaActivityListener() {} }
#include "tensorflow/compiler/jit/xla_activity_listener.h" #include <cstdlib> #include "tensorflow/cc/framework/ops.h" #include "tensorflow/cc/ops/array_ops.h" #include "tensorflow/cc/ops/list_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/compiler/jit/flags.h" #include "tensorflow/core/common_runtime/direct_session.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace { class TestListener : public XlaActivityListener { public: Status Listen( const XlaAutoClusteringActivity& auto_clustering_activity) override { auto_clustering_activity_ = auto_clustering_activity; return absl::OkStatus(); } Status Listen( const XlaJitCompilationActivity& jit_compilation_activity) override { jit_compilation_activity_ = jit_compilation_activity; return absl::OkStatus(); } Status Listen(const XlaOptimizationRemark& optimization_remark) override { return absl::OkStatus(); } ~TestListener() override {} const XlaAutoClusteringActivity& auto_clustering_activity() const { return auto_clustering_activity_; } const XlaJitCompilationActivity& jit_compilation_activity() const { return jit_compilation_activity_; } private: XlaAutoClusteringActivity auto_clustering_activity_; XlaJitCompilationActivity jit_compilation_activity_; }; class XlaActivityListenerTest : public ::testing::Test { protected: XlaActivityListenerTest() { auto listener = std::make_unique<TestListener>(); listener_ = listener.get(); RegisterXlaActivityListener(std::move(listener)); } TestListener* listener() const { return listener_; } private: TestListener* listener_; }; GraphDef CreateGraphDef() { Scope root = Scope::NewRootScope().ExitOnError().WithAssignedDevice( "/job:localhost/replica:0/task:0/device:CPU:0"); Output a = ops::Placeholder(root.WithOpName("A"), DT_FLOAT); for (int i = 0; i < 5; i++) { a = ops::MatMul(root.WithOpName(absl::StrCat("matmul_", i)), a, a); a = ops::Add(root.WithOpName(absl::StrCat("add_", i)), a, a); } GraphDef graph_def; root.graph()->ToGraphDef(&graph_def); return graph_def; } TEST_F(XlaActivityListenerTest, Test) { GraphDef graph_def = CreateGraphDef(); SessionOptions options; options.config.mutable_graph_options() ->mutable_optimizer_options() ->set_global_jit_level(OptimizerOptions::ON_2); std::unique_ptr<Session> session(NewSession(options)); TF_ASSERT_OK(session->Create(graph_def)); std::vector<std::string> output_names = {std::string("add_4:0")}; Tensor tensor_2x2(DT_FLOAT, TensorShape({2, 2})); for (int i = 0; i < 4; i++) { tensor_2x2.matrix<float>()(i / 2, i % 2) = 5 * i; } Tensor tensor_3x3(DT_FLOAT, TensorShape({3, 3})); for (int i = 0; i < 9; i++) { tensor_3x3.matrix<float>()(i / 3, i % 3) = 5 * i; } std::vector<std::pair<string, Tensor>> inputs_2x2 = {{"A", tensor_2x2}}; std::vector<Tensor> outputs; TF_ASSERT_OK(session->Run(inputs_2x2, output_names, {}, &outputs)); XlaAutoClusteringActivity expected_auto_clustering_activity; protobuf::TextFormat::ParseFromString( R"(global_jit_level: ON_2 cpu_global_jit_enabled: true summary { unclustered_node_count: 4 clustered_node_count: 14 clusters { name: "cluster_0" size: 14 op_histogram { op: "Add" count: 1 } op_histogram { op: "Const" count: 4 } op_histogram { op: "MatMul" count: 5 } op_histogram { op: "Mul" count: 4 } } unclustered_op_histogram { op: "NoOp" count: 2 } unclustered_op_histogram { op: "_Arg" count: 1 } unclustered_op_histogram { op: "_Retval" count: 1 } } )", &expected_auto_clustering_activity); 
EXPECT_EQ(listener()->auto_clustering_activity().DebugString(), expected_auto_clustering_activity.DebugString()); EXPECT_EQ(listener()->jit_compilation_activity().cluster_name(), "cluster_0"); EXPECT_EQ(listener()->jit_compilation_activity().compile_count(), 1); int64_t first_compile_time = listener()->jit_compilation_activity().compile_time_us(); EXPECT_GT(first_compile_time, 0); EXPECT_EQ(listener()->jit_compilation_activity().cumulative_compile_time_us(), first_compile_time); std::vector<std::pair<string, Tensor>> inputs_3x3 = {{"A", tensor_3x3}}; outputs.clear(); for (int i = 0; i < 3; i++) { TF_ASSERT_OK(session->Run(inputs_3x3, output_names, {}, &outputs)); } EXPECT_EQ(listener()->jit_compilation_activity().cluster_name(), "cluster_0"); EXPECT_EQ(listener()->jit_compilation_activity().compile_count(), 2); EXPECT_GT(listener()->jit_compilation_activity().compile_time_us(), 0); EXPECT_EQ(listener()->jit_compilation_activity().cumulative_compile_time_us(), first_compile_time + listener()->jit_compilation_activity().compile_time_us()); } } } int main(int argc, char** argv) { tensorflow::GetMarkForCompilationPassFlags()->tf_xla_cpu_global_jit = true; ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
1070
cpp
tensorflow/tensorflow
xla_launch_util
tensorflow/compiler/jit/xla_launch_util.cc
tensorflow/compiler/jit/xla_launch_util_test.cc
#ifndef TENSORFLOW_COMPILER_JIT_XLA_LAUNCH_UTIL_H_ #define TENSORFLOW_COMPILER_JIT_XLA_LAUNCH_UTIL_H_ #include <map> #include <memory> #include <set> #include <vector> #include "tensorflow/compiler/jit/variable_info.h" #include "tensorflow/compiler/jit/xla_tensor.h" #include "tensorflow/compiler/tf2xla/xla_compiler.h" #include "xla/client/local_client.h" #include "xla/pjrt/pjrt_client.h" #include "xla/service/shaped_buffer.h" #include "xla/stream_executor/device_memory_allocator.h" #include "tensorflow/core/framework/allocation_description.pb.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/platform/thread_annotations.h" namespace tensorflow { absl::StatusOr<std::vector<VariableInfo>> GatherVariableInfo( OpKernelContext* ctx, const XlaCompiler::CompilationResult& compilation_result, int missing_ctx_input_prefix); std::vector<const Tensor*> InputsFromContext(OpKernelContext* ctx); absl::StatusOr<std::vector<int>> GetConstantInputIndicesFromContext( OpKernelContext* ctx); Status SetOutputForConstant( OpKernelContext* ctx, bool requires_copy_to_device, const XlaCompiler::CompilationResult* compilation_result, int output_num); Status PreparePjRtExecutableArguments( int num_missing_prefix_ctx_inputs, const std::vector<int>& input_mapping, const std::vector<const Tensor*>& inputs, const absl::flat_hash_map<int, const Tensor*>& variable_snapshots, xla::PjRtClient* pjrt_client, xla::PjRtDevice* pjrt_device, bool use_pjrt_tensor_buffer, std::vector<xla::PjRtBuffer*>* args, std::vector<std::unique_ptr<xla::PjRtBuffer>>* owned_args, absl::flat_hash_set<int>* non_donatable_input_indices); Status PopulateCtxOutputsFromPjRtExecutableOutputs( int num_missing_prefix_ctx_inputs, const std::vector<const Tensor*>& inputs, const std::vector<VariableInfo>& variables, const XlaCompiler::CompilationResult& compilation_result, bool use_pjrt_tensor_buffer, std::vector<std::unique_ptr<xla::PjRtBuffer>>& executable_outputs, OpKernelContext* ctx); xla::ExecuteOptions GetPjRtExecuteOptions( const DeviceType& device_type, absl::flat_hash_set<int> non_donatable_input_indices); int GetDeviceOrdinal(const DeviceBase* device); DeviceType GetDeviceType(OpKernelContext* ctx); Status RunPjRtExecutable( const std::vector<const Tensor*>& inputs, const std::vector<VariableInfo>& variables, const XlaCompiler::CompilationResult& compilation_result, xla::PjRtClient* pjrt_client, xla::PjRtLoadedExecutable* executable, OpKernelContext* ctx); Status RunPjRtExecutable( int num_missing_prefix_ctx_inputs, const std::vector<const Tensor*>& inputs, const absl::flat_hash_map<int, const Tensor*>& variable_snapshots, const std::vector<VariableInfo>& updated_variables, const XlaCompiler::CompilationResult& compilation_result, xla::PjRtClient* pjrt_client, xla::PjRtLoadedExecutable* executable, OpKernelContext* ctx); absl::StatusOr<std::vector<std::unique_ptr<xla::PjRtBuffer>>> RunPjRtExecutable( int num_missing_prefix_ctx_inputs, const std::vector<const Tensor*>& inputs, const absl::flat_hash_map<int, const Tensor*>& variable_snapshots, const std::vector<VariableInfo>& updated_variables, const DeviceType& device_type, bool use_pjrt_tensor_buffer, const XlaCompiler::CompilationResult& compilation_result, xla::PjRtDevice* device, xla::PjRtClient* pjrt_client, xla::PjRtLoadedExecutable* executable); class XlaComputationLaunchContext { public: XlaComputationLaunchContext(xla::LocalClient* client, se::DeviceMemoryAllocator* xla_allocator, int device_ordinal, bool 
allocate_xla_tensors, bool use_multiple_streams); static absl::StatusOr<std::vector<XlaCompiler::Argument>> BuildXlaCompilerArguments(absl::Span<int const> must_be_constant_idxs, absl::Span<const Tensor* const> inputs, absl::Span<VariableInfo const> variable_args, Device* device); absl::StatusOr<std::vector<xla::ExecutionInput>> PopulateInputs( OpKernelContext* ctx, const XlaCompiler::CompilationResult* compilation_result, const std::map<int, const Tensor*>& resource_vars, int missing_ctx_input_prefix, const xla::HloInputOutputAliasConfig& input_output_alias); Status PopulateOutputs( OpKernelContext* ctx, const XlaCompiler::CompilationResult* compilation_result, xla::ScopedShapedBuffer output, int missing_ctx_input_prefix, absl::Span<VariableInfo> variable_infos, const xla::HloInputOutputAliasConfig& input_output_alias, const std::map<int, const Tensor*>& resource_vars); private: xla::LocalClient* client_; se::DeviceMemoryAllocator* xla_allocator_; bool allocate_xla_tensors_; bool use_multiple_streams_; int device_ordinal_; }; class XlaTensorBuffer : public TensorBuffer { public: XlaTensorBuffer(const void* ptr, size_t expected_size, size_t actual_size, Allocator* allocator) : TensorBuffer(const_cast<void*>(ptr)), expected_size_(expected_size), actual_size_(actual_size), allocator_(allocator) {} ~XlaTensorBuffer() override { if (data()) { allocator_->DeallocateRaw(data()); } } size_t size() const override { return expected_size_; } TensorBuffer* root_buffer() override { return this; } void FillAllocationDescription(AllocationDescription* proto) const override { proto->set_requested_bytes(static_cast<int64_t>(expected_size_)); proto->set_allocator_name(allocator_->Name()); proto->set_ptr(reinterpret_cast<uintptr_t>(data())); if (allocator_->TracksAllocationSizes()) { auto ab = static_cast<int64_t>(allocator_->AllocatedSize(data())); proto->set_allocated_bytes(ab); int64_t id = allocator_->AllocationId(data()); if (id > 0) { proto->set_allocation_id(id); } if (RefCountIsOne()) { proto->set_has_single_reference(true); } } } private: size_t expected_size_; size_t actual_size_; Allocator* allocator_; }; } #endif #include "tensorflow/compiler/jit/xla_launch_util.h" #include <cstdint> #include <memory> #include <optional> #include <set> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/cleanup/cleanup.h" #include "absl/container/flat_hash_set.h" #include "absl/status/status.h" #include "absl/types/span.h" #include "tensorflow/compiler/jit/pjrt_tensor_buffer.h" #include "tensorflow/compiler/jit/pjrt_tensor_buffer_util.h" #include "tensorflow/compiler/jit/variable_info.h" #include "tensorflow/compiler/jit/variable_info_util.h" #include "tensorflow/compiler/tf2xla/const_analysis.h" #include "tensorflow/compiler/tf2xla/shape_util.h" #include "tensorflow/compiler/tf2xla/xla_compiler.h" #include "xla/client/local_client.h" #include "xla/pjrt/pjrt_client.h" #include "xla/pjrt/pjrt_common.h" #include "xla/pjrt/pjrt_future.h" #include "xla/pjrt/pjrt_stream_executor_client.h" #include "xla/pjrt/tracked_device_buffer.h" #include "xla/shape_util.h" #include "xla/status_macros.h" #include "xla/stream_executor/platform_manager.h" #include "xla/tsl/framework/device_id_utils.h" #include "xla/tsl/framework/serving_device_selector_policies.h" #include "tensorflow/core/common_runtime/dma_helper.h" #include "tensorflow/core/common_runtime/gpu/gpu_serving_device_selector.h" #include "tensorflow/core/common_runtime/gpu_device_context.h" #include 
"tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/resource_mgr.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/core/refcount.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/platform/statusor.h" #include "tensorflow/core/tfrt/common/async_value_tensor.h" #include "tensorflow/core/util/stream_executor_util.h" #include "tsl/platform/status.h" #include "tsl/platform/statusor.h" namespace tensorflow { namespace { using xla::ScopedShapedBuffer; using xla::ShapedBuffer; se::Platform::Id XlaPlatformInfoFromDevice(DeviceBase* device_base) { auto device = static_cast<Device*>(device_base); se::Platform::Id platform_id = nullptr; if (device->device_type() == DEVICE_CPU) { platform_id = se::host::kHostPlatformId; } return platform_id; } absl::flat_hash_map<int, int> CreateVariableLookup( const std::vector<VariableInfo>& variables) { absl::flat_hash_map<int, int> variable_lookup; for (int i = 0; i < variables.size(); i++) { variable_lookup[variables[i].index()] = i; } return variable_lookup; } } std::vector<const Tensor*> InputsFromContext(OpKernelContext* ctx) { std::vector<const Tensor*> inputs; inputs.reserve(ctx->num_inputs()); for (int input_idx = 0; input_idx < ctx->num_inputs(); input_idx++) { inputs.push_back(&ctx->input(input_idx)); } return inputs; } absl::StatusOr<std::vector<int>> GetConstantInputIndicesFromContext( OpKernelContext* ctx) { std::vector<int> constant_input_indices; TF_RETURN_IF_ERROR(GetCompileTimeConstInputs( &ctx->op_kernel(), &constant_input_indices, ctx->function_library())); if (!absl::c_all_of(constant_input_indices, [&](int idx) { return ctx->input_memory_type(idx) == HOST_MEMORY; })) { return errors::Internal("Unexpected device placement for a constant input"); } return constant_input_indices; } XlaComputationLaunchContext::XlaComputationLaunchContext( xla::LocalClient* client, se::DeviceMemoryAllocator* xla_allocator, int device_ordinal, bool allocate_xla_tensors, bool use_multiple_streams) : client_(client), xla_allocator_(xla_allocator), allocate_xla_tensors_(allocate_xla_tensors), use_multiple_streams_(use_multiple_streams), device_ordinal_(device_ordinal) { if (use_multiple_streams_) { CHECK(allocate_xla_tensors_) << "To use multiple streams correctly we must " "be allocating XLA tensors!"; } } static void PopulateExecutionInputBuffer(xla::ExecutionInput& execution_input, xla::ShapeIndex index, se::DeviceMemoryBase buffer, bool donate_buffer, int device_ordinal, se::DeviceMemoryAllocator* allocator) { xla::MaybeOwningDeviceMemory* in_buffer = execution_input.MutableBuffer(index); if (donate_buffer) { *in_buffer = se::OwningDeviceMemory(buffer, device_ordinal, allocator); } else { *in_buffer = buffer; } } absl::StatusOr<std::vector<xla::ExecutionInput>> XlaComputationLaunchContext::PopulateInputs( OpKernelContext* ctx, const XlaCompiler::CompilationResult* compilation_result, const std::map<int, const Tensor*>& resource_vars, int missing_ctx_input_prefix, const xla::HloInputOutputAliasConfig& input_output_alias) { std::vector<xla::ExecutionInput> arguments; arguments.reserve(compilation_result->xla_input_shapes.size()); for (int i = 0; i < compilation_result->xla_input_shapes.size(); ++i) { int arg_num = compilation_result->input_mapping[i]; CHECK_GE(arg_num, 
missing_ctx_input_prefix); const xla::Shape& device_shape = compilation_result->xla_input_shapes[i]; const xla::Shape& host_shape = xla::ShapeUtil::DeviceShapeToHostShape(device_shape); auto resource_var_it = resource_vars.find(arg_num); bool is_resource_variable = resource_var_it != resource_vars.end(); bool is_updated_resource_variable = is_resource_variable && absl::c_any_of(compilation_result->resource_updates, [&](const XlaCompiler::ResourceUpdate& update) { return update.input_index == arg_num && update.modified; }); const Tensor* t = is_resource_variable ? resource_var_it->second : &(ctx->input(arg_num - missing_ctx_input_prefix)); CHECK(t); bool donate_buffer = t->RefCountIsOne() && is_updated_resource_variable && input_output_alias.ParameterHasAlias(i, xla::ShapeIndex{}); VLOG(3) << "Processing input: " << i << "; is_resource_variable=" << is_resource_variable << "; is_updated_resource_variable=" << is_updated_resource_variable << "; donate_buffer=" << donate_buffer; if (use_multiple_streams_) { CHECK(ctx->op_device_context() && ctx->op_device_context()->stream()) << "Must have a stream available when using XLA tensors!"; XlaTensor* xla_tensor = XlaTensor::FromTensor(t); CHECK(xla_tensor); xla_tensor->WaitForDefinitionEventOnStream( ctx->op_device_context()->stream()); } arguments.emplace_back(device_shape, host_shape); xla::ExecutionInput& execution_input = arguments.back(); se::DeviceMemoryBase dmem = XlaTensor::DeviceMemoryFromTensor(*t); PopulateExecutionInputBuffer(execution_input, xla::ShapeIndex{}, dmem, donate_buffer, device_ordinal_, xla_allocator_); } return std::move(arguments); } static Tensor MakeTensor(DataType dtype, const TensorShape& shape, se::DeviceMemoryBase buffer, Allocator* allocator) { size_t expected_size = shape.num_elements() * DataTypeSize(dtype); auto* tensor_buffer = new XlaTensorBuffer(buffer.opaque(), expected_size, buffer.size(), allocator); Tensor t(dtype, shape, tensor_buffer); tensor_buffer->Unref(); return t; } static absl::StatusOr<Tensor> GetOrCreateTensorForOutput( xla::ScopedShapedBuffer& output, int output_num, OpKernelContext* ctx, int missing_ctx_input_prefix, const xla::HloInputOutputAliasConfig& input_output_alias, absl::Span<const int> input_mapping, const std::map<int, const Tensor*>& resource_vars_snapshots, DataType output_dtype, const TensorShape& output_shape, Allocator* output_allocator, bool allocate_xla_tensors, se::Stream* stream, bool use_multiple_streams, std::shared_ptr<se::Event> definition_event) { xla::ShapeIndex output_index = input_output_alias.shape().IsTuple() ? xla::ShapeIndex({output_num}) : xla::ShapeIndex({}); CHECK(input_output_alias.shape().IsTuple() || output_num == 0); if (std::optional<xla::HloInputOutputAliasConfig::Alias> alias = input_output_alias.GetAliasedParameter(output_index)) { VLOG(3) << "Found alias: " << alias->ToString(); int tf_param = input_mapping[alias->parameter_number] - missing_ctx_input_prefix; const Tensor input_tensor = ctx->input(tf_param).dtype() != DT_RESOURCE ? 
ctx->input(tf_param) : *resource_vars_snapshots.at(missing_ctx_input_prefix + tf_param); se::DeviceMemoryBase input_buffer = XlaTensor::DeviceMemoryFromTensor(input_tensor); se::DeviceMemoryBase output_buffer = output.buffer({output_num}); if (input_buffer.opaque() == output_buffer.opaque()) { output.set_buffer(se::OwningDeviceMemory(), {output_num}); return input_tensor; } } if (allocate_xla_tensors) { Tensor output_tensor; TF_RETURN_IF_ERROR( ctx->allocate_temp(output_dtype, output_shape, &output_tensor)); if (output_tensor.TotalBytes() > 0) { XlaTensor* xla_tensor = XlaTensor::FromTensor(&output_tensor); TF_RET_CHECK(xla_tensor); xla_tensor->set_shaped_buffer(output.TakeSubTree({output_num})); if (use_multiple_streams) { xla_tensor->ResetDefinitionEvent(definition_event, stream); } } return output_tensor; } se::DeviceMemoryBase output_buffer = output.buffer({output_num}); Tensor output_tensor = MakeTensor(output_dtype, output_shape, output_buffer, output_allocator); output.set_buffer(se::OwningDeviceMemory(), {output_num}); return output_tensor; } Status SetOutputForConstant( OpKernelContext* ctx, bool requires_copy_to_device, const XlaCompiler::CompilationResult* compilation_result, int output_num) { CHECK(compilation_result->outputs[output_num].is_constant); const Tensor& const_tensor = compilation_result->outputs[output_num].constant_value; Tensor* output_tensor; if (requires_copy_to_device && const_tensor.TotalBytes() > 0) { VLOG(1) << "Constant output tensor on device"; TF_RETURN_IF_ERROR( ctx->allocate_output(output_num, const_tensor.shape(), &output_tensor)); Device* device = dynamic_cast<Device*>(ctx->device()); if (device == nullptr) { return errors::Internal("DeviceBase was not a Device."); } ctx->op_device_context()->CopyCPUTensorToDevice( &const_tensor, device, output_tensor, [&](Status status) { TF_CHECK_OK(status); }); if (device->device_type() == DEVICE_GPU) { auto* gpu_device_context = static_cast<GPUDeviceContext*>(ctx->op_device_context()); TF_RETURN_IF_ERROR(gpu_device_context->stream()->WaitFor( gpu_device_context->host_to_device_stream())); } } else { ctx->set_output(output_num, const_tensor); output_tensor = ctx->mutable_output(output_num); } return absl::OkStatus(); } static absl::StatusOr<Var*> GetOrCreateResourceVar( OpKernelContext* ctx, const ResourceHandle& handle, const XlaCompiler::ResourceUpdate& write) { Var* variable = nullptr; TF_RETURN_IF_ERROR( LookupOrCreateResource<Var>(ctx, handle, &variable, [&write](Var** ptr) { *ptr = new Var(write.type); return absl::OkStatus(); })); return variable; } absl::StatusOr<std::vector<VariableInfo>> GatherVariableInfo( OpKernelContext* ctx, const XlaCompiler::CompilationResult& compilation_result, int missing_ctx_input_prefix) { std::vector<VariableInfo> out; out.reserve(compilation_result.resource_updates.size()); for (int i = 0; i < compilation_result.resource_updates.size(); ++i) { const XlaCompiler::ResourceUpdate& write = compilation_result.resource_updates[i]; int actual_input_index = write.input_index - missing_ctx_input_prefix; if (actual_input_index < 0 || actual_input_index >= ctx->num_inputs()) { return errors::Internal("Invalid input index for variable write."); } const ResourceHandle handle = HandleFromInput(ctx, actual_input_index); TF_ASSIGN_OR_RETURN(Var * variable, GetOrCreateResourceVar(ctx, handle, write)); out.emplace_back(actual_input_index, handle.name(), variable, handle.definition_stack_trace()); } return std::move(out); } Status XlaComputationLaunchContext::PopulateOutputs( OpKernelContext* 
ctx, const XlaCompiler::CompilationResult* compilation_result, ScopedShapedBuffer output, int missing_ctx_input_prefix, absl::Span<VariableInfo> variable_infos, const xla::HloInputOutputAliasConfig& input_output_alias, const std::map<int, const Tensor*>& resource_vars) { se::Stream* stream = ctx->op_device_context() ? ctx->op_device_context()->stream() : nullptr; Allocator* allocator = ctx->device()->GetAllocator({}); VLOG(2) << "Result tuple shape: " << output.on_host_shape().DebugString(); VLOG(2) << "Result tuple shape (on device): " << output.on_device_shape().DebugString(); CHECK_EQ(ctx->num_outputs(), compilation_result->outputs.size()); if (!output.on_host_shape().IsTuple()) { ShapedBuffer nontuple_buffer = output.release(); ShapedBuffer buffer( xla::ShapeUtil::MakeTupleShape({nontuple_buffer.on_host_shape()}), xla::ShapeUtil::MakeTupleShape({nontuple_buffer.on_device_shape()}), output.device_ordinal()); buffer.buffers().CopySubtreeFrom(nontuple_buffer.buffers(), {}, {0}); output = ScopedShapedBuffer(std::move(buffer), output.memory_allocator()); } std::shared_ptr<se::Event> definition_event; if (use_multiple_streams_ && stream) { TF_ASSIGN_OR_RETURN(definition_event, stream->parent()->CreateEvent()); TF_RETURN_IF_ERROR(stream->RecordEvent(definition_event.get())); } for (const XlaOutputDescription& descr : compilation_result->outputs) { if (descr.type == DT_VARIANT) { return errors::Unimplemented( "Support for TensorList crossing the XLA/TF boundary " "is not implemented"); } } std::vector<TensorShape> output_tensor_shapes; output_tensor_shapes.reserve(ctx->num_outputs()); if (output.on_host_shape().is_dynamic()) { const se::Platform* platform = nullptr; if (stream != nullptr) { platform = stream->parent()->GetPlatform(); } else { TF_ASSIGN_OR_RETURN(platform, se::PlatformManager::PlatformWithId( XlaPlatformInfoFromDevice(ctx->device()))); } TF_ASSIGN_OR_RETURN(auto transfer_manager, xla::TransferManager::GetForPlatform(platform)); xla::Shape output_device_shape = output.on_device_shape(); TF_RETURN_IF_ERROR(transfer_manager->ReadDynamicShapes( stream, &output, &output_device_shape)); output.set_shapes(output_device_shape, output_device_shape); for (int i = 0; i < ctx->num_outputs(); ++i) { const xla::Shape& subshape = xla::ShapeUtil::GetSubshape(output_device_shape, {i}); TensorShape shape; TF_RETURN_IF_ERROR(XLAShapeToTensorShape(subshape, &shape)); output_tensor_shapes.push_back(shape); } } else { for (int i = 0; i < ctx->num_outputs(); ++i) { output_tensor_shapes.push_back(compilation_result->outputs[i].shape); } } int output_num = 0; for (int i = 0, end = ctx->num_outputs(); i < end; ++i) { const TensorShape& shape = output_tensor_shapes[i]; const DataType& type = compilation_result->outputs[i].type; VLOG(2) << "Populating output for retval " << i << " shape " << shape.DebugString() << " type " << DataTypeString(type); if (compilation_result->outputs[i].is_constant) { TF_RETURN_IF_ERROR(SetOutputForConstant( ctx, stream != nullptr, compilation_result, i)); } else if (type == DT_RESOURCE) { int input_index = compilation_result->outputs[i].input_index - missing_ctx_input_prefix; TF_RET_CHECK(input_index >= 0 && input_index < ctx->num_inputs()) << "Invalid input for outputs " << i << ": " << input_index; ctx->set_output(i, ctx->input(input_index)); } else { TF_ASSIGN_OR_RETURN( Tensor output_tensor, GetOrCreateTensorForOutput( output, output_num, ctx, missing_ctx_input_prefix, input_output_alias, compilation_result->input_mapping, resource_vars, ctx->expected_output_dtype(i), 
shape, allocator, allocate_xla_tensors_, stream, use_multiple_streams_, definition_event)); ctx->set_output(i, output_tensor); ++output_num; } } absl::flat_hash_map<int, int> variable_info_lookup; for (int i = 0; i < variable_infos.size(); i++) { variable_info_lookup.emplace(variable_infos[i].index(), i); } for (int i = 0, end = compilation_result->resource_updates.size(); i < end; ++i) { const XlaCompiler::ResourceUpdate& write = compilation_result->resource_updates[i]; int actual_input_index = wr
#include "tensorflow/compiler/jit/xla_launch_util.h" #include <cstdint> #include <memory> #include <utility> #include <vector> #include <gtest/gtest.h> #include "absl/container/flat_hash_set.h" #include "tensorflow/compiler/jit/device_compiler.h" #include "tensorflow/compiler/jit/flags.h" #include "tensorflow/compiler/jit/pjrt_device_compiler_client.h" #include "tensorflow/compiler/jit/variable_info.h" #include "tensorflow/compiler/jit/variable_info_util.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/pjrt/pjrt_client.h" #include "xla/pjrt/pjrt_common.h" #include "xla/pjrt/tfrt_cpu_pjrt_client.h" #include "xla/tests/literal_test_util.h" #include "xla/tsl/framework/allocator.h" #include "xla/tsl/framework/device_id_utils.h" #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/device.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/platform/refcount.h" #include "tensorflow/core/tfrt/common/create_pjrt_client_util.h" #include "tensorflow/core/tfrt/common/pjrt_util.h" #include "tsl/lib/core/status_test_util.h" #include "tsl/platform/status.h" #include "tsl/platform/statusor.h" namespace tensorflow { namespace { using PjRtDeviceCompiler = DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>; using PjRtDeviceExecutablePersistor = DeviceExecutablePersistor<xla::PjRtLoadedExecutable, xla::PjRtClient>; absl::flat_hash_map<int, const Tensor*> GetVariableSnapshots( const std::vector<VariableInfo>& variables) { absl::flat_hash_map<int, const Tensor*> variable_snapshots; for (int i = 0; i < variables.size(); i++) { variable_snapshots[variables[i].index()] = variables[i].var()->tensor(); } return variable_snapshots; } class PjRtExecutionUtilTest : public OpsTestBase { public: PjRtExecutionUtilTest() { auto& rollout_config = GetXlaOpsCommonFlags()->tf_xla_use_device_api; rollout_config.enabled_for_xla_launch_ = true; rollout_config.enabled_for_compile_on_demand_ = true; GetXlaDeviceFlags()->tf_xla_enable_xla_devices = true; auto device_type = DeviceType(DEVICE_XLA_CPU); rollout_config.AllowForDeviceInXlaLaunch(device_type); rollout_config.AllowForDeviceInXlaCompileOnDemand(device_type); auto jit_device_type = DeviceType(DEVICE_CPU_XLA_JIT); auto device = DeviceFactory::NewDevice(device_type.type_string(), SessionOptions(), "/job:localhost/replica:0/task:0"); device_ = device.get(); SetDevice(device_type, std::move(device)); TF_CHECK_OK(SetPjRtClientInTFGlobalResourceManager( device_type, xla::GetTfrtCpuClient(true, 1) .value())); TF_CHECK_OK(device_->TryGetDeviceContext(&device_context_)); AllocatorAttributes host_alloc_attr; host_alloc_attr.set_on_host(true); host_allocator_ = device_->GetAllocator(host_alloc_attr); AllocatorAttributes device_alloc_attr; device_alloc_attr.set_on_host(false); device_allocator_ = device_->GetAllocator(device_alloc_attr); auto pjrt_client_or = GetOrCreatePjRtClient(device_type_); TF_CHECK_OK(pjrt_client_or.status()); pjrt_client_ = pjrt_client_or.value(); device_compiler_ = new PjRtDeviceCompiler( std::make_unique<PjRtDeviceExecutablePersistor>( PjRtDeviceExecutablePersistor::Config(), jit_device_type), std::make_unique<PjRtDeviceCompilerClient>(pjrt_client_)); profiler_ = new DeviceCompilationProfiler(); 
compiler_options_.device_type = jit_device_type; compiler_options_.client = nullptr; compiler_options_.flib_def = flib_def_.get(); } ~PjRtExecutionUtilTest() override { for (const auto& tensor : tensors_) { delete tensor; } tensors_.clear(); device_context_->Unref(); core::ScopedUnref device_compiler_ref(device_compiler_); core::ScopedUnref profiler_ref(profiler_); } template <typename T> Tensor* CreateHostTensor(const TensorShape& shape, const gtl::ArraySlice<T> data) { Tensor* host_tensor = new Tensor(host_allocator_, DataTypeToEnum<T>::v(), shape); test::FillValues<T>(host_tensor, data); tensors_.push_back(host_tensor); return host_tensor; } template <typename T> Tensor* CreateDeviceTensor(const TensorShape& shape, const gtl::ArraySlice<T> data) { Tensor* host_tensor = CreateHostTensor<T>(shape, data); Tensor* device_tensor = new Tensor(device_allocator_, DataTypeToEnum<T>::v(), shape); TF_EXPECT_OK(device_context_->CopyCPUTensorToDeviceSync( host_tensor, device_, device_tensor)); tensors_.push_back(device_tensor); return device_tensor; } Tensor* GetOutput(int output_index) { CHECK_LT(output_index, context_->num_outputs()); Tensor* device_tensor = context_->mutable_output(output_index); managed_outputs_.resize(context_->num_outputs()); if (managed_outputs_[output_index]) { return managed_outputs_[output_index]; } Tensor* host_tensor = new Tensor(host_allocator_, device_tensor->dtype(), device_tensor->shape()); TF_EXPECT_OK(device_context_->CopyDeviceTensorToCPUSync( device_tensor, "", device_, host_tensor)); managed_outputs_[output_index] = host_tensor; return host_tensor; } void CompileToExecutable(const std::vector<XlaCompiler::Argument>& args, const XlaCompiler::CompilationResult** result, xla::PjRtLoadedExecutable** executable, XlaCompiler::CompileOptions compile_options = {}) { TF_EXPECT_OK(device_compiler_->CompileSingleOpIfNeeded( compiler_options_, args, compile_options, context_.get(), profiler_, result, executable)); } absl::StatusOr<std::vector<std::unique_ptr<xla::PjRtBuffer>>> RunExecutable( const std::vector<const Tensor*>& inputs, const std::vector<VariableInfo>& variables, const XlaCompiler::CompilationResult* result, xla::PjRtLoadedExecutable* executable) { TF_ASSIGN_OR_RETURN(auto pjrt_device, pjrt_client_->LookupAddressableDevice( xla::PjRtLocalDeviceId(device_->parsed_name().id))); std::vector<xla::PjRtBuffer*> executable_args; executable_args.reserve(result->input_mapping.size()); absl::flat_hash_set<int> non_donatable_input_indices; TF_EXPECT_OK(PreparePjRtExecutableArguments( 0, result->input_mapping, inputs, GetVariableSnapshots(variables), nullptr, nullptr, false, &executable_args, {}, &non_donatable_input_indices)); xla::ExecuteOptions exe_options; exe_options.arguments_are_tupled = false; exe_options.untuple_result = true; return executable->ExecutePortable(executable_args, pjrt_device, exe_options); } template <typename T> Var* CreateVariable(const string& name, const TensorShape& shape, const gtl::ArraySlice<T> data) { Tensor* init_var_value = CreateDeviceTensor<T>(shape, data); Var* var = new Var(DataTypeToEnum<T>::v()); *var->tensor() = *init_var_value; var->is_initialized = true; return var; } template <typename T> void AddVariableInput(const string& name, const TensorShape& shape, const gtl::ArraySlice<T> data) { Var* var = CreateVariable<T>(name, shape, data); ResourceMgr* rm = device_->resource_manager(); TF_ASSERT_OK(rm->Create(rm->default_container(), name, var)); ResourceHandle handle; handle.set_device(device_->name()); 
handle.set_container(rm->default_container()); handle.set_name(name); TypeIndex type_index = TypeIndex::Make<Var>(); handle.set_hash_code(type_index.hash_code()); handle.set_maybe_type_name(type_index.name()); Tensor* input = new Tensor(host_allocator_, DT_RESOURCE, TensorShape({})); input->scalar<ResourceHandle>()() = handle; tensors_.push_back(input); inputs_.push_back({nullptr, input}); } protected: DeviceContext* device_context_; Allocator* host_allocator_; Allocator* device_allocator_; XlaCompiler::Options compiler_options_; xla::PjRtClient* pjrt_client_; PjRtDeviceCompiler* device_compiler_; DeviceCompilationProfiler* profiler_; }; TEST_F(PjRtExecutionUtilTest, PreparePjRtExecutableArguments) { std::vector<const Tensor*> inputs; inputs.push_back(CreateDeviceTensor<int32_t>(TensorShape({1, 3}), {0, 0, 0})); inputs.push_back(CreateDeviceTensor<int32_t>(TensorShape({1, 3}), {1, 2, 3})); inputs.push_back(CreateDeviceTensor<int32_t>(TensorShape({1, 3}), {4, 5, 6})); int num_missing_prefix_ctx_inputs = 2; std::vector<int> input_mapping{3, 4}; std::vector<VariableInfo> variables; std::vector<xla::PjRtBuffer*> exec_args; exec_args.reserve(input_mapping.size()); absl::flat_hash_set<int> non_donatable_input_indices; TF_EXPECT_OK(PreparePjRtExecutableArguments( num_missing_prefix_ctx_inputs, input_mapping, inputs, GetVariableSnapshots(variables), nullptr, nullptr, false, &exec_args, {}, &non_donatable_input_indices)); EXPECT_EQ(exec_args.size(), 2); std::shared_ptr<xla::Literal> literal1 = *exec_args[0]->ToLiteralSync(); EXPECT_TRUE(xla::LiteralTestUtil::Equal( *literal1, xla::LiteralUtil::CreateR2<int32_t>({{1, 2, 3}}))); std::shared_ptr<xla::Literal> literal2 = *exec_args[1]->ToLiteralSync(); EXPECT_TRUE(xla::LiteralTestUtil::Equal( *literal2, xla::LiteralUtil::CreateR2<int32_t>({{4, 5, 6}}))); } TEST_F(PjRtExecutionUtilTest, PreparePjRtExecutableArgumentsVariableInputs) { std::vector<VariableInfo> variables; Var* var1 = CreateVariable<int32>("v1", TensorShape({1, 2}), {1, 2}); variables.emplace_back(3, "v1", var1); Var* var2 = CreateVariable<int32>("v2", TensorShape({1, 2}), {3, 4}); variables.emplace_back(4, "v2", var2); std::vector<const Tensor*> inputs; inputs.push_back(CreateDeviceTensor<int32_t>(TensorShape({1, 3}), {0, 0, 0})); int num_missing_prefix_ctx_inputs = 2; std::vector<int> input_mapping{3, 4}; std::vector<xla::PjRtBuffer*> exec_args; exec_args.reserve(input_mapping.size()); absl::flat_hash_set<int> non_donatable_input_indices; TF_EXPECT_OK(PreparePjRtExecutableArguments( num_missing_prefix_ctx_inputs, input_mapping, inputs, GetVariableSnapshots(variables), nullptr, nullptr, false, &exec_args, {}, &non_donatable_input_indices)); EXPECT_EQ(exec_args.size(), 2); std::shared_ptr<xla::Literal> literal1 = *exec_args[0]->ToLiteralSync(); EXPECT_TRUE(xla::LiteralTestUtil::Equal( *literal1, xla::LiteralUtil::CreateR2<int32_t>({{1, 2}}))); std::shared_ptr<xla::Literal> literal2 = *exec_args[1]->ToLiteralSync(); EXPECT_TRUE(xla::LiteralTestUtil::Equal( *literal2, xla::LiteralUtil::CreateR2<int32_t>({{3, 4}}))); } TEST_F(PjRtExecutionUtilTest, PopulateCtxOutputs) { XlaOpRegistry::RegisterCompilationKernels(); TF_EXPECT_OK(NodeDefBuilder("AddV2", "AddV2") .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_INT32)) .Attr("T", DT_INT32) .Device("/job:localhost/replica:0/task:0/device:XLA_CPU:0") .Finalize(node_def())); TF_EXPECT_OK(InitOp()); Tensor* a = CreateDeviceTensor<int32>(TensorShape({1, 3}), {1, 2, 3}); Tensor* b = CreateDeviceTensor<int32>(TensorShape({1, 3}), {4, 5, 6}); 
inputs_.push_back({nullptr, a}); inputs_.push_back({nullptr, b}); CreateContext(); std::vector<XlaCompiler::Argument> args(2); args[0].kind = XlaCompiler::Argument::kParameter; args[0].type = DT_INT32; args[0].shape = TensorShape({1, 3}); args[1].kind = XlaCompiler::Argument::kParameter; args[1].type = DT_INT32; args[1].shape = TensorShape({1, 3}); const XlaCompiler::CompilationResult* result; xla::PjRtLoadedExecutable* executable; CompileToExecutable(args, &result, &executable); std::vector<const Tensor*> inputs; inputs.push_back(a); inputs.push_back(b); TF_ASSERT_OK_AND_ASSIGN(auto execute_outputs, RunExecutable(inputs, {}, result, executable)); TF_EXPECT_OK(PopulateCtxOutputsFromPjRtExecutableOutputs( 0, inputs, {}, *result, false, execute_outputs, context_.get())); Tensor* expected = CreateHostTensor<int32>(TensorShape({1, 3}), {5, 7, 9}); test::ExpectTensorEqual<int32>(*expected, *GetOutput(0)); } TEST_F(PjRtExecutionUtilTest, PopulateCtxOutputsDynamicShape) { XlaOpRegistry::RegisterCompilationKernels(); TF_EXPECT_OK(NodeDefBuilder("testWhere", "Where") .Input(FakeInput(DT_FLOAT)) .Attr("T", DT_FLOAT) .Device("/job:localhost/replica:0/task:0/device:XLA_CPU:0") .Finalize(node_def())); TF_EXPECT_OK(InitOp()); Tensor* a = CreateDeviceTensor<float>(TensorShape({2, 3}), {0., 1., 1., 0., 0., 0.}); inputs_.push_back({nullptr, a}); CreateContext(); std::vector<XlaCompiler::Argument> args(1); args[0].kind = XlaCompiler::Argument::kParameter; args[0].type = DT_FLOAT; args[0].shape = TensorShape({2, 3}); const XlaCompiler::CompilationResult* result; xla::PjRtLoadedExecutable* executable; CompileToExecutable(args, &result, &executable); std::vector<const Tensor*> inputs; inputs.push_back(a); TF_ASSERT_OK_AND_ASSIGN(auto execute_outputs, RunExecutable(inputs, {}, result, executable)); TF_EXPECT_OK(PopulateCtxOutputsFromPjRtExecutableOutputs( 0, inputs, {}, *result, false, execute_outputs, context_.get())); Tensor* expected = CreateHostTensor<int64>(TensorShape({2, 2}), {0, 1, 0, 2}); test::ExpectTensorEqual<int64>(*expected, *GetOutput(0)); } TEST_F(PjRtExecutionUtilTest, PopulateCtxOutputsVariableInputs) { XlaOpRegistry::RegisterCompilationKernels(); TF_EXPECT_OK(NodeDefBuilder("AddV2", "AddV2") .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_INT32)) .Attr("T", DT_INT32) .Device("/job:localhost/replica:0/task:0/device:XLA_CPU:0") .Finalize(node_def())); TF_EXPECT_OK(InitOp()); AddVariableInput<int32>("var1", TensorShape({1, 2}), {1, 2}); AddVariableInput<int32>("var2", TensorShape({1, 2}), {3, 4}); CreateContext(); std::vector<XlaCompiler::Argument> args(2); args[0].kind = XlaCompiler::Argument::kParameter; args[0].initialized = true; args[0].type = DT_INT32; args[0].shape = TensorShape({1, 2}); args[1].kind = XlaCompiler::Argument::kParameter; args[1].initialized = true; args[1].type = DT_INT32; args[1].shape = TensorShape({1, 2}); const XlaCompiler::CompilationResult* result; xla::PjRtLoadedExecutable* executable; CompileToExecutable(args, &result, &executable); std::vector<const Tensor*> inputs = InputsFromContext(context_.get()); std::vector<int> variables_indices = GetResourceVariableIndicesFromContext(context_.get()); std::vector<VariableInfo> variables; variables.reserve(variables_indices.size()); TF_ASSERT_OK(GetVariableInfosFromInputs(context_->resource_manager(), context_->device(), inputs, variables_indices, &variables)); TF_ASSERT_OK_AND_ASSIGN(auto execute_outputs, RunExecutable(inputs, variables, result, executable)); TF_EXPECT_OK(PopulateCtxOutputsFromPjRtExecutableOutputs( 0, 
inputs, variables, *result, false, execute_outputs, context_.get())); Tensor* expected = CreateHostTensor<int32>(TensorShape({1, 2}), {4, 6}); test::ExpectTensorEqual<int32>(*expected, *GetOutput(0)); } TEST_F(PjRtExecutionUtilTest, PopulateCtxOutputsResourceUpdates) { XlaOpRegistry::RegisterCompilationKernels(); TF_EXPECT_OK(NodeDefBuilder("AssignAddVariableOp", "AssignAddVariableOp") .Input(FakeInput(DT_RESOURCE)) .Input(FakeInput(DT_INT32)) .Attr("dtype", DT_INT32) .Device("/job:localhost/replica:0/task:0/device:XLA_CPU:0") .Finalize(node_def())); TF_EXPECT_OK(InitOp()); AddVariableInput<int32>("var", TensorShape({1, 3}), {1, 2, 3}); Tensor* a = CreateDeviceTensor<int32>(TensorShape({1, 3}), {2, 2, 2}); inputs_.push_back({nullptr, a}); CreateContext(); std::vector<const Tensor*> inputs = InputsFromContext(context_.get()); std::vector<int> variables_indices = GetResourceVariableIndicesFromContext(context_.get()); std::vector<VariableInfo> variables; variables.reserve(variables_indices.size()); TF_ASSERT_OK(GetVariableInfosFromInputs(context_->resource_manager(), context_->device(), inputs, variables_indices, &variables)); TF_ASSERT_OK_AND_ASSIGN(std::vector<int> constant_input_indices, GetConstantInputIndicesFromContext(context_.get())); TF_ASSERT_OK(LockVariables(absl::MakeSpan(variables))); TF_ASSERT_OK_AND_ASSIGN( std::vector<XlaCompiler::Argument> args, XlaComputationLaunchContext::BuildXlaCompilerArguments( constant_input_indices, inputs, variables, static_cast<Device*>(context_->device()))); const XlaCompiler::CompilationResult* result; xla::PjRtLoadedExecutable* executable; CompileToExecutable(args, &result, &executable); TF_ASSERT_OK_AND_ASSIGN(auto execute_outputs, RunExecutable(inputs, variables, result, executable)); TF_EXPECT_OK(PopulateCtxOutputsFromPjRtExecutableOutputs( 0, inputs, variables, *result, false, execute_outputs, context_.get())); EXPECT_EQ(context_->num_outputs(), 0); ResourceMgr* rm = device_->resource_manager(); Var* var = nullptr; TF_ASSERT_OK(rm->Lookup(rm->default_container(), "var", &var)); core::ScopedUnref var_ref(var); Tensor* device_tensor = var->tensor(); Tensor* host_tensor = new Tensor(host_allocator_, device_tensor->dtype(), device_tensor->shape()); tensors_.push_back(host_tensor); TF_ASSERT_OK(device_context_->CopyDeviceTensorToCPUSync( device_tensor, "", device_, host_tensor)); Tensor* expected = CreateHostTensor<int32>(TensorShape({1, 3}), {3, 4, 5}); test::ExpectTensorEqual<int32>(*expected, *host_tensor); } TEST(XlaLaunchUtilTest, GetPjRtExecuteOptions) { xla::ExecuteOptions options = GetPjRtExecuteOptions(DeviceType(DEVICE_GPU), {}); EXPECT_FALSE(options.arguments_are_tupled); EXPECT_TRUE(options.untuple_result); EXPECT_FALSE(options.strict_shape_checking); EXPECT_TRUE(options.use_major_to_minor_data_layout_for_callbacks); } TEST_F(PjRtExecutionUtilTest, RunPjRtExecutable) { XlaOpRegistry::RegisterCompilationKernels(); TF_EXPECT_OK(NodeDefBuilder("AddV2", "AddV2") .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_INT32)) .Attr("T", DT_INT32) .Device("/job:localhost/replica:0/task:0/device:XLA_CPU:0") .Finalize(node_def())); TF_EXPECT_OK(InitOp()); AddVariableInput<int32>("var1", TensorShape({1, 2}), {1, 2}); AddVariableInput<int32>("var2", TensorShape({1, 2}), {3, 4}); CreateContext(); std::vector<XlaCompiler::Argument> args(2); args[0].kind = XlaCompiler::Argument::kParameter; args[0].initialized = true; args[0].type = DT_INT32; args[0].shape = TensorShape({1, 2}); args[1].kind = XlaCompiler::Argument::kParameter; args[1].initialized = true; 
args[1].type = DT_INT32; args[1].shape = TensorShape({1, 2}); const XlaCompiler::CompilationResult* result; xla::PjRtLoadedExecutable* executable; CompileToExecutable(args, &result, &executable); std::vector<const Tensor*> inputs = InputsFromContext(context_.get()); std::vector<int> variables_indices = GetResourceVariableIndicesFromContext(context_.get()); std::vector<VariableInfo> variables; variables.reserve(variables_indices.size()); TF_ASSERT_OK(GetVariableInfosFromInputs(context_->resource_manager(), context_->device(), inputs, variables_indices, &variables)); TF_ASSERT_OK(RunPjRtExecutable(inputs, variables, *result, pjrt_client_, executable, context_.get())); Tensor* expected = CreateHostTensor<int32>(TensorShape({1, 2}), {4, 6}); test::ExpectTensorEqual<int32>(*expected, *GetOutput(0)); } TEST_F(PjRtExecutionUtilTest, RunPjRtExecutableWithVariableSnapshotsAndMissingInputs) { XlaOpRegistry::RegisterCompilationKernels(); TF_EXPECT_OK(NodeDefBuilder("Fill", "Fill") .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_INT32)) .Attr("index_type", DT_INT32) .Attr("T", DT_INT32) .Device("/job:localhost/replica:0/task:0/device:XLA_CPU:0") .Finalize(node_def())); TF_EXPECT_OK(InitOp()); Tensor* dims = CreateHostTensor<int32>(TensorShape({1}), {2}); Tensor* value = CreateDeviceTensor<int32>(TensorShape(), {1}); inputs_.push_back({nullptr, dims}); inputs_.push_back({nullptr, value}); CreateContext(); TF_ASSERT_OK_AND_ASSIGN(std::vector<int> constant_input_indices, GetConstantInputIndicesFromContext(context_.get())); EXPECT_EQ(constant_input_indices.size(), 1); std::vector<const Tensor*> inputs = InputsFromContext(context_.get()); std::vector<int> variables_indices = GetResourceVariableIndicesFromContext(context_.get()); absl::flat_hash_map<int, const Tensor*> variable_snapshots; const XlaCompiler::CompilationResult* result; xla::PjRtLoadedExecutable* executable; { std::vector<VariableInfo> variables; variables.reserve(variables_indices.size()); TF_ASSERT_OK(GetVariableInfosFromInputs(context_->resource_manager(), context_->device(), inputs, variables_indices, &variables)); TF_ASSERT_OK(LockVariables(absl::MakeSpan(variables))); variable_snapshots = GetVariableSnapshots(variables); TF_ASSERT_OK_AND_ASSIGN( std::vector<XlaCompiler::Argument> args, XlaComputationLaunchContext::BuildXlaCompilerArguments( constant_input_indices, inputs, variables, static_cast<Device*>(context_->device()))); CompileToExecutable(args, &result, &executable); } inputs = {inputs.begin() + constant_input_indices.size(), inputs.end()}; { TF_ASSERT_OK_AND_ASSIGN(std::vector<VariableInfo> updated_variables, GatherVariableInfo(context_.get(), *result, constant_input_indices.size())); TF_ASSERT_OK(LockVariables(absl::MakeSpan(updated_variables))); TF_ASSERT_OK(RunPjRtExecutable( constant_input_indices.size(), inputs, variable_snapshots, updated_variables, *result, pjrt_client_, executable, context_.get())); } Tensor* expected = CreateHostTensor<int32>(TensorShape({2}), {1, 1}); test::ExpectTensorEqual<int32>(*expected, *GetOutput(0)); } TEST_F(PjRtExecutionUtilTest, RunPjRtExecutableWithoutCtx) { XlaOpRegistry::RegisterCompilationKernels(); TF_ASSERT_OK(NodeDefBuilder("AddV2", "AddV2") .Input(FakeInput(DT_INT32)) .Input(FakeInput(DT_INT32)) .Attr("T", DT_INT32) .Device("/job:localhost/replica:0/task:0/device:XLA_CPU:0") .Finalize(node_def())); TF_ASSERT_OK(InitOp()); AddVariableInput<int32>("var1", TensorShape({1, 2}), {1, 2}); AddVariableInput<int32>("var2", TensorShape({1, 2}), {3, 4}); CreateContext(); 
std::vector<XlaCompiler::Argument> args(2); args[0].kind = XlaCompiler::Argument::kParameter; args[0].initialized = true; args[0].type = DT_INT32; args[0].shape = TensorShape({1, 2}); args[1].kind = XlaCompiler::Argument::kParameter; args[1].initialized = true; args[1].type = DT_INT32; args[1].shape = TensorShape({1, 2}); const XlaCompiler::CompilationResult* result; xla::PjRtLoadedExecutable* executable; CompileToExecutable(args, &result, &executable); std::vector<const Tensor*> inputs = InputsFromContext(context_.get()); std::vector<int> variables_indices = GetResourceVariableIndicesFromContext(context_.get()); std::vector<VariableInfo> variables; variables.reserve(variables_indices.size()); TF_ASSERT_OK(GetVariableInfosFromInputs(context_->resource_manager(), context_->device(), inputs, variables_indices, &variables)); const bool use_pjrt_tensor_buffer = context_->device() ->tensorflow_accelerator_device_info() ->use_pjrt_tensor_buffer; const DeviceType& device_type = GetDeviceType(context_.get()); TF_ASSERT_OK_AND_ASSIGN(const int pjrt_device_id, tsl::GetDeviceIdFromDeviceParsedName( context_->device()->parsed_name(), device_type)); TF_ASSERT_OK_AND_ASSIGN(xla::PjRtDevice * pjrt_device, pjrt_client_->LookupAddressableDevice( xla::PjRtLocalDeviceId(pjrt_device_id))); absl::flat_hash_map<int, const Tensor*> variable_snapshots; for (int i = 0; i < variables.size(); i++) { variable_snapshots[variables[i].index()] = variables[i].var()->tensor(); } TF_ASSERT_OK_AND_ASSIGN( std::vector<std::unique_ptr<xla::PjRtBuffer>> execute_outputs, RunPjRtExecutable(0, inputs, variable_snapshots, variables, device_type, use_pjrt_tensor_buffer, *result, pjrt_device, pjrt_client_, executable)); for (const auto& output : execute_outputs) { TF_ASSERT_OK(output->GetReadyFuture().Await()); } ASSERT_EQ(execute_outputs.size(), 1); std::shared_ptr<xla::Literal> literal = *execute_outputs[0]->ToLiteralSync(); EXPECT_TRUE(xla::LiteralTestUtil::Equal( *literal, xla::LiteralUtil::CreateR2<int32_t>({{4, 6}}))); } } }
1,071
cpp
tensorflow/tensorflow
xla_cluster_util
tensorflow/compiler/jit/xla_cluster_util.cc
tensorflow/compiler/jit/xla_cluster_util_test.cc
#ifndef TENSORFLOW_COMPILER_JIT_XLA_CLUSTER_UTIL_H_ #define TENSORFLOW_COMPILER_JIT_XLA_CLUSTER_UTIL_H_ #include <string> #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/status/statusor.h" #include "absl/types/optional.h" #include "tensorflow/compiler/jit/xla_activity.pb.h" #include "xla/service/graphcycles/graphcycles.h" #include "tensorflow/core/common_runtime/optimization_registry.h" #include "tensorflow/core/graph/algorithm.h" namespace tensorflow { extern const char* const kXlaClusterAttr; extern const char* const kXlaCompileTimeConstantInputsAttr; using OrderedNodeSet = std::set<Node*, NodeComparatorID>; bool HasForwardedRefInput(const Node& node); absl::StatusOr<bool> CreateCycleDetectionGraph(const Graph* graph, GraphCycles* cycles); std::optional<absl::string_view> GetXlaClusterForNode(const Node& node); void RemoveFromXlaCluster(NodeDef* node_def); void RemoveFromXlaCluster(Node* node); bool HasResourceInputOrOutput(const Node& node); OptimizerOptions::GlobalJitLevel GetGlobalJitLevelForGraph( const GraphOptimizationPassOptions& options); bool IsSingleGpuGraph(const Graph& g); bool MayCallFunction(const Node& n, const FunctionLibraryDefinition* flib_def); bool IsShapeConsumerOp(const Node& node); XlaAutoClusteringSummary GetXlaAutoClusteringSummary(const Graph& graph); absl::StatusOr<absl::flat_hash_set<Node*>> GetNodesRelatedToRefVariables( const Graph& graph, FunctionLibraryRuntime* lib_runtime); absl::StatusOr<std::string> SerializeGraphDeterministic(const Graph& graph); absl::StatusOr<uint64> FingerprintGraph(const Graph& graph); } #endif #include "tensorflow/compiler/jit/xla_cluster_util.h" #include <string> #include <unordered_map> #include "absl/algorithm/container.h" #include "absl/container/inlined_vector.h" #include "absl/strings/match.h" #include "absl/strings/numbers.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_join.h" #include "tensorflow/compiler/jit/flags.h" #include "xla/status_macros.h" #include "tensorflow/core/common_runtime/function.h" #include "tensorflow/core/framework/bounds_check.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/graph/control_flow.h" #include "tensorflow/core/lib/gtl/cleanup.h" #include "tensorflow/core/lib/strings/proto_serialization.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/fingerprint.h" #include "tensorflow/core/public/session_options.h" #include "tensorflow/core/util/device_name_utils.h" #include "tensorflow/core/util/xla_config_registry.h" namespace tensorflow { const char* const kXlaClusterAttr = "_XlaCluster"; const char* const kXlaCompileTimeConstantInputsAttr = "_XlaCompileTimeConstantInputs"; namespace { string DescribeCycle(const GraphCycles* cycles, const Graph& graph, int src, int dst) { int32_t max_path_size = graph.num_node_ids() + 1; std::vector<int32> path(max_path_size); int32_t path_size = cycles->FindPath(dst, src, max_path_size, path.data()); if (path_size == 0) { return ""; } auto node_name = [&graph](int node_id) { if (!FastBoundsCheck(node_id, graph.num_node_ids())) { return string("(null)"); } auto* node = graph.FindNodeId(node_id); if (node == nullptr) { return string("(null)"); } return node->name(); }; string description; absl::StrAppend(&description, "Edge from ", node_name(src), " to ", node_name(dst), " would create a cycle.\n"); path.resize(path_size); for (int32_t node_id : path) { string ascii_art; if (node_id == dst) { ascii_art = "+-> "; } else if (node_id != src) 
{ ascii_art = "| "; } else { ascii_art = "+-- "; } absl::StrAppend(&description, ascii_art, node_name(node_id), "\n"); } return description; } bool AlwaysForwardsRefInput(const Node& node) { return node.IsIdentity(); } } bool HasForwardedRefInput(const Node& node) { if (AlwaysForwardsRefInput(node)) { for (const Edge* incoming_edge : node.in_edges()) { if (incoming_edge->IsControlEdge()) { continue; } Node* incoming_node = incoming_edge->src(); if (IsRefType(incoming_node->output_type(incoming_edge->src_output()))) { VLOG(2) << "Node " << node.def().ShortDebugString() << " has ref input " << incoming_node->name() << " " << incoming_node->type_string(); return true; } } } return false; } absl::StatusOr<bool> CreateCycleDetectionGraph(const Graph* graph, GraphCycles* cycles) { for (int i = 0; i < graph->num_node_ids(); ++i) { CHECK_EQ(i, cycles->NewNode()); } std::vector<ControlFlowInfo> control_flow_info; TF_RETURN_IF_ERROR(BuildControlFlowInfo(graph, &control_flow_info)); std::unordered_map<string, int> frame_nodes; auto GetOrAddFrameNodeId = [&frame_nodes, cycles](const string& frame_name) { int& frame_id = frame_nodes.emplace(frame_name, -1).first->second; if (frame_id < 0) { frame_id = cycles->NewNode(); } return frame_id; }; for (Edge const* edge : graph->edges()) { if (edge->dst()->IsEnter() || edge->src()->IsExit()) { const char* src_type = "pre-enter"; const char* dst_type = "post-exit"; int src = edge->src()->id(); int dst = edge->dst()->id(); if (edge->dst()->IsEnter()) { const string& frame_name = control_flow_info[edge->dst()->id()].frame_name; dst = GetOrAddFrameNodeId(frame_name); dst_type = "frame"; } if (edge->src()->IsExit()) { const string& frame_name = control_flow_info[edge->src()->id()].frame_name; src = GetOrAddFrameNodeId(frame_name); src_type = "frame"; } if (!cycles->InsertEdge(src, dst)) { VLOG(1) << "Cycle detected when adding " << src_type << "->" << dst_type << " edge: " << DescribeCycle(cycles, *graph, src, dst); return false; } continue; } if (edge->src()->IsNextIteration()) { continue; } if (!cycles->InsertEdge(edge->src()->id(), edge->dst()->id())) { return errors::Internal( "Found cycle in graph without control flow operator during XLA " "compilation: ", DescribeCycle(cycles, *graph, edge->src()->id(), edge->dst()->id())); } } return true; } std::optional<absl::string_view> GetXlaClusterForNode(const Node& node) { const AttrValue* attr_value = node.attrs().Find(kXlaClusterAttr); if (attr_value == nullptr) { return std::nullopt; } Status s = AttrValueHasType(*attr_value, "string"); if (!s.ok()) { return std::nullopt; } return attr_value->s(); } bool HasResourceInputOrOutput(const Node& node) { return std::find(node.input_types().begin(), node.input_types().end(), DT_RESOURCE) != node.input_types().end() || std::find(node.output_types().begin(), node.output_types().end(), DT_RESOURCE) != node.output_types().end(); } void RemoveFromXlaCluster(NodeDef* node_def) { node_def->mutable_attr()->erase(kXlaClusterAttr); } void RemoveFromXlaCluster(Node* node) { node->ClearAttr(kXlaClusterAttr); } namespace { typedef xla_config_registry::XlaGlobalJitLevel XlaGlobalJitLevel; XlaGlobalJitLevel GetXlaGlobalJitLevel( const OptimizerOptions::GlobalJitLevel& jit_level_in_session_opts) { XlaGlobalJitLevel result; if (jit_level_in_session_opts == OptimizerOptions::DEFAULT) { result.single_gpu = result.general = OptimizerOptions::OFF; } else { result.single_gpu = result.general = jit_level_in_session_opts; } MarkForCompilationPassFlags* flags = GetMarkForCompilationPassFlags(); 
if (flags->xla_auto_jit_flag.optimization_level_single_gpu != OptimizerOptions::DEFAULT) { result.single_gpu = static_cast<OptimizerOptions::GlobalJitLevel>( flags->xla_auto_jit_flag.optimization_level_single_gpu); } if (flags->xla_auto_jit_flag.optimization_level_general != OptimizerOptions::DEFAULT) { result.general = static_cast<OptimizerOptions::GlobalJitLevel>( flags->xla_auto_jit_flag.optimization_level_general); } return result; } int GetGpuNumber(const string& device_name) { DeviceNameUtils::ParsedName parsed_name; if (!DeviceNameUtils::ParseFullName(device_name, &parsed_name)) { return -1; } return parsed_name.type == DEVICE_GPU ? parsed_name.id : -1; } } bool IsSingleGpuGraph(const Graph& g) { int gpus_seen = 0; absl::flat_hash_set<string> devices_seen; for (Node* n : g.op_nodes()) { if (devices_seen.contains(n->assigned_device_name())) { continue; } int gpu_number = GetGpuNumber(n->assigned_device_name()); if (gpu_number != -1) { if (++gpus_seen > 1) { return false; } } devices_seen.insert(n->assigned_device_name()); } return gpus_seen == 1; } OptimizerOptions::GlobalJitLevel GetGlobalJitLevelForGraph( const GraphOptimizationPassOptions& options) { OptimizerOptions::GlobalJitLevel jit_level_in_session_opts = options.session_options->config.graph_options() .optimizer_options() .global_jit_level(); XlaGlobalJitLevel xla_global_jit_level = GetXlaGlobalJitLevel(jit_level_in_session_opts); if (xla_global_jit_level.single_gpu == xla_global_jit_level.general) { VLOG(4) << "GetGlobalJitLevelForGraph returning " << xla_global_jit_level.single_gpu; return xla_global_jit_level.single_gpu; } OptimizerOptions::GlobalJitLevel result = IsSingleGpuGraph(**options.graph) ? xla_global_jit_level.single_gpu : xla_global_jit_level.general; VLOG(4) << "GetGlobalJitLevelForGraph returning " << result; return result; } bool MayCallFunction(const Node& n, const FunctionLibraryDefinition* flib_def) { if (flib_def->Contains(n.type_string())) { return true; } return absl::c_any_of(n.def().attr(), [](const std::pair<string, AttrValue>& name_attr_pair) { return name_attr_pair.second.has_func(); }); } bool IsShapeConsumerOp(const Node& node) { return node.type_string() == "Shape" || node.type_string() == "Rank" || node.type_string() == "Size"; } namespace { struct ClusterInfo { int size; absl::flat_hash_map<absl::string_view, int> op_histogram; }; void HistogramMapToRepeatedOpAndCount( protobuf::RepeatedPtrField<XlaAutoClusteringSummary::OpAndCount>* result, const absl::flat_hash_map<absl::string_view, int>& histogram) { for (const auto& pair : histogram) { XlaAutoClusteringSummary::OpAndCount* new_entry = result->Add(); new_entry->set_op(std::string(pair.first)); new_entry->set_count(pair.second); } absl::c_sort(*result, [](const XlaAutoClusteringSummary::OpAndCount& a, const XlaAutoClusteringSummary::OpAndCount& b) { return a.op() < b.op(); }); } void ClusterInfoToProtobuf(XlaAutoClusteringSummary::Cluster* result, absl::string_view name, const ClusterInfo& info) { result->set_name(std::string(name)); result->set_size(info.size); HistogramMapToRepeatedOpAndCount(result->mutable_op_histogram(), info.op_histogram); } } XlaAutoClusteringSummary GetXlaAutoClusteringSummary(const Graph& graph) { absl::flat_hash_map<absl::string_view, ClusterInfo> cluster_name_to_info; XlaAutoClusteringSummary result; absl::flat_hash_map<absl::string_view, int> unclustered_op_histogram; for (Node* n : graph.nodes()) { std::optional<absl::string_view> cluster_name = GetXlaClusterForNode(*n); if (cluster_name) { 
result.set_clustered_node_count(result.clustered_node_count() + 1); ClusterInfo* info = &cluster_name_to_info[*cluster_name]; info->size++; info->op_histogram[n->type_string()]++; } else { result.set_unclustered_node_count(result.unclustered_node_count() + 1); unclustered_op_histogram[n->type_string()]++; } } for (const auto& pair : cluster_name_to_info) { XlaAutoClusteringSummary::Cluster* new_cluster = result.add_clusters(); ClusterInfoToProtobuf(new_cluster, pair.first, pair.second); } absl::c_sort(*result.mutable_clusters(), [&](const XlaAutoClusteringSummary::Cluster& a, const XlaAutoClusteringSummary::Cluster& b) { return a.name() < b.name(); }); HistogramMapToRepeatedOpAndCount(result.mutable_unclustered_op_histogram(), unclustered_op_histogram); return result; } namespace { using CallTargetListTy = absl::InlinedVector<NameAttrList, 2>; CallTargetListTy GetCallTargetListFromNode( const Node& n, FunctionLibraryRuntime* lib_runtime) { const FunctionLibraryDefinition& flib_def = *lib_runtime->GetFunctionLibraryDefinition(); if (flib_def.Find(n.type_string())) { NameAttrList callee; callee.set_name(n.type_string()); *callee.mutable_attr() = n.def().attr(); return {callee}; } CallTargetListTy result; for (const auto& name_attr_pair : n.attrs()) { const AttrValue& attr_value = name_attr_pair.second; if (attr_value.value_case() == AttrValue::kFunc) { result.push_back(attr_value.func()); } else if (attr_value.value_case() == AttrValue::kList) { result.insert(result.end(), attr_value.list().func().begin(), attr_value.list().func().end()); } } return result; } enum class Direction { kForward, kBackward }; Status GetNodesRelatedToRefVariablesInDirection( const Graph& graph, FunctionLibraryRuntime* lib_runtime, Direction direction, int depth, absl::flat_hash_set<Node*>* result); absl::StatusOr<bool> DoesAnyCalleeHaveRefNodes( const CallTargetListTy& call_target_list, FunctionLibraryRuntime* lib_runtime, Direction direction, int depth) { const int kMaxDepth = 10; if (depth == kMaxDepth && !call_target_list.empty()) { return true; } absl::flat_hash_set<Node*> callee_ref_nodes; for (const NameAttrList& call_target : call_target_list) { const OpRegistrationData* op_reg; if (OpRegistry::Global()->LookUp(call_target.name(), &op_reg).ok()) { const OpDef& op = op_reg->op_def; if (absl::c_any_of(op.output_arg(), [](const OpDef::ArgDef arg) { return arg.is_ref(); })) { return true; } continue; } callee_ref_nodes.clear(); FunctionLibraryRuntime::Handle handle; if (!lib_runtime ->Instantiate(call_target.name(), AttrSlice(&call_target.attr()), &handle) .ok()) { VLOG(2) << "Could not find " << call_target.name() << " in the function library."; return true; } auto release_handle_on_return = gtl::MakeCleanup( [&] { TF_CHECK_OK(lib_runtime->ReleaseHandle(handle)); }); const FunctionBody* fbody = lib_runtime->GetFunctionBody(handle); TF_RETURN_IF_ERROR(GetNodesRelatedToRefVariablesInDirection( *fbody->graph, lib_runtime, direction, depth + 1, &callee_ref_nodes)); if (!callee_ref_nodes.empty()) { return true; } } return false; } Status GetNodesRelatedToRefVariablesInDirection( const Graph& graph, FunctionLibraryRuntime* lib_runtime, Direction direction, int depth, absl::flat_hash_set<Node*>* result) { std::vector<Node*> nodes_in_order; if (direction == Direction::kForward) { GetReversePostOrder(graph, &nodes_in_order, NodeComparatorName()); } else { GetPostOrder(graph, &nodes_in_order, NodeComparatorName()); } size_t old_result_size; int iterations = 0; const int kMaxIterations = 10 * 1000; std::vector<bool> 
callee_has_ref_nodes_cache; callee_has_ref_nodes_cache.resize(graph.num_node_ids()); auto does_callee_have_ref_nodes = [&](Node* n) -> absl::StatusOr<bool> { if (iterations == 1) { TF_ASSIGN_OR_RETURN( bool callee_has_ref_nodes, DoesAnyCalleeHaveRefNodes(GetCallTargetListFromNode(*n, lib_runtime), lib_runtime, direction, depth)); callee_has_ref_nodes_cache[n->id()] = callee_has_ref_nodes; return callee_has_ref_nodes; } else { return {callee_has_ref_nodes_cache[n->id()]}; } }; do { TF_RET_CHECK(iterations++ < kMaxIterations) << "infinite loop?"; old_result_size = result->size(); for (Node* n : nodes_in_order) { if (n->IsSource() || n->IsSink()) { continue; } bool inserted_n = false; const EdgeSet& edges = direction == Direction::kForward ? n->in_edges() : n->out_edges(); for (const Edge* e : edges) { if (result->contains(direction == Direction::kForward ? e->src() : e->dst())) { result->insert(n); inserted_n = true; break; } } if (inserted_n) { continue; } if (direction == Direction::kForward && absl::c_any_of(n->output_types(), IsRefType)) { result->insert(n); continue; } TF_ASSIGN_OR_RETURN(bool callee_has_ref_nodes, does_callee_have_ref_nodes(n)); if (callee_has_ref_nodes) { result->insert(n); continue; } } } while (result->size() != old_result_size); VLOG(2) << "# iterations = " << iterations; return absl::OkStatus(); } void SortControlInputs(GraphDef* gdef) { int64_t num_nodes = gdef->node_size(); for (int64_t i = 0; i < num_nodes; ++i) { NodeDef* node = gdef->mutable_node(i); std::stable_sort(node->mutable_input()->begin(), node->mutable_input()->end(), [](const string& a, const string& b) { bool a_is_control = absl::StartsWith(a, "^"); bool b_is_control = absl::StartsWith(b, "^"); return (!a_is_control && b_is_control) || (a_is_control && b_is_control && a < b); }); } } } absl::StatusOr<absl::flat_hash_set<Node*>> GetNodesRelatedToRefVariables( const Graph& graph, FunctionLibraryRuntime* lib_runtime) { absl::flat_hash_set<Node*> result; TF_RETURN_IF_ERROR(GetNodesRelatedToRefVariablesInDirection( graph, lib_runtime, Direction::kForward, 0, &result)); TF_RETURN_IF_ERROR(GetNodesRelatedToRefVariablesInDirection( graph, lib_runtime, Direction::kBackward, 0, &result)); VLOG(1) << "GetNodesRelatedToRefVariables() found " << result.size() << " nodes"; return result; } absl::StatusOr<std::string> SerializeGraphDeterministic(const Graph& graph) { GraphDef def; graph.ToGraphDef(&def); SortControlInputs(&def); std::string s; if (!SerializeToStringDeterministic(def, &s)) { return errors::Internal("Failed to serialize graphdef."); } return s; } absl::StatusOr<uint64> FingerprintGraph(const Graph& graph) { TF_ASSIGN_OR_RETURN(std::string serialized, SerializeGraphDeterministic(graph)); return Hash64(serialized.data(), serialized.size()); } REGISTER_XLA_CONFIG_GETTER(GetXlaGlobalJitLevel); }
#include "tensorflow/compiler/jit/xla_cluster_util.h" #include "absl/algorithm/container.h" #include "absl/strings/str_join.h" #include "tensorflow/cc/framework/ops.h" #include "tensorflow/cc/ops/control_flow_ops_internal.h" #include "tensorflow/cc/ops/function_ops.h" #include "tensorflow/cc/ops/functional_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "xla/status_macros.h" #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/common_runtime/process_function_library_runtime.h" #include "tensorflow/core/framework/function_testlib.h" #include "tensorflow/core/framework/graph_to_functiondef.h" #include "tensorflow/core/graph/algorithm.h" #include "tensorflow/core/graph/graph_def_builder.h" #include "tensorflow/core/graph/testlib.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/public/version.h" namespace tensorflow { namespace { TEST(CreateCycleDetectionGraph, ConnectivityThroughEnterExitRegion) { Scope root = Scope::NewRootScope().ExitOnError(); Output a = ops::Const(root.WithOpName("a"), Input::Initializer(0.0)); Output enter = ops::internal::Enter(root.WithOpName("enter"), a, "only_frame"); Output exit = ops::internal::Exit(root.WithOpName("exit"), enter); Output b = ops::Add(root.WithOpName("b"), a, exit); FixupSourceAndSinkEdges(root.graph()); GraphCycles cycles; TF_ASSERT_OK(CreateCycleDetectionGraph(root.graph(), &cycles).status()); EXPECT_FALSE(cycles.CanContractEdge(a.node()->id(), b.node()->id())); } TEST(CreateCycleDetectionGraph, ConnectivityThroughMultipleEnterExitRegions) { Scope root = Scope::NewRootScope().ExitOnError(); Output a = ops::Const(root.WithOpName("a"), Input::Initializer(0.0)); Output enter_0 = ops::internal::Enter(root.WithOpName("enter_0"), a, "frame_0"); Output exit_0 = ops::internal::Exit(root.WithOpName("exit_0"), enter_0); Output enter_1 = ops::internal::Enter(root.WithOpName("enter_1"), a, "frame_1"); Output exit_1 = ops::internal::Exit(root.WithOpName("exit_1"), enter_1); Output b = ops::Add(root.WithOpName("b"), a, exit_1); FixupSourceAndSinkEdges(root.graph()); GraphCycles cycles; TF_ASSERT_OK(CreateCycleDetectionGraph(root.graph(), &cycles).status()); EXPECT_FALSE(cycles.CanContractEdge(a.node()->id(), b.node()->id())); } TEST(CreateCycleDetectionGraph, ReachingEnterExit) { Scope root = Scope::NewRootScope().ExitOnError(); Output a = ops::Const(root.WithOpName("a"), Input::Initializer(0.0)); Output enter_0 = ops::internal::Enter(root.WithOpName("enter_0"), a, "frame_0"); Output exit_0 = ops::internal::Exit(root.WithOpName("exit_0"), enter_0); Output add = ops::Add(root.WithOpName("add"), exit_0, exit_0); Output enter_1 = ops::internal::Enter(root.WithOpName("enter_1"), add, "frame_0"); Output exit_1 = ops::internal::Exit(root.WithOpName("exit_1"), enter_1); FixupSourceAndSinkEdges(root.graph()); GraphCycles cycles; TF_ASSERT_OK_AND_ASSIGN(bool ok, CreateCycleDetectionGraph(root.graph(), &cycles)); EXPECT_FALSE(ok); } const char* kCPU0 = "/job:localhost/replica:0/task:0/device:CPU:0"; const char* kGPU0 = "/job:localhost/replica:0/task:0/device:GPU:0"; const char* kGPU1 = "/job:localhost/replica:0/task:0/device:GPU:1"; TEST(IsSingleGpuGraph, ReturnsTrue) { Scope root = Scope::NewRootScope().WithAssignedDevice(kGPU0).ExitOnError(); Output a = ops::Const(root.WithOpName("a"), Input::Initializer(0.0)); Output b = ops::Add(root.WithOpName("b"), a, a); Output c = ops::Add(root.WithOpName("c"), b, b); FixupSourceAndSinkEdges(root.graph()); 
EXPECT_TRUE(IsSingleGpuGraph(*root.graph())); } TEST(IsSingleGpuGraph, ReturnsFalseForCpuGraph) { Scope root = Scope::NewRootScope().WithAssignedDevice(kCPU0).ExitOnError(); Output a = ops::Const(root.WithOpName("a"), Input::Initializer(0.0)); Output b = ops::Add(root.WithOpName("b"), a, a); Output c = ops::Add(root.WithOpName("c"), b, b); FixupSourceAndSinkEdges(root.graph()); EXPECT_FALSE(IsSingleGpuGraph(*root.graph())); } TEST(IsSingleGpuGraph, ReturnsFalseForMultiGpuGraph) { Scope root = Scope::NewRootScope().WithAssignedDevice(kGPU0).ExitOnError(); Output a = ops::Const(root.WithOpName("a"), Input::Initializer(0.0)); Output b = ops::Add(root.WithOpName("b").WithAssignedDevice(kGPU1), a, a); Output c = ops::Add(root.WithOpName("c"), b, b); FixupSourceAndSinkEdges(root.graph()); EXPECT_FALSE(IsSingleGpuGraph(*root.graph())); } absl::StatusOr<std::vector<string>> GetNodesRelatedToRefVarsSorted( const Scope& scope, FunctionLibraryDefinition* flib_def = nullptr) { FunctionDefLibrary flib; FunctionLibraryDefinition flib_def_local(OpRegistry::Global(), flib); if (flib_def == nullptr) { flib_def = &flib_def_local; } std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); TF_RETURN_IF_ERROR(scope.ToGraph(graph.get())); std::unique_ptr<ProcessFunctionLibraryRuntime> pflr( new ProcessFunctionLibraryRuntime( nullptr, Env::Default(), nullptr, TF_GRAPH_DEF_VERSION, flib_def, OptimizerOptions{})); FunctionLibraryRuntime* lib_runtime = pflr->GetFLR(ProcessFunctionLibraryRuntime::kDefaultFLRDevice); TF_ASSIGN_OR_RETURN(absl::flat_hash_set<Node*> nodes_related_to_ref_vars, GetNodesRelatedToRefVariables(*graph, lib_runtime)); std::vector<string> names; absl::c_transform(nodes_related_to_ref_vars, std::back_inserter(names), [](Node* n) { return n->name(); }); absl::c_sort(names); return names; } void CreateSubgraphTouchingRefVar(const Scope& s) { Output variable = ops::Variable(s.WithOpName("variable"), PartialTensorShape{}, DT_FLOAT); Output read = ops::Identity(s.WithOpName("read_ref_var"), variable); Output neg = ops::Negate(s.WithOpName("negate_ref"), read); Output add = ops::Add(s.WithOpName("add_ref"), neg, neg); Output constant = ops::Const(s.WithOpName("constant_ref"), Input::Initializer(0.0)); s.graph()->AddControlEdge(constant.node(), variable.node()); } void CreateSubgraphNotTouchingRefVar(const Scope& s) { Output constant = ops::Const(s.WithOpName("constant_normal"), Input::Initializer(0.0)); Output neg = ops::Negate(s.WithOpName("negate_normal"), constant); Output add = ops::Add(s.WithOpName("add_normal"), neg, neg); } void CreateSubgraphCallingFunctionWithRefVar(const Scope& s) { NameAttrList ref_float_function; ref_float_function.set_name("RefFloatFn"); ops::PartitionedCall call(s.WithOpName("RefFloat"), {absl::Span<Input>{}}, {DT_FLOAT}, ref_float_function); Output constant = ops::Const(s.WithOpName("constant_ref_pco"), Input::Initializer(0.0)); s.graph()->AddControlEdge(call.operation.node(), constant.node()); } void CreateSubgraphCallingFunctionWithoutRefVar(const Scope& s) { NameAttrList regular_float_function; regular_float_function.set_name("RegularFloatFn"); ops::PartitionedCall call(s.WithOpName("RegularFloat"), {absl::Span<Input>{}}, {DT_FLOAT}, regular_float_function); Output constant = ops::Const(s.WithOpName("constant_normal_pco"), Input::Initializer(0.0)); s.graph()->AddControlEdge(call.operation.node(), constant.node()); } void AddRefFunctionFunctionDef(FunctionDefLibrary* fdef_lib) { FunctionDef make_ref_float = FunctionDefHelper::Define( "RefFloatFn", {}, 
{"r:float"}, {}, {{{"var"}, "VariableV2", {}, {{"dtype", DT_FLOAT}, {"shape", TensorShape({})}}}, {{"r"}, "Identity", {"var"}, {{"T", DT_FLOAT}}}}); *fdef_lib->add_function() = make_ref_float; } void AddRegularFunctionFunctionDef(FunctionDefLibrary* fdef_lib) { Tensor seven(DT_FLOAT, {}); seven.scalar<float>()() = 7; FunctionDef make_regular_float = FunctionDefHelper::Define( "RegularFloatFn", {}, {"r:float"}, {}, {{{"r"}, "Const", {}, {{"dtype", DT_FLOAT}, {"value", seven}}}}); *fdef_lib->add_function() = make_regular_float; } TEST(NodesRelatedToRefVariables, Basic) { Scope root = Scope::NewRootScope().ExitOnError(); FunctionDefLibrary fdef_lib; CreateSubgraphTouchingRefVar(root); CreateSubgraphNotTouchingRefVar(root); AddRefFunctionFunctionDef(&fdef_lib); CreateSubgraphCallingFunctionWithRefVar(root); AddRegularFunctionFunctionDef(&fdef_lib); CreateSubgraphCallingFunctionWithoutRefVar(root); FunctionLibraryDefinition flib_def(OpRegistry::Global(), fdef_lib); TF_ASSERT_OK_AND_ASSIGN(std::vector<string> names, GetNodesRelatedToRefVarsSorted(root, &flib_def)); std::vector<string> expected({ "RefFloat", "add_ref", "constant_ref", "constant_ref_pco", "negate_ref", "read_ref_var", "variable", }); EXPECT_EQ(names, expected); } Status MakeLoop(Scope s, Output init_value, absl::string_view loop_name) { s = s.NewSubScope(std::string(loop_name)); ops::internal::Enter enter(s.WithOpName("init_value"), init_value, loop_name); ops::Merge merge(s.WithOpName("merge"), {init_value, init_value}); Output next_iteration = ops::NextIteration(s.WithOpName("next_itr"), merge.output); return s.graph()->UpdateEdge(next_iteration.node(), 0, merge.output.node(), 1); } TEST(NodesRelatedToRefVariables, Cycles) { Scope root = Scope::NewRootScope().ExitOnError(); Output variable = ops::Variable(root.WithOpName("variable"), PartialTensorShape{}, DT_FLOAT); TF_ASSERT_OK( MakeLoop(root, ops::Identity(root.WithOpName("read_ref_var"), variable), "ref_loop")); TF_ASSERT_OK(MakeLoop( root, ops::Const(root.WithOpName("constant"), Input::Initializer(0.0)), "normal_loop")); TF_ASSERT_OK_AND_ASSIGN(std::vector<string> names, GetNodesRelatedToRefVarsSorted(root)); std::vector<string> expected({"read_ref_var", "ref_loop/init_value", "ref_loop/merge", "ref_loop/next_itr", "variable"}); EXPECT_EQ(names, expected); } } }
1,072
cpp
tensorflow/tensorflow
encapsulate_subgraphs_pass
tensorflow/compiler/jit/encapsulate_subgraphs_pass.cc
tensorflow/compiler/jit/encapsulate_subgraphs_pass_test.cc
#ifndef TENSORFLOW_COMPILER_JIT_ENCAPSULATE_SUBGRAPHS_PASS_H_ #define TENSORFLOW_COMPILER_JIT_ENCAPSULATE_SUBGRAPHS_PASS_H_ #include "tensorflow/core/common_runtime/optimization_registry.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/graph/graph.h" #include "tensorflow/core/lib/core/status.h" namespace tensorflow { class EncapsulateSubgraphsPass : public GraphOptimizationPass { public: Status Run(const GraphOptimizationPassOptions& options) override; }; typedef std::function<Status( const std::vector<OutputTensor>& arg_source_tensors, std::unique_ptr<Graph>* graph, std::vector<int>* input_permutation, std::vector<int>* output_permutation, NodeDef* node_def)> RewriteSubgraphFn; Status EncapsulateSubgraphsInFunctions( string group_attribute, const Graph& graph_in, const RewriteSubgraphFn& rewrite_subgraph_fn, bool reuse_existing_functions, std::unique_ptr<Graph>* graph_out, FunctionLibraryDefinition* library); extern const char* const kXlaCompiledKernelAttr; bool IsXlaCompiledKernel(const Node& node); extern const char* const kXlaNumConstantArgsAttr; extern const char* const kXlaNumResourceArgsAttr; extern const char* const kXlaHasReferenceVarsAttr; } #endif #include "tensorflow/compiler/jit/encapsulate_subgraphs_pass.h" #include <functional> #include <memory> #include <numeric> #include <string> #include <vector> #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/strings/match.h" #include "absl/strings/str_cat.h" #include "absl/types/optional.h" #include "tensorflow/compiler/jit/flags.h" #include "tensorflow/compiler/jit/mark_for_compilation_pass.h" #include "tensorflow/compiler/jit/shape_inference_helpers.h" #include "tensorflow/compiler/jit/xla_cluster_util.h" #include "tensorflow/compiler/tf2xla/const_analysis.h" #include "xla/service/graphcycles/graphcycles.h" #include "xla/status_macros.h" #include "tensorflow/core/common_runtime/device_factory.h" #include "tensorflow/core/common_runtime/function.h" #include "tensorflow/core/common_runtime/optimization_registry.h" #include "tensorflow/core/common_runtime/shape_refiner.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/graph_def_util.h" #include "tensorflow/core/framework/graph_to_functiondef.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/node_def_util.h" #include "tensorflow/core/framework/tensor.pb.h" #include "tensorflow/core/graph/algorithm.h" #include "tensorflow/core/graph/control_flow.h" #include "tensorflow/core/graph/graph.h" #include "tensorflow/core/graph/graph_def_builder.h" #include "tensorflow/core/graph/tensor_id.h" #include "tensorflow/core/lib/gtl/map_util.h" #include "tensorflow/core/lib/hash/hash.h" #include "tensorflow/core/public/session_options.h" #include "tensorflow/core/public/version.h" #include "tensorflow/core/util/device_name_utils.h" #include "tensorflow/core/util/dump_graph.h" namespace tensorflow { const char* const kXlaCompiledKernelAttr = "_XlaCompiledKernel"; const char* const kXlaNumConstantArgsAttr = "_XlaNumConstantArgs"; const char* const kXlaNumResourceArgsAttr = "_XlaNumResourceArgs"; const char* const kXlaHostTransferSequencerAttr = "_xla_host_transfer_sequencer"; const char* const kXlaHasReferenceVarsAttr = "_XlaHasReferenceVars"; namespace { bool AreAllParentsGuaranteedConst( const Node& n, const absl::flat_hash_set<const Node*>& runtime_const_nodes) { if (n.type_string() == "GuaranteeConst") { return true; } bool all_parents_const = 
true; bool atleast_one_non_control_edge = false; for (const Edge* in : n.in_edges()) { atleast_one_non_control_edge = atleast_one_non_control_edge || !in->IsControlEdge(); if (!in->IsControlEdge() && runtime_const_nodes.count(in->src()) == 0) { all_parents_const = false; break; } } return all_parents_const && atleast_one_non_control_edge; } void MarkGuaranteedConstants( const Graph& graph, const std::vector<std::pair<const Node*, Node*>>& src_arg_pairs) { absl::flat_hash_set<const Node*> guaranteed_const_nodes; std::vector<const Node*> srcs; srcs.reserve(src_arg_pairs.size()); for (const auto& src_arg : src_arg_pairs) { srcs.push_back(src_arg.first); } ReverseDFSFrom( graph, srcs, nullptr, [&guaranteed_const_nodes](const Node* n) { if (AreAllParentsGuaranteedConst(*n, guaranteed_const_nodes)) { guaranteed_const_nodes.insert(n); } }); for (auto& src_arg : src_arg_pairs) { if (guaranteed_const_nodes.count(src_arg.first) != 0) { VLOG(1) << "Guaranteed const found: " << src_arg.first->DebugString(); src_arg.second->AddAttr("_is_guaranteed_constant", true); } } } struct OutputInputTensorPairHasher { uint64 operator()(std::pair<OutputTensor, InputTensor> const& s) const { return Hash64Combine(OutputTensor::Hash()(s.first), InputTensor::Hash()(s.second)); } }; static const char* const kArgOp = "_Arg"; static const char* const kRetValOp = "_Retval"; class Encapsulator { public: Encapsulator(string group_attribute, Graph const* graph_in) : group_attribute_(std::move(group_attribute)), graph_in_(graph_in) {} Status SplitIntoSubgraphs(FunctionLibraryDefinition* library); Status BuildFunctionDefs(const RewriteSubgraphFn& rewrite_subgraph_fn, bool reuse_existing_functions, FunctionLibraryDefinition* library); Status BuildOutputGraph(Graph* graph_out, FunctionLibraryDefinition* library); private: class Subgraph { public: Node* MakeNodeImage(const Graph* graph_in, Node* node); Graph* GetGraph() const; Status BuildFunctionDef(const string& name_in, const RewriteSubgraphFn& rewrite_subgraph_fn, bool reuse_existing_functions, FunctionLibraryDefinition* library); Status AddFunctionCallNode( const absl::flat_hash_map<const Node*, Node*>& node_images, Graph* graph_out); Node* GetCallNode() const; int GetArgIndexForEdge(const Edge* edge) const; int GetResultIndexForEdge(const Edge* edge) const; Status RecordArg(const Edge* edge, const absl::flat_hash_map<const Node*, Node*>& node_images, std::vector<std::pair<const Node*, Node*>>* src_arg_pairs); Status RecordControlResult( const Edge* edge, const absl::flat_hash_map<const Node*, Node*>& node_images); Status RecordResult( const Edge* edge, const absl::flat_hash_map<const Node*, Node*>& node_images); Status MakeSequencingNode(const string& subgraph_name, Graph* graph_out); void ConnectSequencerToCallNode(Graph* graph_out); Status ReplaceFunctionDef(FunctionLibraryDefinition* library); private: std::unique_ptr<Graph> graph_; string device_; NodeDef call_node_def_; string function_def_name_; Node* host_compute_key_placeholder_ = nullptr; Node* call_node_; absl::flat_hash_map<OutputTensor, int, OutputTensor::Hash> args_by_src_; absl::flat_hash_map<InputTensor, int, InputTensor::Hash> args_by_dst_; std::vector<Node*> args_; absl::flat_hash_map<OutputTensor, int, OutputTensor::Hash> results_; absl::flat_hash_set<string> control_output_nodes_; Node* sequencer_ = nullptr; }; Status GetFunctionNameAttr(Node const* node, string* attr) const; Status CopySubgraphEdges( const absl::flat_hash_map<const Node*, Node*>& node_images, std::vector<std::pair<const Node*, Node*>>* 
src_arg_pairs); Status CopySubgraphNodes( absl::flat_hash_map<const Node*, Node*>* node_images); Status CopyNodesToOutputGraph( Graph* graph_out, absl::flat_hash_map<const Node*, Node*>* node_images); Status AddFunctionCallNodes( const absl::flat_hash_map<const Node*, Node*>& node_images, Graph* graph_out); Status FindOutputImageOfEdgeSrc( const string& src_func_id, const string& dst_func_id, const absl::flat_hash_map<const Node*, Node*>& node_images, const Node* original_src_node, Node** src_image); int FindOutputSlotOfEdgeSrc(const string& src_func_id, const string& dst_func_id, const Edge* edge); Status FindOutputImageOfEdgeDst( const string& src_func_id, const string& dst_func_id, const absl::flat_hash_map<const Node*, Node*>& node_images, const Node* original_dst_node, Node** dst_image); int FindOutputSlotOfEdgeDst(const string& src_func_id, const string& dst_func_id, const Edge* edge); Status CopyEdgeToOutputGraph( const Edge* edge, const string& src_func_id, const string& dst_func_id, const absl::flat_hash_map<const Node*, Node*>& node_images, Graph* graph_out, absl::flat_hash_set<std::pair<OutputTensor, InputTensor>, OutputInputTensorPairHasher>* edges_added); Status AddEdgesToOutputGraph( const absl::flat_hash_map<const Node*, Node*>& node_images, Graph* graph_out); Status MakePrunedGraphCopyAndInline( const Graph& graph, const std::vector<Node*>& sink_nodes, std::unique_ptr<Graph>* pruned_graph, absl::flat_hash_map<const Node*, Node*>* node_images, FunctionLibraryDefinition* library); const string group_attribute_; const Graph* graph_in_; absl::flat_hash_map<string, Subgraph> subgraphs_; Encapsulator(const Encapsulator&) = delete; void operator=(const Encapsulator&) = delete; }; namespace { void TopologicalClusterSort( const absl::flat_hash_set<string>& clusters, const absl::flat_hash_set<string>& has_successors, const absl::flat_hash_map<string, absl::flat_hash_set<string>>& ancestors, std::vector<string>* sorted) { sorted->clear(); struct Work { string cluster; bool leave; }; std::set<string> visited; std::vector<Work> stack; for (const auto& cluster : clusters) { if (has_successors.find(cluster) == has_successors.end()) { stack.push_back({cluster, false}); } } while (!stack.empty()) { const Work item = stack.back(); stack.pop_back(); if (item.leave) { sorted->push_back(item.cluster); continue; } if (visited.find(item.cluster) != visited.end()) continue; visited.insert(item.cluster); stack.push_back({item.cluster, true}); const auto& iter = ancestors.find(item.cluster); if (iter != ancestors.end()) { for (const auto& ancestor : iter->second) { stack.push_back({ancestor, false}); } } } CHECK(sorted->size() == clusters.size()); } } Node* Encapsulator::Subgraph::GetCallNode() const { return call_node_; } int Encapsulator::Subgraph::GetArgIndexForEdge(const Edge* edge) const { return args_by_dst_.at(InputTensor(edge->dst(), edge->dst_input())); } int Encapsulator::Subgraph::GetResultIndexForEdge(const Edge* edge) const { return results_.at(OutputTensor(edge->src(), edge->src_output())); } Node* Encapsulator::Subgraph::MakeNodeImage(const Graph* graph_in, Node* node) { if (!graph_) { graph_.reset(new Graph(graph_in->op_registry())); graph_->set_versions(graph_in->versions()); } if (device_.empty()) { device_ = node->assigned_device_name().empty() ? 
node->requested_device() : node->assigned_device_name(); } return graph_->CopyNode(node); } Graph* Encapsulator::Subgraph::GetGraph() const { return graph_.get(); } Status Encapsulator::Subgraph::RecordArg( const Edge* edge, const absl::flat_hash_map<const Node*, Node*>& node_images, std::vector<std::pair<const Node*, Node*>>* src_arg_pairs) { Node* src_node = edge->src(); int src_slot = edge->src_output(); absl::flat_hash_map<OutputTensor, int, OutputTensor::Hash>::iterator iter; bool inserted; std::tie(iter, inserted) = args_by_src_.emplace( OutputTensor(src_node, src_slot), args_by_src_.size()); int arg_index = iter->second; if (inserted) { NodeDef arg_def; NodeDefBuilder builder( absl::StrCat(src_node->name(), "_", src_slot, "_arg"), kArgOp, NodeDebugInfo(src_node->def())); DataType dtype = edge->dst()->input_type(edge->dst_input()); builder.Attr("T", dtype); builder.Attr("index", arg_index); Status s = builder.Finalize(&arg_def); if (!s.ok()) return s; TF_ASSIGN_OR_RETURN(Node * arg, graph_->AddNode(arg_def)); src_arg_pairs->push_back({src_node, arg}); args_.push_back(arg); } Node* dst_node = edge->dst(); Node* dst_image = node_images.at(dst_node); int dst_slot = edge->dst_input(); args_by_dst_[InputTensor(dst_node, dst_slot)] = arg_index; graph_->AddEdge(args_[arg_index], 0, dst_image, dst_slot); return absl::OkStatus(); } Status Encapsulator::Subgraph::RecordControlResult( const Edge* edge, const absl::flat_hash_map<const Node*, Node*>& node_images) { Node* src_node = edge->src(); Node* src_image = node_images.at(src_node); control_output_nodes_.insert(src_image->name()); return absl::OkStatus(); } Status Encapsulator::Subgraph::RecordResult( const Edge* edge, const absl::flat_hash_map<const Node*, Node*>& node_images) { Node* src_node = edge->src(); Node* src_image = node_images.at(src_node); int src_slot = edge->src_output(); absl::flat_hash_map<OutputTensor, int, OutputTensor::Hash>::iterator iter; bool inserted; std::tie(iter, inserted) = results_.emplace(OutputTensor(src_node, src_slot), results_.size()); int ret_index = iter->second; if (inserted) { NodeDef ret_def; NodeDefBuilder builder( absl::StrCat(src_node->name(), "_", src_slot, "_retval"), kRetValOp, NodeDebugInfo(src_node->def())); DataType dtype = src_node->output_type(src_slot); builder.Attr("T", dtype); builder.Attr("index", ret_index); builder.Input(src_image->name(), src_slot, dtype); Status s = builder.Finalize(&ret_def); if (!s.ok()) return s; TF_ASSIGN_OR_RETURN(Node * ret, graph_->AddNode(ret_def)); graph_->AddEdge(src_image, src_slot, ret, 0); } return absl::OkStatus(); } Status Encapsulator::Subgraph::MakeSequencingNode(const string& subgraph_name, Graph* graph_out) { if (sequencer_ == nullptr) { NodeDef seq_def; NodeDefBuilder builder(absl::StrCat(subgraph_name, "_sequencer"), "NoOp"); builder.Attr(kXlaHostTransferSequencerAttr, subgraph_name); builder.Device(device_); Status s = builder.Finalize(&seq_def); if (!s.ok()) return s; TF_ASSIGN_OR_RETURN(sequencer_, graph_out->AddNode(seq_def)); } return absl::OkStatus(); } void Encapsulator::Subgraph::ConnectSequencerToCallNode(Graph* graph_out) { if (sequencer_ != nullptr) { VLOG(2) << "ConnectSequencerToCallNode"; graph_out->AddControlEdge(sequencer_, call_node_, true); } } Status Encapsulator::Subgraph::BuildFunctionDef( const string& name_in, const RewriteSubgraphFn& rewrite_subgraph_fn, bool reuse_existing_functions, FunctionLibraryDefinition* library) { string name = name_in; call_node_def_.set_op(name); call_node_def_.set_name(name); 
call_node_def_.set_device(device_); if (rewrite_subgraph_fn) { std::vector<OutputTensor> arg_source_tensors(args_by_src_.size()); for (const auto& arg : args_by_src_) { arg_source_tensors.at(arg.second) = arg.first; } std::vector<int> input_permutation(args_by_src_.size()); std::iota(input_permutation.begin(), input_permutation.end(), 0); std::vector<int> output_permutation(results_.size()); std::iota(output_permutation.begin(), output_permutation.end(), 0); TF_RETURN_IF_ERROR( rewrite_subgraph_fn(arg_source_tensors, &graph_, &input_permutation, &output_permutation, &call_node_def_)); if (input_permutation.size() != args_by_src_.size()) { return errors::InvalidArgument("Input permutation has incorrect size."); } if (output_permutation.size() != results_.size()) { return errors::InvalidArgument("Output permutation has incorrect size."); } for (auto& arg : args_by_src_) { arg.second = input_permutation[arg.second]; } for (auto& arg : args_by_dst_) { arg.second = input_permutation[arg.second]; } for (auto& result : results_) { result.second = output_permutation[result.second]; } name = call_node_def_.op(); } function_def_name_ = name; FunctionDef fdef; auto lookup = [this](const Node* node) -> std::optional<string> { if (control_output_nodes_.contains(node->name())) { return std::make_optional(node->name()); } return std::nullopt; }; std::vector<ControlFlowInfo> dummy; TF_RETURN_IF_ERROR(BuildControlFlowInfo(graph_.get(), &dummy)); TF_RETURN_IF_ERROR(GraphToFunctionDef(*graph_, name, lookup, &fdef)); if (VLOG_IS_ON(1)) { VLOG(2) << "Build function def " << name; DumpGraphToFile(absl::StrCat("encapsulate_fdef_graph_", name), *graph_, library); DumpFunctionDefToFile(absl::StrCat("encapsulate_fdef_", name), fdef); } const FunctionDef* original_fdef = library->Find(name); if (!reuse_existing_functions || original_fdef == nullptr) { TF_RETURN_IF_ERROR(library->AddFunctionDef(fdef)); } else if (!FunctionDefsEqual(*original_fdef, fdef)) { TF_RETURN_IF_ERROR(library->ReplaceFunction(name, fdef)); } return absl::OkStatus(); } Status Encapsulator::Subgraph::ReplaceFunctionDef( FunctionLibraryDefinition* library) { const string& name = function_def_name_; FunctionDef fdef; TF_RETURN_IF_ERROR(GraphToFunctionDef(*graph_, name, &fdef)); if (VLOG_IS_ON(1)) { VLOG(2) << "Replace function def " << name; DumpGraphToFile(absl::StrCat("replace_encapsulate_fdef_graph_", name), *graph_, library); DumpFunctionDefToFile(absl::StrCat("replace_encapsulate_fdef_", name), fdef); } TF_RETURN_IF_ERROR(library->ReplaceFunction(name, fdef)); return absl::OkStatus(); } Status Encapsulator::Subgraph::AddFunctionCallNode( const absl::flat_hash_map<const Node*, Node*>& node_images, Graph* graph_out) { TF_ASSIGN_OR_RETURN(call_node_, graph_out->AddNode(call_node_def_)); call_node_->set_assigned_device_name(device_); return absl::OkStatus(); } Status Encapsulator::GetFunctionNameAttr(Node const* node, string* attr) const { AttrSlice attrs = node->attrs(); attr->clear(); for (const auto& node_attr : attrs) { if (node_attr.first == group_attribute_) { TF_RETURN_IF_ERROR(AttrValueHasType(node_attr.second, "string")); *attr = node_attr.second.s(); break; } } return absl::OkStatus(); } bool IsInSubgraph(const string& func_id) { return !func_id.empty(); } Status Encapsulator::CopySubgraphNodes( absl::flat_hash_map<const Node*, Node*>* node_images) { for (Node* node : graph_in_->op_nodes()) { string func_id; TF_RETURN_IF_ERROR(GetFunctionNameAttr(node, &func_id)); if (!IsInSubgraph(func_id)) continue; Subgraph& subgraph = 
subgraphs_[func_id]; Node* image = subgraph.MakeNodeImage(graph_in_, node); image->ClearAttr(group_attribute_); (*node_images)[node] = image; } return absl::OkStatus(); } Status Encapsulator::CopySubgraphEdges( const absl::flat_hash_map<const Node*, Node*>& node_images, std::vector<std::pair<const Node*, Node*>>* src_arg_pairs) { for (const Edge* edge : graph_in_->edges()) { string src_func_id; TF_RETURN_IF_ERROR(GetFunctionNameAttr(edge->src(), &src_func_id)); string dst_func_id; TF_RETURN_IF_ERROR(GetFunctionNameAttr(edge->dst(), &dst_func_id)); Node* src_image = gtl::FindWithDefault(node_images, edge->src(), nullptr); Node* dst_image = gtl::FindWithDefault(node_images, edge->dst(), nullptr); if (IsInSubgraph(src_func_id) && IsInSubgraph(dst_func_id) && src_func_id == dst_func_id) { Graph* g = subgraphs_[src_func_id].GetGraph(); if (edge->IsControlEdge()) { g->AddControlEdge(src_image, dst_image, true); } else { g->AddEdge(src_image, edge->src_output(), dst_image, edge->dst_input()); } continue; }
#include "tensorflow/compiler/jit/encapsulate_subgraphs_pass.h" #include <memory> #include <utility> #include "absl/strings/match.h" #include "absl/strings/str_cat.h" #include "tensorflow/cc/framework/ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/compiler/jit/encapsulate_util.h" #include "tensorflow/compiler/jit/extract_outside_compilation_pass.h" #include "tensorflow/compiler/jit/test_util.h" #include "tensorflow/compiler/tf2xla/side_effect_util.h" #include "tensorflow/core/common_runtime/device_factory.h" #include "tensorflow/core/common_runtime/function.h" #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/framework/function_testlib.h" #include "tensorflow/core/framework/graph_to_functiondef.h" #include "tensorflow/core/graph/graph_def_builder.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/public/session_options.h" #include "tensorflow/core/public/version.h" #include "tensorflow/core/util/equal_graph_def.h" namespace tensorflow { namespace { const char* const kXlaHostTransferSequencerAttr = "_xla_host_transfer_sequencer"; Status AddGraphDefToFunctionLibrary(const GraphDefBuilder& graphdef_builder, const string& name_suffix, FunctionDefLibrary* library) { GraphDef graphdef; TF_RETURN_IF_ERROR(graphdef_builder.ToGraphDef(&graphdef)); std::unique_ptr<Graph> graph = std::unique_ptr<Graph>(new Graph(OpRegistry::Global())); GraphConstructorOptions opts; opts.allow_internal_ops = true; TF_RETURN_IF_ERROR(ConvertGraphDefToGraph(opts, graphdef, graph.get())); FunctionDef* fdef = library->add_function(); TF_RETURN_IF_ERROR(GraphToFunctionDef( *graph, absl::StrCat("_outside_compilation_shape_inference_", name_suffix), fdef)); return absl::OkStatus(); } template <class Tkey, class Tvalue> bool EqualProtoMap(const ::tensorflow::protobuf::Map<Tkey, Tvalue>& a, const ::tensorflow::protobuf::Map<Tkey, Tvalue>& b, const std::function<string(const Tkey&)>& key_to_string, const std::function<string(const Tvalue&)>& value_to_string, const std::function<bool(const Tkey&, const Tvalue&, const Tvalue&)>& compare, const string& map_name, string* diff) { for (const auto& elt_a : a) { const auto iter = b.find(elt_a.first); if (iter == b.end()) { if (diff) { *diff = absl::StrCat(map_name, " expected: contains element with key '", key_to_string(elt_a.first), "' got: map has no such element"); } return false; } if (!compare(elt_a.first, elt_a.second, iter->second)) { if (diff) { *diff = absl::StrCat(map_name, " expected: element with key '", key_to_string(elt_a.first), "' has value '", value_to_string(elt_a.second), "' got: '", value_to_string(iter->second), "'"); } return false; } } for (const auto& elt_b : b) { const auto iter = a.find(elt_b.first); if (iter == a.end()) { if (diff) { *diff = absl::StrCat(map_name, " got: contains element with key '", key_to_string(elt_b.first), "' expected: map has no such element"); } return false; } } return true; } bool EqualFunctionNodeDef(const NodeDef& a, const NodeDef& b, const string& diff_preamble, string* diff) { if (a.op() != b.op()) { if (diff) { *diff = absl::StrCat(diff_preamble, " mismatch for node ", a.name(), ", expected op '", a.op(), "' got '", b.op()); } return false; } if (a.device() != b.device()) { if (diff) { *diff = absl::StrCat(diff_preamble, " mismatch for node ", a.name(), ", expected device '", a.device(), "' got '", b.device()); } return false; } if (a.input_size() 
!= b.input_size()) { if (diff) { *diff = absl::StrCat(diff_preamble, " mismatch for node ", a.name(), ", expected ", a.input_size(), " inputs got ", b.input_size(), " expected:\n", a.DebugString(), "\ngot:\n", b.DebugString()); } return false; } std::unordered_set<string> control_input_a; std::unordered_set<string> control_input_b; for (int i = 0; i < a.input_size(); ++i) { if (absl::StartsWith(a.input(i), "^")) { if (!absl::StartsWith(b.input(i), "^")) { if (diff) { *diff = absl::StrCat(diff_preamble, " mismatch for node ", a.name(), " input ", i, ", expected control input ", a.input(i), " got ", b.input(i), " expected:\n", a.DebugString(), "\ngot:\n", b.DebugString()); } return false; } control_input_a.insert(a.input(i)); control_input_b.insert(b.input(i)); } else if (a.input(i) != b.input(i)) { if (diff) { *diff = absl::StrCat(diff_preamble, " mismatch for node ", a.name(), " input ", i, ", expected ", a.input(i), " got ", b.input(i), " expected:\n", a.DebugString(), "\ngot:\n", b.DebugString()); } return false; } } if (control_input_a != control_input_b) { if (diff) { *diff = absl::StrCat(diff_preamble, " mismatch for node ", a.name(), " control inputs differ expected:\n", a.DebugString(), "\ngot:\n", b.DebugString()); } return false; } return EqualProtoMap<string, AttrValue>( a.attr(), b.attr(), [](const string& s) { return s; }, [](const AttrValue& v) { return v.DebugString(); }, [](const string& key, const AttrValue& av, const AttrValue& bv) { if (key == "ancestors") { std::unordered_set<string> a_set(av.list().s().begin(), av.list().s().end()); std::unordered_set<string> b_set(bv.list().s().begin(), bv.list().s().end()); return a_set == b_set; } else { return av.DebugString() == bv.DebugString(); } }, absl::StrCat(diff_preamble, " attr mismatch for node ", a.name()), diff); } bool EqualFunctionDef(const FunctionDef& a, const FunctionDef& b, string* diff) { if (a.signature().DebugString() != b.signature().DebugString()) { if (diff) { *diff = absl::StrCat("Signature mismatch for function ", a.signature().name(), ", expected:\n", a.signature().DebugString(), "\ngot:\n", b.signature().DebugString()); } return false; } if (!EqualProtoMap<string, AttrValue>( a.attr(), b.attr(), [](const string& s) { return s; }, [](const AttrValue& v) { return v.DebugString(); }, [](const string& key, const AttrValue& av, const AttrValue& bv) { return av.DebugString() == bv.DebugString(); }, absl::StrCat("attr mismatch for function ", a.signature().name()), diff)) { return false; } if (!EqualProtoMap<string, string>( a.ret(), b.ret(), [](const string& s) { return s; }, [](const string& s) { return s; }, [](const string& key, const string& av, const string& bv) { return av == bv; }, absl::StrCat("ret mismatch for function ", a.signature().name()), diff)) { return false; } for (int i = 0; i < a.node_def_size(); ++i) { bool found = false; for (int j = 0; j < b.node_def_size(); ++j) { if (a.node_def(i).name() == b.node_def(j).name()) { if (!EqualFunctionNodeDef( a.node_def(i), b.node_def(j), absl::StrCat("Function ", a.signature().name()), diff)) { return false; } found = true; break; } } if (!found) { if (diff) { *diff = absl::StrCat("Function ", a.signature().name(), ", expected: has node '", a.node_def(i).name(), "' got: no node of that name"); } return false; } } for (int i = 0; i < b.node_def_size(); ++i) { bool found = false; for (int j = 0; j < a.node_def_size(); ++j) { if (b.node_def(i).name() == a.node_def(j).name()) { found = true; break; } } if (!found) { if (diff) { *diff = absl::StrCat("Function 
", a.signature().name(), ", got: has node '", b.node_def(i).name(), "' expected: no node of that name"); } return false; } } return true; } bool EqualFunctionDefLibrary(const FunctionDefLibrary& expected, const FunctionDefLibrary& actual, string* diff) { std::unordered_map<string, const FunctionDef*> actual_index; for (const FunctionDef& function : actual.function()) { actual_index[function.signature().name()] = &function; } for (const FunctionDef& expected_function : expected.function()) { auto it = actual_index.find(expected_function.signature().name()); if (it == actual_index.end()) { if (diff) { *diff = absl::StrCat("Did not find expected function '", expected_function.signature().name(), "'"); } return false; } if (!EqualFunctionDef(expected_function, *it->second, diff)) return false; actual_index.erase(it); } if (!actual_index.empty()) { if (diff != nullptr) { *diff = absl::StrCat("Found unexpected function '", actual_index.begin()->second->signature().name(), "'"); } return false; } return true; } #define TF_EXPECT_FUNCTIONDEFLIBRARY_EQ(expected, actual) \ do { \ string diff; \ EXPECT_TRUE(EqualFunctionDefLibrary(expected, actual, &diff)) \ << diff << "\nActual: " << actual.DebugString(); \ } while (false) REGISTER_OP("InputTest") .Output("o: float") .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { c->set_output(0, c->UnknownShape()); return absl::OkStatus(); }); REGISTER_OP("InputTestShaped") .Output("o: float") .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { c->set_output(0, c->Vector(2)); return absl::OkStatus(); }); REGISTER_OP("UnaryTest") .Input("a: float") .Output("o: float") .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { ::tensorflow::shape_inference::ShapeHandle o; TF_RETURN_IF_ERROR(c->Merge(c->UnknownShape(), c->input(0), &o)); c->set_output(0, o); return absl::OkStatus(); }); REGISTER_OP("BinaryTest") .Input("a: float") .Input("b: float") .Output("o: float") .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { ::tensorflow::shape_inference::ShapeHandle o; TF_RETURN_IF_ERROR(c->Merge(c->UnknownShape(), c->input(0), &o)); c->set_output(0, o); return absl::OkStatus(); }); REGISTER_OP("BinaryTest2") .Input("a: float") .Input("b: float") .Output("o: float") .SetShapeFn(::tensorflow::shape_inference::UnknownShape); REGISTER_OP("AddNLikeTest") .Input("inputs: N * T") .Output("sum: T") .Attr("N: int >= 1") .Attr("T: numbertype") .SetIsCommutative() .SetIsAggregate(); Node* Sequencer(const GraphDefBuilder::Options& opts, const string& call_node_name) { if (opts.HaveError()) return nullptr; NodeBuilder node_builder(opts.GetNameForOp("NoOp"), "NoOp", opts.op_registry()); return opts.WithAttr(kXlaHostTransferSequencerAttr, call_node_name) .FinalizeBuilder(&node_builder); } Node* Input(const GraphDefBuilder::Options& opts) { return ops::SourceOp("InputTest", opts); } Node* InputShaped(const GraphDefBuilder::Options& opts) { return ops::SourceOp("InputTestShaped", opts); } Node* KnownShapeBase(DataType dtype, absl::Span<const int> shape, const GraphDefBuilder::Options& opts) { if (opts.HaveError()) return nullptr; NodeBuilder node_builder(opts.GetNameForOp("Const"), "Const", opts.op_registry()); TensorProto value; value.set_dtype(dtype); for (int dim : shape) { value.mutable_tensor_shape()->add_dim()->set_size(dim); } return opts.WithAttr("value", value) .WithAttr("dtype", dtype) .FinalizeBuilder(&node_builder); } Node* KnownShape(absl::Span<const int> shape, const GraphDefBuilder::Options& opts) { return 
KnownShapeBase(DT_FLOAT, shape, opts); } Node* KeyPlaceholderShape(const GraphDefBuilder::Options& opts) { return KnownShapeBase(DT_STRING, {2}, opts); } Node* KeyPlaceholder(const string& call_node, const GraphDefBuilder::Options& opts) { if (opts.HaveError()) return nullptr; NodeBuilder node_builder(absl::StrCat(call_node, "_key_placeholder"), "Placeholder", opts.op_registry()); TensorShapeProto shape; shape.add_dim()->set_size(2); return opts.WithAttr("shape", shape) .WithAttr("dtype", DT_STRING) .WithAttr("_host_compute_call_node", call_node) .FinalizeBuilder(&node_builder); } Node* RecvAtHost(ops::NodeOut key_input, const string& cluster, const string& new_func_name, const string& oc_cluster, absl::Span<const DataType> dtypes, const GraphDefBuilder::Options& opts) { if (opts.HaveError()) return nullptr; string key = absl::StrCat("host_compute_channel_", cluster, "_", new_func_name, "_", oc_cluster); string name = absl::StrCat("outside_compilation_", cluster, "_", new_func_name, "_", oc_cluster, "_recv"); NodeBuilder node_builder(opts.WithName(name).GetNameForOp("_XlaRecvAtHost"), "_XlaRecvAtHost", opts.op_registry()); node_builder.Input(std::move(key_input)); return opts.WithAttr("Toutputs", dtypes) .WithAttr("key", key) .WithAttr("device_ordinal", 0) .WithAttr("_encapsulate", cluster) .WithAttr("_outside", oc_cluster) .FinalizeBuilder(&node_builder); } Node* SendFromHost(ops::NodeOut key_input, const string& cluster, const string& new_func_name, const string& oc_cluster, const std::vector<ops::NodeOut>& inputs, const GraphDefBuilder::Options& opts) { if (opts.HaveError()) return nullptr; string key = absl::StrCat("host_compute_channel_", cluster, "_", new_func_name, "_", oc_cluster); string name = absl::StrCat("outside_compilation_", cluster, "_", new_func_name, "_", oc_cluster, "_send"); NodeBuilder node_builder(opts.WithName(name).GetNameForOp("_XlaSendFromHost"), "_XlaSendFromHost", opts.op_registry()); node_builder.Input(inputs); node_builder.Input(std::move(key_input)); std::vector<DataType> dtypes; for (const auto& node : inputs) { dtypes.push_back(node.dt); } return opts.WithAttr("Tinputs", dtypes) .WithAttr("key", key) .WithAttr("device_ordinal", 0) .WithAttr("_encapsulate", cluster) .WithAttr("_outside", oc_cluster) .FinalizeBuilder(&node_builder); } Node* Unary(ops::NodeOut a, const GraphDefBuilder::Options& opts) { return ops::UnaryOp("UnaryTest", std::move(a), opts); } Node* Binary(ops::NodeOut a, ops::NodeOut b, const GraphDefBuilder::Options& opts) { return ops::BinaryOp("BinaryTest", std::move(a), std::move(b), opts); } Node* BinaryUnknownShape(ops::NodeOut a, ops::NodeOut b, const GraphDefBuilder::Options& opts) { return ops::BinaryOp("BinaryTest2", std::move(a), std::move(b), opts); } Node* AddNLike(const std::vector<ops::NodeOut>& inputs, const GraphDefBuilder::Options& opts) { if (opts.HaveError()) return nullptr; NodeBuilder node_builder(opts.GetNameForOp("AddN"), "AddNLikeTest", opts.op_registry()); node_builder.Input(inputs); return opts.FinalizeBuilder(&node_builder); } Node* ArgOp(int index, DataType type, const GraphDefBuilder::Options& opts) { return ops::SourceOp("_Arg", opts.WithAttr("T", type).WithAttr("index", index)); } Node* RetOp(int index, ops::NodeOut a, const GraphDefBuilder::Options& opts) { if (opts.HaveError()) return nullptr; NodeBuilder node_builder(opts.GetNameForOp("Retval"), "_Retval", opts.op_registry()); node_builder.Input(std::move(a)).Attr("index", index); return opts.FinalizeBuilder(&node_builder); } Status Encapsulate(GraphDef* 
graphdef, FunctionDefLibrary* library, const std::vector<string>& encapsulated_functions) { Status s; std::unique_ptr<FunctionLibraryDefinition> lib_def( new FunctionLibraryDefinition(OpRegistry::Global(), *library)); GraphConstructorOptions options; options.allow_internal_ops = true; std::unique_ptr<Graph> graph(new Graph(lib_def.get())); s = ConvertGraphDefToGraph(options, *graphdef, graph.get()); if (!s.ok()) return s; s = PerformStaticShapeInferenceBeforeEncapsulation(graph.get()); if (!s.ok()) return s; SessionOptions session_options; std::vector<std::unique_ptr<Device>> devices; TF_CHECK_OK(DeviceFactory::AddDevices( session_options, "/job:localhost/replica:0/task:0", &devices)); OptimizerOptions opts; auto device_mgr = std::make_unique<StaticDeviceMgr>(std::move(devices)); auto pflr = std::make_unique<ProcessFunctionLibraryRuntime>( device_mgr.get(), Env::Default(), nullptr, TF_GRAPH_DEF_VERSION, lib_def.get(), opts, nullptr, nullptr); auto flr = pflr->GetFLR("/job:localhost/replica:0/task:0/cpu:0"); std::unique_ptr<Graph> graph_out; s = EncapsulateSubgraphsInFunctions("_encapsulate", *graph, {}, false, &graph_out, lib_def.get()); if (!s.ok()) return s; std::unordered_map<string, XlaClusterInfo> clusters; for (const auto& func : encapsulated_functions) { Node* xla_computation_node; for (Node* n : graph_out->nodes()) { if (n->name() == func) { xla_computation_node = n; } } if (!xla_computation_node) { return errors::Internal("Cannot find node ", func); } NameAttrList func_name_attrs; func_name_attrs.set_name(func); clusters.emplace(func, XlaClusterInfo{func, func_name_attrs, xla_computation_node, std::map<string, int>{}}); } bool modified; s = ExtractOutsideCompilation("_encapsulate", "_outside", clusters, graph_out.get(), flr, lib_def.get(), &modified); if (!s.ok()) return s; GraphDef graphdef_out; graph_out->ToGraphDef(&graphdef_out); graphdef->Swap(&graphdef_out); *library = lib_def->ToProto(); for (FunctionDef& fdef : *library->mutable_function()) { for (NodeDef& node_def : *fdef.mutable_node_def()) { node_def.mutable_attr()->erase("_xla_inferred_shapes"); } } return s; } Status Encapsulate(GraphDef* graphdef, FunctionDefLibrary* library) { std::vector<string> encapsulated_functions; return Encapsulate(graphdef, library, encapsulated_functions); } TEST(EncapsulateSubgraphsTest, NoFunctions) { GraphDefBuilder builder(GraphDefBuilder::kFailImmediately); Node* a = Input(builder.opts().WithName("A")); Node* b = Input(builder.opts().WithName("B")); Node* c = Unary(a, builder.opts().WithName("C")); Binary(b, c, builder.opts().WithName("D")); GraphDef graphdef_in; FunctionDefLibrary library_in; TF_EXPECT_OK(builder.ToGraphDef(&graphdef_in)); *library_in.add_function() = test::function::XTimesTwo(); GraphDef graphdef_out = graphdef_in; FunctionDefLibrary library_out = library_in; TF_EXPECT_OK(Encapsulate(&graphdef_out, &library_out)); TF_EXPECT_GRAPH_EQ(graphdef_in, graphdef_out); TF_EXPECT_FUNCTIONDEFLIBRARY_EQ(library_in, library_out); } TEST(EncapsulateSubgraphsTest, OneFunction) { FunctionDefLibrary library; GraphDef graphdef; { *library.add_function() = test::function::XTimesTwo(); GraphDefBuilder b1(GraphDefBuilder::kFailImmediately); Node* a = Input(b1.opts().WithName("A")); Node* b = Input(b1.opts().WithName("B")); Node* c = Unary(a, b1.opts().WithName("C").WithAttr("_encapsulate", "F1")); Node* d = Binary(b, c, b1.opts().WithName("c").WithControlInput(c).WithAttr( "_encapsulate", "F1")); Binary(a, d, b1.opts().WithName("E")); TF_EXPECT_OK(b1.ToGraphDef(&graphdef)); } 
TF_EXPECT_OK(Encapsulate(&graphdef, &library)); FunctionDefLibrary library_expected; GraphDef graphdef_expected; *library_expected.add_function() = test::function::XTimesTwo(); *library_expected.add_function() = FunctionDefHelper::Create( "F1", {"a_0_arg:float", "b_0_arg:float"}, {"c_0_retval:float"}, {}, { {{"C"}, "UnaryTest", {"a_0_arg"}}, {{"c"}, "BinaryTest", {"b_0_arg", "C:o:0"}, {}, {"C"}}, }, {{"c_0_retval", "c:o:0"}}); { std::unique_ptr<FunctionLibraryDefinition> lib_def( new FunctionLibraryDefinition(OpRegistry::Global(), library_expected)); GraphDefBuilder b2(GraphDefBuilder::kFailImmediately, lib_def.get()); Node* a = Input(b2.opts().WithName("A")); Node* b = Input(b2.opts().WithName("B")); NodeBuilder node_builder("F1", "F1", lib_def.get()); node_builder.Input(a).Input(b); Node* call = b2.opts().FinalizeBuilder(&node_builder); Binary(a, call, b2.opts().WithName("E")); TF_EXPECT_OK(b2.ToGraphDef(&graphdef_expected)); } TF_EXPECT_GRAPH_EQ(graphdef_expected, graphdef); TF_EXPECT_FUNCTIONDEFLIBRARY_EQ(library_expected, library); } TEST(EncapsulateSubgraphsTest, TwoFunctions) { FunctionDefLibrary library; GraphDef graphdef; { *library.add_function() = test::function::XTimesTwo(); GraphDefBuilder b1(GraphDefBuilder::kFailImmediately); Node* a = Input(b1.opts().WithName("A")); Node* b = Input(b1.opts().WithName("B")); Node* control = Input(b1.opts().WithName("Control")); Node* c = Unary(a, b1.opts().WithName("C").WithControlInput(control).WithAttr( "_encapsulate", "F1")); Node* d = Binary(b, c, b1.opts().WithName("D").WithControlInput(control).WithAttr( "_encapsulate", "F2")); Binary(a, d, b1.opts().WithName("E")); TF_EXPECT_OK(b1.ToGraphDef(&graphdef)); } TF_EXPECT_OK(Encapsulate(&graphdef, &library)); FunctionDefLibrary library_expected; GraphDef graphdef_expected; *library_expected.add_function() = test::function::XTimesTwo(); *library_expected.add_function() = FunctionDefHelper::Create( "F1", {"a_0_arg:float"}, {"c_0_retval:float"}, {}, { {{"C"}, "UnaryTest", {"a_0_arg"}}, }, {{"c_0_retval", "C:o:0"}}); *library_expected.add_function() = FunctionDefHelper::Create( "F2", {"b_0_arg:float", "c_0_arg:float"}, {"d_0_retval:float"}, {}, { {{"D"}, "BinaryTest", {"b_0_arg", "c_0_arg"}}, }, {{"d_0_retval", "D:o:0"}}); { std::unique_ptr<FunctionLibraryDefinition> lib_def( new FunctionLibraryDefinition(OpRegistry::Global(), library_expected)); GraphDefBuilder b2(GraphDefBuilder::kFailImmediately, lib_def.get()); Node* a = Input(b2.opts().WithName("A")); Node* b = Input(b2.opts().WithName("B")); Node* control = Input(b2.opts().WithName("Control")); NodeBuilder nb("F1", "F1", lib_def.get()); nb.Input(a).ControlInput(control); Node* call1 = b2.opts().FinalizeBuilder(&nb); NodeBuilder nb2("F2", "F2", lib_def.get()); nb2.Input(b).Input(call1).ControlInput(control); Node* call2 = b2.opts().FinalizeBuilder(&nb2); Binary(a, call2, b2.opts().WithName("E")); TF_EXPECT_OK(b2.ToGraphDef(&graphdef_expected)); } TF_EXPECT_GRAPH_EQ(graphdef_expected, graphdef); TF_EXPECT_FUNCTIONDEFLIBRARY_EQ(library_expected, library); } std::vector<string> GraphNodes(const Graph& graph) { std::vector<string> nodes; for (const auto& node : graph.nodes()) { if (!node->IsSource() && !node->IsSink()) { nodes.push_back(node->name()); } } std::sort(nodes.begin(), nodes.end()); return nodes; } std::vector<std::pair<string, string>> GraphEdges(const Graph& graph) { std::vector<std::pair<string, string>> edges; for (const Edge* edge : graph.edges()) { if (edge->src()->IsSource() || edge->dst()->IsSink()) continue; 
edges.emplace_back( absl::StrCat(edge->src()->name(), ":", edge->src_output()), absl::StrCat(edge->dst()->name(), ":", edge->dst_input())); } std::sort(edges.begin(), edges.end()); return edges; } TEST(EncapsulateSubgraphsTest, InputDeduplication) { Scope root = Scope::NewRootScope().ExitOnError().WithDevice( "/job:localhost/replica:0/task:0/cpu:0"); auto x = ops::Placeholder(root.WithOpName("x"), DT_FLOAT); auto add1 = ops::Add(root.WithOpName("add1"), x, x); add1.node()->AddAttr("_cluster", "cluster1"); auto add2 = ops::Add(root.WithOpName("add2"), add1, add1); add2.node()->AddAttr("_cluster", "cluster2"); auto out = ops::Mul(root.WithOpName("mul"), add1, add2); Graph graph_before_encapsulation(OpRegistry::Global()); TF_ASSERT_OK(root.ToGraph(&graph_before_encapsulation)); FunctionLibraryDefinition library(OpRegistry::Global(), FunctionDefLibrary()); std::unique_ptr<Graph> graph; TF_ASSERT_OK(EncapsulateSubgraphsInFunctions( "_cluster", graph_before_encapsulation, {}, false, &graph, &library)); std::vector<string> expected_nodes = {"cluster1", "cluster2", "mul", "x"}; EXPECT_EQ(expected_nodes, GraphNodes(*graph)); std::vector<std::pair<string, string>> expected_edges = { {"cluster1:0", "cluster2:0"}, {"cluster1:0", "mul:0"}, {"cluster2:0", "mul:1"}, {"x:0", "cluster1:0"}}; EXPECT_EQ(expected_edges, GraphEdges(*graph)); } const Node* FindNodeByName(const Graph& graph, const string& name) { for (const Node* node : graph.nodes()) { if (node->name() == name) return node; } return nullptr; } bool HasGuaranteeConstAttr(const Node& n) { bool is_guaranteed_constant = false; if (!GetNodeAttr(n.attrs(), "_is_guaranteed_constant", &is_guaranteed_constant) .ok()) { return false; } return is_guaranteed_constant; } TEST(EncapsulateSubgraphsWithGuaranteeConstOpTest, Simple) { Scope root = Scope::NewRootScope().ExitOnError().WithDevice( "/job:localhost/replica:0/task:0/cpu:0"); auto x1 = ops::Placeholder(root.WithOpName("x1"), DT_FLOAT); auto x2 = ops::Placeholder(root.WithOpName("x2"), DT_FLOAT); auto const_guarantee_x2 = ops::GuaranteeConst(root.WithOpName("const_guarantee_x2"), x2); auto const_guarantee_x1 = ops::GuaranteeConst(root.WithOpName("const_guarantee_x1"), x1); auto add1 = ops::Add(root.WithOpName("add1"), const_guarantee_x1, const_guarantee_x2); add1.node()->AddAttr("_encapsulate", "encapsulate1"); Graph graph_before(OpRegistry::Global()); TF_ASSERT_OK(root.ToGraph(&graph_before)); std::unique_ptr<Graph> graph_after; FunctionLibraryDefinition library(OpRegistry::Global(), FunctionDefLibrary()); int guaranteed_consts = 0; TF_ASSERT_OK(EncapsulateSubgraphsInFunctions( "_encapsulate", graph_before, [&guaranteed_consts](const std::vector<OutputTensor>& arg_source_tensors, std::unique_ptr<Graph>* graph_ptr, std::vector<int>* input_permutation, std::vector<int>* output_permutation, NodeDef* call_def) { Graph* graph = graph_ptr->get(); for (const Node* n : graph->nodes()) { if (n->type_string() == "_Arg" && absl::StartsWith(n->name(), "const")) { ++guaranteed_consts; EXPECT_TRUE(HasGuaranteeConstAttr(*n)); } else { EXPECT_FALSE(HasGuaranteeConstAttr(*n)); } } return absl::OkStatus(); }, false, &graph_after, &library)); EXPECT_EQ(2, guaranteed_consts); } TEST(EncapsulateSubgraphsWithGuaranteeConstOpTest, Add) { Scope root = Scope::NewRootScope().ExitOnError().WithDevice( "/job:localhost/replica:0/task:0/cpu:0"); auto x1 = ops::Placeholder(root.WithOpName("x1"), DT_FLOAT); auto x2 = ops::Placeholder(root.WithOpName("x2"), DT_FLOAT); auto const_guarantee_x1 = 
ops::GuaranteeConst(root.WithOpName("const_guarantee_x1"), x1); auto const_guarantee_x2 = ops::GuaranteeConst(root.WithOpName("const_guarantee_x2"), x2); auto const_guarantee_add1 = ops::Add(root.WithOpName("const_guarantee_add1"), const_guarantee_x1, const_guarantee_x2); auto add2 = ops::Add(root.WithOpName("add2"), const_guarantee_x1, x2); auto mul1 = ops::Mul(root.WithOpName("mul1"), const_guarantee_add1, add2); mul1.node()->AddAttr("_encapsulate", "encapsulate1"); Graph graph_before(OpRegistry::Global()); TF_ASSERT_OK(root.ToGraph(&graph_before)); std::unique_ptr<Graph> graph_after; FunctionLibraryDefinition library(OpRegistry::Global(), FunctionDefLibrary()); int guaranteed_consts = 0; TF_ASSERT_OK(EncapsulateSubgraphsInFunctions( "_encapsulate", graph_before, [&guaranteed_consts](const std::vector<OutputTensor>& arg_source_tensors, std::unique_ptr<Graph>* graph_ptr, std::vector<int>* input_permutation, std::vector<int>* output_permutation, NodeDef* call_def) { Graph* graph = graph_ptr->get(); for (const Node* n : graph->nodes()) { if (n->type_string() == "_Arg" && absl::StartsWith(n->name(), "const")) { ++guaranteed_consts; EXPECT_TRUE(HasGuaranteeConstAttr(*n)); } else { EXPECT_FALSE(HasGuaranteeConstAttr(*n)); } } return absl::OkStat
1073
cpp
tensorflow/tensorflow
clone_constants_for_better_clustering
tensorflow/compiler/jit/clone_constants_for_better_clustering.cc
tensorflow/compiler/jit/clone_constants_for_better_clustering_test.cc
#ifndef TENSORFLOW_COMPILER_JIT_CLONE_CONSTANTS_FOR_BETTER_CLUSTERING_H_ #define TENSORFLOW_COMPILER_JIT_CLONE_CONSTANTS_FOR_BETTER_CLUSTERING_H_ #include "absl/container/flat_hash_set.h" #include "tensorflow/core/common_runtime/optimization_registry.h" namespace tensorflow { class CloneConstantsForBetterClusteringPass : public GraphOptimizationPass { public: CloneConstantsForBetterClusteringPass() = default; Status Run(const GraphOptimizationPassOptions& options) override; }; } #endif #include "tensorflow/compiler/jit/clone_constants_for_better_clustering.h" #include <string> #include "absl/algorithm/container.h" #include "tensorflow/compiler/jit/xla_cluster_util.h" #include "xla/status_macros.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/tensor.pb.h" #include "tensorflow/core/util/dump_graph.h" namespace tensorflow { using tsl::StatusOr; class CloneConstantsForBetterClusteringPassImpl { public: explicit CloneConstantsForBetterClusteringPassImpl(Graph* graph) : graph_(graph), unique_name_counter_(0) {} Status Run(); private: Status CloneSmallConstantInputs(const absl::flat_hash_set<string>& name_set, Node* n); string GenerateUniqueName(const absl::flat_hash_set<string>& name_set, absl::string_view prefix); absl::StatusOr<Node*> CloneNode(const absl::flat_hash_set<string>& name_set, Node* n); Graph* graph_; int unique_name_counter_; }; string CloneConstantsForBetterClusteringPassImpl::GenerateUniqueName( const absl::flat_hash_set<string>& name_set, absl::string_view prefix) { string candidate; do { candidate = absl::StrCat(prefix, "/clone_", unique_name_counter_++); } while (name_set.contains(candidate)); return candidate; } absl::StatusOr<Node*> CloneConstantsForBetterClusteringPassImpl::CloneNode( const absl::flat_hash_set<string>& name_set, Node* n) { NodeDef new_in_def = n->def(); new_in_def.clear_input(); new_in_def.set_name(GenerateUniqueName(name_set, new_in_def.name())); TF_ASSIGN_OR_RETURN(Node * new_in, graph_->AddNode(new_in_def)); for (const Edge* e : n->in_edges()) { if (e->IsControlEdge()) { graph_->AddControlEdge(e->src(), new_in); } else { graph_->AddEdge(e->src(), e->src_output(), new_in, e->dst_input()); } } new_in->set_assigned_device_name(n->assigned_device_name()); return new_in; } namespace { absl::StatusOr<bool> IsConstantSmall(Node* n) { const TensorProto* proto = nullptr; TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "value", &proto)); int64_t total_elements = 1; for (const auto& dim : proto->tensor_shape().dim()) { if (dim.size() < 0) { return errors::Internal("Unknown dimension size in constant tensor ", n->name()); } total_elements *= dim.size(); } const int kSmallTensorThreshold = 16; return total_elements < kSmallTensorThreshold; } absl::StatusOr<bool> IsSmallConstant(Node* n) { if (!n->IsConstant()) { return false; } return IsConstantSmall(n); } bool IsInPlaceOp(absl::string_view op_name) { return op_name == "InplaceUpdate" || op_name == "InplaceAdd" || op_name == "InplaceSub"; } } Status CloneConstantsForBetterClusteringPassImpl::CloneSmallConstantInputs( const absl::flat_hash_set<string>& name_set, Node* n) { std::vector<const Edge*> in_edges; absl::c_copy(n->in_edges(), std::back_inserter(in_edges)); absl::c_stable_sort(in_edges, [](const Edge* e1, const Edge* e2) { return e1->id() < e2->id(); }); for (const Edge* e : in_edges) { Node* input = e->src(); TF_ASSIGN_OR_RETURN(bool is_small_constant, IsSmallConstant(input)); if (is_small_constant && input->out_edges().size() != 1) { VLOG(2) << "Cloning small constant " << 
input->name(); TF_ASSIGN_OR_RETURN(Node* const input_cloned, CloneNode(name_set, input)); if (e->IsControlEdge()) { graph_->AddControlEdge(input_cloned, e->dst()); } else { int dst_input = e->dst_input(); TF_RET_CHECK(e->src_output() == 0) << "expected constant to have exactly one non-control output, but " "found output index = " << e->src_output(); graph_->RemoveEdge(e); graph_->AddEdge(input_cloned, 0, n, dst_input); } } } return absl::OkStatus(); } Status CloneConstantsForBetterClusteringPassImpl::Run() { absl::flat_hash_set<string> name_set; absl::c_transform(graph_->nodes(), std::inserter(name_set, name_set.begin()), [](Node* n) { return n->name(); }); std::vector<Node*> nodes; for (Node* n : graph_->nodes()) { if (IsInPlaceOp(n->type_string())) { return absl::OkStatus(); } nodes.push_back(n); } for (Node* n : nodes) { TF_RETURN_IF_ERROR(CloneSmallConstantInputs(name_set, n)); } return absl::OkStatus(); } Status CloneConstantsForBetterClusteringPass::Run( const GraphOptimizationPassOptions& options) { if (GetGlobalJitLevelForGraph(options) == OptimizerOptions::OFF) { return absl::OkStatus(); } Graph* g = options.graph->get(); if (VLOG_IS_ON(1)) { DumpGraphToFile("before_clone_constants_for_better_clustering", *g); } TF_RETURN_IF_ERROR(CloneConstantsForBetterClusteringPassImpl{g}.Run()); if (VLOG_IS_ON(1)) { DumpGraphToFile("after_clone_constants_for_better_clustering", *g); } return absl::OkStatus(); } }
#include "tensorflow/compiler/jit/clone_constants_for_better_clustering.h" #include "tensorflow/cc/framework/ops.h" #include "tensorflow/cc/ops/array_ops.h" #include "tensorflow/cc/ops/const_op.h" #include "tensorflow/cc/ops/math_ops.h" #include "tensorflow/compiler/jit/node_matchers.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/public/session_options.h" namespace tensorflow { namespace { using ::tensorflow::testing::FindNodeByName; Status CloneConstantsForBetterClustering(const Scope& s, std::unique_ptr<Graph>* result) { auto graph = std::make_unique<Graph>(OpRegistry::Global()); SessionOptions session_options; session_options.config.mutable_graph_options() ->mutable_optimizer_options() ->set_global_jit_level(OptimizerOptions::ON_2); GraphOptimizationPassOptions options; options.graph = &graph; options.session_options = &session_options; GraphConstructorOptions opts; opts.expect_device_spec = true; TF_RETURN_IF_ERROR(s.ToGraph(graph.get(), opts)); CloneConstantsForBetterClusteringPass rewriter; TF_RETURN_IF_ERROR(rewriter.Run(options)); *result = std::move(graph); return absl::OkStatus(); } const char* kCPU = "/job:localhost/replica:0/task:0/device:CPU:0"; const char* kGPU = "/job:localhost/replica:0/task:0/device:GPU:0"; TEST(CloneConstantsForBetterClusteringTest, ScalarConstantPlacedOnGpu) { Scope root = Scope::NewRootScope().ExitOnError(); Scope on_gpu = root.WithAssignedDevice(kGPU).WithDevice(kGPU); Output in = ops::Placeholder(on_gpu.WithOpName("in"), DT_FLOAT); Output c = ops::Const(on_gpu.WithOpName("const"), 1.0f, {}); Output add1 = ops::AddV2(on_gpu.WithOpName("add1"), in, c); Output add2 = ops::AddV2(on_gpu.WithOpName("add2"), add1, c); std::unique_ptr<Graph> result; TF_ASSERT_OK(CloneConstantsForBetterClustering(root, &result)); OutputTensor add1_operand; TF_ASSERT_OK( FindNodeByName(result.get(), "add1")->input_tensor(1, &add1_operand)); OutputTensor add2_operand; TF_ASSERT_OK( FindNodeByName(result.get(), "add2")->input_tensor(1, &add2_operand)); EXPECT_NE(add1_operand.node, add2_operand.node); } TEST(CloneConstantsForBetterClusteringTest, HostConstantPlacedOnCpu) { Scope root = Scope::NewRootScope().ExitOnError(); Scope on_gpu = root.WithAssignedDevice(kGPU).WithDevice(kGPU); Scope on_cpu = root.WithAssignedDevice(kCPU).WithDevice(kCPU); Output in0 = ops::Placeholder(on_gpu.WithOpName("in0"), DT_FLOAT); Output in1 = ops::Placeholder(on_gpu.WithOpName("in1"), DT_FLOAT); Output perm = ops::Const(on_cpu.WithOpName("perm"), {3, 1, 2, 0}); { Output tr0 = ops::Transpose(on_gpu.WithOpName("tr0"), in0, perm); Output tr1 = ops::Transpose(on_gpu.WithOpName("tr1"), in1, perm); } std::unique_ptr<Graph> result; TF_ASSERT_OK(CloneConstantsForBetterClustering(root, &result)); OutputTensor tr0_perm; TF_ASSERT_OK(FindNodeByName(result.get(), "tr0")->input_tensor(1, &tr0_perm)); OutputTensor tr1_perm; TF_ASSERT_OK(FindNodeByName(result.get(), "tr1")->input_tensor(1, &tr1_perm)); EXPECT_NE(tr0_perm.node, tr1_perm.node); } TEST(CloneConstantsForBetterClusteringTest, HostConstantPlacedOnGpu) { Scope root = Scope::NewRootScope().ExitOnError(); Scope on_gpu = root.WithAssignedDevice(kGPU).WithDevice(kGPU); Output in0 = ops::Placeholder(on_gpu.WithOpName("in0"), DT_FLOAT); Output in1 = ops::Placeholder(on_gpu.WithOpName("in1"), DT_FLOAT); Output perm = ops::Const(on_gpu.WithOpName("perm"), {3, 1, 2, 0}); { Output tr0 = ops::Transpose(on_gpu.WithOpName("tr0"), in0, perm); Output tr1 = 
ops::Transpose(on_gpu.WithOpName("tr1"), in1, perm); } std::unique_ptr<Graph> result; TF_ASSERT_OK(CloneConstantsForBetterClustering(root, &result)); OutputTensor tr0_perm; TF_ASSERT_OK(FindNodeByName(result.get(), "tr0")->input_tensor(1, &tr0_perm)); OutputTensor tr1_perm; TF_ASSERT_OK(FindNodeByName(result.get(), "tr1")->input_tensor(1, &tr1_perm)); EXPECT_NE(tr0_perm.node, tr1_perm.node); } TEST(CloneConstantsForBetterClusteringTest, CloneSmallDeviceConstants) { Scope root = Scope::NewRootScope().ExitOnError(); Scope on_gpu = root.WithAssignedDevice(kGPU).WithDevice(kGPU); Output in0 = ops::Placeholder(on_gpu.WithOpName("in0"), DT_FLOAT); Output in1 = ops::Placeholder(on_gpu.WithOpName("in1"), DT_FLOAT); Output perm_f32 = ops::Const(on_gpu.WithOpName("perm"), {3.0, 1.0, 2.0, 0.0}); Output perm_int0 = ops::Cast(on_gpu.WithOpName("perm_cast_0"), perm_f32, DT_INT32); Output perm_int1 = ops::Cast(on_gpu.WithOpName("perm_cast_1"), perm_f32, DT_INT32); { Output tr0 = ops::Transpose(on_gpu.WithOpName("tr0"), in0, perm_int0); Output tr1 = ops::Transpose(on_gpu.WithOpName("tr1"), in1, perm_int1); } std::unique_ptr<Graph> result; TF_ASSERT_OK(CloneConstantsForBetterClustering(root, &result)); OutputTensor tr0_perm; TF_ASSERT_OK( FindNodeByName(result.get(), "perm_cast_0")->input_tensor(0, &tr0_perm)); OutputTensor tr1_perm; TF_ASSERT_OK( FindNodeByName(result.get(), "perm_cast_1")->input_tensor(0, &tr1_perm)); EXPECT_NE(tr0_perm.node, tr1_perm.node); } TEST(CloneConstantsForBetterClusteringTest, DontCloneLargeConstants) { Scope root = Scope::NewRootScope().ExitOnError(); Scope on_gpu = root.WithAssignedDevice(kGPU).WithDevice(kGPU); Scope on_cpu = root.WithAssignedDevice(kCPU).WithDevice(kCPU); Output in0 = ops::Placeholder(on_gpu.WithOpName("in0"), DT_FLOAT); Output in1 = ops::Placeholder(on_gpu.WithOpName("in1"), DT_FLOAT); Output perm = ops::Const( on_cpu.WithOpName("perm"), {17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0}); { Output tr0 = ops::Transpose(on_gpu.WithOpName("tr0"), in0, perm); Output tr1 = ops::Transpose(on_gpu.WithOpName("tr1"), in1, perm); } std::unique_ptr<Graph> result; TF_ASSERT_OK(CloneConstantsForBetterClustering(root, &result)); OutputTensor tr0_perm; TF_ASSERT_OK(FindNodeByName(result.get(), "tr0")->input_tensor(1, &tr0_perm)); OutputTensor tr1_perm; TF_ASSERT_OK(FindNodeByName(result.get(), "tr1")->input_tensor(1, &tr1_perm)); EXPECT_EQ(tr0_perm.node, tr1_perm.node); } TEST(CloneConstantsForBetterClusteringTest, InplaceOps) { Scope root = Scope::NewRootScope().ExitOnError(); Scope on_gpu = root.WithAssignedDevice(kGPU).WithDevice(kGPU); Scope on_cpu = root.WithAssignedDevice(kCPU).WithDevice(kCPU); Output in0 = ops::Placeholder(on_gpu.WithOpName("in0"), DT_FLOAT); Output in1 = ops::Placeholder(on_gpu.WithOpName("in1"), DT_FLOAT); Output perm = ops::Const(on_cpu.WithOpName("perm"), {3, 1, 2, 0}); { Output tr0 = ops::Transpose(on_gpu.WithOpName("tr0"), in0, perm); Output tr1 = ops::Transpose(on_gpu.WithOpName("tr1"), in1, perm); } Output in_place_add = ops::InplaceAdd(on_cpu.WithOpName("tr0"), perm, ops::Placeholder(on_cpu.WithOpName("i"), DT_INT32), perm); std::unique_ptr<Graph> result; TF_ASSERT_OK(CloneConstantsForBetterClustering(root, &result)); OutputTensor tr0_perm; TF_ASSERT_OK(FindNodeByName(result.get(), "tr0")->input_tensor(1, &tr0_perm)); OutputTensor tr1_perm; TF_ASSERT_OK(FindNodeByName(result.get(), "tr1")->input_tensor(1, &tr1_perm)); EXPECT_EQ(tr0_perm.node, tr1_perm.node); } } }
1074
cpp
tensorflow/tensorflow
device_compilation_profiler
tensorflow/compiler/jit/device_compilation_profiler.cc
tensorflow/compiler/jit/device_compilation_profiler_test.cc
#ifndef TENSORFLOW_COMPILER_JIT_DEVICE_COMPILATION_PROFILER_H_ #define TENSORFLOW_COMPILER_JIT_DEVICE_COMPILATION_PROFILER_H_ #include <cstdint> #include <string> #include "tensorflow/compiler/jit/xla_compile_util.h" #include "tensorflow/core/framework/attr_value.pb.h" namespace tensorflow { class DeviceCompilationProfiler : public ResourceBase { public: DeviceCompilationProfiler() = default; ~DeviceCompilationProfiler() override; struct ClusterCompileStats { int64_t compile_count = 0; int64_t execution_count = 0; int64_t cumulative_compile_time_us = 0; bool is_megamorphic = false; std::string DebugString() const { return absl::StrCat( "DeviceCompilationProfiler::ClusterCompileStats {compile_count=", compile_count, ", execution_count=", execution_count, ", cumulative_compile_time_us=", cumulative_compile_time_us, ", is_megamorphic=", is_megamorphic, "}"); } }; absl::StatusOr<ClusterCompileStats> GetCompileStats( const NameAttrList& function) const; virtual bool ShouldCompileCluster(const NameAttrList& function, DeviceCompileMode compile_mode, int64_t current_request_count); void RegisterExecution(const NameAttrList& function); virtual Status RegisterCompilation(const NameAttrList& function, int64_t compile_time_us, bool used_persistent_cache); void IncrementOngoingAsyncCompilations(); void DecrementOngoingAsyncCompilations(); int64_t GetNumOngoingAsyncCompilations() const; std::string DebugString() const override; private: mutable mutex mu_; absl::flat_hash_map<std::string, ClusterCompileStats> cluster_compile_stats_ TF_GUARDED_BY(mu_); int64_t num_ongoing_compilations_ TF_GUARDED_BY(mu_) = 0; DeviceCompilationProfiler(const DeviceCompilationProfiler&) = delete; void operator=(const DeviceCompilationProfiler&) = delete; }; } #endif #include "tensorflow/compiler/jit/device_compilation_profiler.h" #include <cstdint> #include <optional> #include <string> #include <utility> #include "absl/strings/str_cat.h" #include "tensorflow/compiler/jit/xla_activity.pb.h" #include "tensorflow/compiler/jit/xla_activity_listener.h" #include "tensorflow/core/framework/attr_value.pb.h" #include "tensorflow/core/framework/metrics.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/status.h" #include "tsl/platform/mutex.h" namespace tensorflow { namespace { bool ShouldBeMegamorphic(int64_t compile_count, int64_t execution_count) { const int64_t kCompileThreshold = 10; const int64_t kMinExecutionsPerCompile = 50; return compile_count > kCompileThreshold && execution_count < kMinExecutionsPerCompile * compile_count; } void RegisterExecutionForCluster( const NameAttrList& function, DeviceCompilationProfiler::ClusterCompileStats* stats) { ++stats->execution_count; if (!stats->is_megamorphic && ShouldBeMegamorphic(stats->compile_count, stats->execution_count)) { VLOG(1) << "Marking " << function.name() << " as megamorphic, compile_count=" << stats->compile_count << " execution_count=" << stats->execution_count; stats->is_megamorphic = true; } } constexpr int64_t kDefaultCompilationThreshold = 2; constexpr int64_t kMaxNumOngoingCompilations = kNumAsyncDeviceCompilerThreads; } DeviceCompilationProfiler::~DeviceCompilationProfiler() { mutex_lock lock(mu_); cluster_compile_stats_.clear(); } absl::StatusOr<DeviceCompilationProfiler::ClusterCompileStats> DeviceCompilationProfiler::GetCompileStats(const NameAttrList& function) const { mutex_lock lock(mu_); if (auto it = cluster_compile_stats_.find(function.name()); it != cluster_compile_stats_.end()) { return it->second; } return 
errors::NotFound("Couldn't find compilation stats for cluster: ", function.name()); } void DeviceCompilationProfiler::RegisterExecution( const NameAttrList& function) { mutex_lock lock(mu_); auto it = cluster_compile_stats_.emplace(function.name(), ClusterCompileStats{}) .first; RegisterExecutionForCluster(function, &it->second); } Status DeviceCompilationProfiler::RegisterCompilation( const NameAttrList& function, int64_t compile_time_us, bool used_persistent_cache) { metrics::UpdateXlaCompilationTime(compile_time_us); const std::string& function_name = function.name(); mutex_lock lock(mu_); auto it = cluster_compile_stats_.emplace(function.name(), ClusterCompileStats{}) .first; const uint64 compile_time_s = compile_time_us / 1.0e6; it->second.compile_count++; it->second.cumulative_compile_time_us += compile_time_us; VLOG(1) << "Compiled " << function_name << " " << it->second.compile_count << " times, compile time: " << compile_time_us << " us, cumulative: " << it->second.cumulative_compile_time_us << " us (" << tensorflow::strings::HumanReadableElapsedTime(compile_time_s) << " / " << tensorflow::strings::HumanReadableElapsedTime( it->second.cumulative_compile_time_us / 1.0e6) << ")"; XlaJitCompilationActivity jit_compilation_activity; jit_compilation_activity.set_cluster_name(function_name); jit_compilation_activity.set_compile_count(it->second.compile_count); jit_compilation_activity.set_compile_time_us(compile_time_us); jit_compilation_activity.set_cumulative_compile_time_us( it->second.cumulative_compile_time_us); jit_compilation_activity.set_used_persistent_cache(used_persistent_cache); return BroadcastXlaActivity(std::move(jit_compilation_activity)); } bool DeviceCompilationProfiler::ShouldCompileCluster( const NameAttrList& function, DeviceCompileMode compile_mode, int64_t current_request_count) { std::optional<int64_t> compile_threshold; if (compile_mode == DeviceCompileMode::kLazy) { compile_threshold = kDefaultCompilationThreshold; } else if (compile_mode == DeviceCompileMode::kAsync) { compile_threshold = 0; } if (compile_mode == DeviceCompileMode::kStrict) { return true; } mutex_lock lock(mu_); auto [it, cluster_not_found] = cluster_compile_stats_.emplace(function.name(), ClusterCompileStats{}); if (cluster_not_found) { RegisterExecutionForCluster(function, &it->second); } if (it->second.is_megamorphic) { BroadcastOptimizationRemark(XlaOptimizationRemark::MEGAMORPHIC_FUNCTION, function.name()) .IgnoreError(); VLOG(2) << "Not compiling cluster " << function.name() << " because it is megamorphic."; return false; } if (it->second.execution_count == 1) { return true; } if (compile_mode == DeviceCompileMode::kAsync) { if (num_ongoing_compilations_ >= kMaxNumOngoingCompilations) { VLOG(2) << "Not asynchronously compiling cluster " << function.name() << " because of too many ongoing compilations."; return false; } } bool reached_compile_threshold = current_request_count >= *compile_threshold; if (!reached_compile_threshold) { VLOG(2) << "Not compiling cluster " << function.name() << " because it has not reached compile threshold; threshold is " << *compile_threshold << " execution count " << current_request_count << "."; } return reached_compile_threshold; } void DeviceCompilationProfiler::IncrementOngoingAsyncCompilations() { mutex_lock lock(mu_); num_ongoing_compilations_++; } void DeviceCompilationProfiler::DecrementOngoingAsyncCompilations() { mutex_lock lock(mu_); num_ongoing_compilations_--; } int64_t DeviceCompilationProfiler::GetNumOngoingAsyncCompilations() const { 
mutex_lock lock(mu_); return num_ongoing_compilations_; } std::string DeviceCompilationProfiler::DebugString() const { std::string debug_string = "DeviceCompilationProfiler {\ncluster_compile_stats: {\n"; { mutex_lock lock(mu_); for (const auto& [key, stats] : cluster_compile_stats_) { absl::StrAppend(&debug_string, key, ": ", stats.DebugString(), "\n"); } } absl::StrAppend(&debug_string, "}\nnum_ongoing_compilations=", GetNumOngoingAsyncCompilations(), "\n}\n"); return debug_string; } }
#include "tensorflow/compiler/jit/device_compilation_profiler.h" #include <memory> #include <utility> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/compiler/jit/tests/device_compiler_test_helper.h" #include "tensorflow/compiler/jit/xla_activity.pb.h" #include "tensorflow/core/framework/attr_value.pb.h" namespace tensorflow { namespace { TEST(DeviceCompilationProfilerTest, RegisterExecution) { DeviceCompilationProfiler* profiler = new DeviceCompilationProfiler(); core::ScopedUnref profiler_ref(profiler); NameAttrList function; function.set_name("TestFunc"); for (int i = 0; i < 5; ++i) { profiler->RegisterExecution(function); } TF_ASSERT_OK_AND_ASSIGN(auto stats, profiler->GetCompileStats(function)); EXPECT_EQ(stats.execution_count, 5); } TEST(DeviceCompilationProfilerTest, RegisterCompilation) { DeviceCompilationProfiler* profiler = new DeviceCompilationProfiler(); core::ScopedUnref profiler_ref(profiler); auto listener = std::make_unique<JitCompilationListener>(); auto listener_ptr = listener.get(); RegisterXlaActivityListener(std::move(listener)); NameAttrList function; function.set_name("TestFunc"); std::vector<XlaJitCompilationActivity> expected_activities; for (int i = 0; i < 5; ++i) { EXPECT_TRUE(profiler->RegisterCompilation(function, 4, false).ok()); TF_ASSERT_OK_AND_ASSIGN(auto stats, profiler->GetCompileStats(function)); XlaJitCompilationActivity expected_activity; expected_activity.set_cluster_name(function.name()); expected_activity.set_compile_count(stats.compile_count); expected_activity.set_compile_time_us(4); expected_activity.set_cumulative_compile_time_us( stats.cumulative_compile_time_us); expected_activity.set_used_persistent_cache(false); expected_activities.push_back(expected_activity); } TF_ASSERT_OK_AND_ASSIGN(auto stats, profiler->GetCompileStats(function)); EXPECT_EQ(stats.compile_count, 5); EXPECT_EQ(stats.cumulative_compile_time_us, 5 * 4); const auto& actual_activities = listener_ptr->GetListenerHistory(); EXPECT_EQ(actual_activities.size(), expected_activities.size()); for (size_t i = 0; i < actual_activities.size(); ++i) { EXPECT_EQ(actual_activities[i].SerializeAsString(), expected_activities[i].SerializeAsString()); } } TEST(DeviceCompilationProfilerTest, OngoingAsyncCompilations) { DeviceCompilationProfiler* profiler = new DeviceCompilationProfiler(); core::ScopedUnref profiler_ref(profiler); for (int i = 0; i < 5; ++i) { profiler->IncrementOngoingAsyncCompilations(); } EXPECT_EQ(profiler->GetNumOngoingAsyncCompilations(), 5); for (int i = 0; i < 5; ++i) { profiler->DecrementOngoingAsyncCompilations(); } EXPECT_EQ(profiler->GetNumOngoingAsyncCompilations(), 0); for (int i = 0; i < 5; ++i) { profiler->IncrementOngoingAsyncCompilations(); profiler->DecrementOngoingAsyncCompilations(); } EXPECT_EQ(profiler->GetNumOngoingAsyncCompilations(), 0); } TEST(DeviceCompilationProfilerTest, ShouldCompileClusterNotFound) { DeviceCompilationProfiler* profiler = new DeviceCompilationProfiler(); core::ScopedUnref profiler_ref(profiler); NameAttrList function; function.set_name("TestFunc"); EXPECT_TRUE( profiler->ShouldCompileCluster(function, DeviceCompileMode::kAsync, 0)); EXPECT_TRUE( profiler->ShouldCompileCluster(function, DeviceCompileMode::kLazy, 0)); EXPECT_TRUE( profiler->ShouldCompileCluster(function, DeviceCompileMode::kStrict, 0)); } TEST(DeviceCompilationProfilerTest, ShouldCompileClusterFirstExecution) { DeviceCompilationProfiler* profiler = new DeviceCompilationProfiler(); core::ScopedUnref profiler_ref(profiler); 
NameAttrList function; function.set_name("TestFunc"); profiler->RegisterExecution(function); EXPECT_TRUE( profiler->ShouldCompileCluster(function, DeviceCompileMode::kAsync, 0)); EXPECT_TRUE( profiler->ShouldCompileCluster(function, DeviceCompileMode::kLazy, 0)); } TEST(DeviceCompilationProfilerTest, ShouldCompileClusterMegamorphic) { DeviceCompilationProfiler* profiler = new DeviceCompilationProfiler(); core::ScopedUnref profiler_ref(profiler); NameAttrList function; function.set_name("TestFunc"); const int64_t kCompileThreshold = 10; const int64_t kMinExecutionsPerCompile = 50; for (int i = 0; i < kCompileThreshold + 1; ++i) { EXPECT_TRUE(profiler->RegisterCompilation(function, 1, false).ok()); } profiler->RegisterExecution(function); EXPECT_FALSE( profiler->ShouldCompileCluster(function, DeviceCompileMode::kAsync, 0)); EXPECT_FALSE( profiler->ShouldCompileCluster(function, DeviceCompileMode::kLazy, 0)); TF_ASSERT_OK_AND_ASSIGN(auto stats, profiler->GetCompileStats(function)); EXPECT_TRUE(stats.is_megamorphic); EXPECT_TRUE( profiler->ShouldCompileCluster(function, DeviceCompileMode::kStrict, 0)); for (int i = 0; i < kCompileThreshold * kMinExecutionsPerCompile + 1; ++i) { profiler->RegisterExecution(function); } EXPECT_FALSE( profiler->ShouldCompileCluster(function, DeviceCompileMode::kAsync, 0)); EXPECT_FALSE( profiler->ShouldCompileCluster(function, DeviceCompileMode::kLazy, 0)); TF_ASSERT_OK_AND_ASSIGN(stats, profiler->GetCompileStats(function)); EXPECT_TRUE(stats.is_megamorphic); EXPECT_TRUE( profiler->ShouldCompileCluster(function, DeviceCompileMode::kStrict, 0)); } TEST(DeviceCompilationProfilerTest, ShouldCompileClusterAsync) { DeviceCompilationProfiler* profiler = new DeviceCompilationProfiler(); core::ScopedUnref profiler_ref(profiler); NameAttrList function; function.set_name("TestFunc"); const int64_t kMaxNumOngoingCompilations = 10; for (int i = 0; i < kMaxNumOngoingCompilations; ++i) { profiler->IncrementOngoingAsyncCompilations(); } profiler->RegisterExecution(function); EXPECT_TRUE( profiler->ShouldCompileCluster(function, DeviceCompileMode::kAsync, 0)); profiler->RegisterExecution(function); EXPECT_FALSE( profiler->ShouldCompileCluster(function, DeviceCompileMode::kAsync, 0)); profiler->DecrementOngoingAsyncCompilations(); EXPECT_TRUE( profiler->ShouldCompileCluster(function, DeviceCompileMode::kAsync, 0)); } TEST(DeviceCompilationProfilerTest, ShouldCompileClusterLazy) { DeviceCompilationProfiler* profiler = new DeviceCompilationProfiler(); core::ScopedUnref profiler_ref(profiler); NameAttrList function; function.set_name("TestFunc"); constexpr int64_t kDefaultCompilationThreshold = 2; profiler->RegisterExecution(function); EXPECT_TRUE( profiler->ShouldCompileCluster(function, DeviceCompileMode::kLazy, 0)); profiler->RegisterExecution(function); for (int current_request_count = 0; current_request_count < kDefaultCompilationThreshold; ++current_request_count) { EXPECT_FALSE(profiler->ShouldCompileCluster( function, DeviceCompileMode::kLazy, current_request_count)); } EXPECT_TRUE(profiler->ShouldCompileCluster(function, DeviceCompileMode::kLazy, kDefaultCompilationThreshold)); } } }
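The megamorphic heuristic in device_compilation_profiler.cc above is the easiest part of the row to misread, so here is a minimal standalone C++ sketch of it. The constants are copied from the anonymous namespace in the source; the sample counts in main() are invented purely for illustration.

#include <cstdint>
#include <iostream>

// Mirrors ShouldBeMegamorphic() from device_compilation_profiler.cc: a cluster
// is marked megamorphic once it has been compiled more than kCompileThreshold
// times while averaging fewer than kMinExecutionsPerCompile executions per
// compilation.
bool ShouldBeMegamorphic(int64_t compile_count, int64_t execution_count) {
  const int64_t kCompileThreshold = 10;
  const int64_t kMinExecutionsPerCompile = 50;
  return compile_count > kCompileThreshold &&
         execution_count < kMinExecutionsPerCompile * compile_count;
}

int main() {
  std::cout << ShouldBeMegamorphic(11, 10) << "\n";   // 1: 11 compiles, only 10 runs
  std::cout << ShouldBeMegamorphic(10, 10) << "\n";   // 0: compile count not above threshold
  std::cout << ShouldBeMegamorphic(11, 600) << "\n";  // 0: 600 >= 50 * 11 executions
}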
1075
cpp
tensorflow/tensorflow
cluster_scoping_pass
tensorflow/compiler/jit/cluster_scoping_pass.cc
tensorflow/compiler/jit/cluster_scoping_pass_test.cc
#ifndef TENSORFLOW_COMPILER_JIT_CLUSTER_SCOPING_PASS_H_ #define TENSORFLOW_COMPILER_JIT_CLUSTER_SCOPING_PASS_H_ #include "tensorflow/core/common_runtime/optimization_registry.h" namespace tensorflow { class ClusterScopingPass : public GraphOptimizationPass { public: Status Run(const GraphOptimizationPassOptions& options) override; }; } #endif #include "tensorflow/compiler/jit/cluster_scoping_pass.h" #include "absl/algorithm/container.h" #include "absl/container/flat_hash_set.h" #include "absl/strings/str_cat.h" #include "tensorflow/compiler/jit/defs.h" #include "tensorflow/compiler/jit/xla_cluster_util.h" #include "tensorflow/core/framework/node_def_util.h" #include "tensorflow/core/graph/algorithm.h" namespace tensorflow { namespace { class ClusterScopingPassImpl { public: ClusterScopingPassImpl(Graph* graph, OptimizerOptions::GlobalJitLevel global_jit_level) : graph_(graph), global_jit_level_(global_jit_level), unique_scope_id_(0) {} Status Run(); private: Status ScopingForPipelineStages(); size_t GetUniqueScopeId() { return unique_scope_id_++; } void AddScopeToAllTransitivePredecessors(Node* start); void AddScopeToAllTransitiveSuccessors(Node* start); private: Graph* graph_; OptimizerOptions::GlobalJitLevel global_jit_level_; size_t unique_scope_id_; }; std::optional<string> GetXlaInternalScope(Node* node) { string scope; if (GetNodeAttr(node->attrs(), kXlaInternalScopeAttr, &scope).ok()) { return scope; } return std::nullopt; } void SetXlaInternalScope(Node* node, StringPiece scope) { node->AddAttr(kXlaInternalScopeAttr, scope); } void AddOrAppendXlaInternalScope(Node* node, absl::string_view suffix) { string updated_scope; std::optional<string> cur_scope = GetXlaInternalScope(node); if (cur_scope == std::nullopt) { updated_scope = std::string(suffix); } else { updated_scope = absl::StrCat(cur_scope.value(), "&", suffix); } SetXlaInternalScope(node, updated_scope); } void ClusterScopingPassImpl::AddScopeToAllTransitivePredecessors(Node* start) { const string unique_suffix = absl::StrCat("_", GetUniqueScopeId()); std::vector<Node*> starts; starts.push_back(start); auto enter = [&](Node* n) { AddOrAppendXlaInternalScope(n, unique_suffix); }; ReverseDFSFrom(*graph_, starts, enter, nullptr, NodeComparatorName()); } void ClusterScopingPassImpl::AddScopeToAllTransitiveSuccessors(Node* start) { const string unique_suffix = absl::StrCat("_", GetUniqueScopeId()); std::vector<Node*> starts; starts.push_back(start); auto enter = [&](Node* n) { AddOrAppendXlaInternalScope(n, unique_suffix); }; DFSFrom(*graph_, starts, enter, nullptr, NodeComparatorName(), nullptr); } Status ClusterScopingPassImpl::ScopingForPipelineStages() { for (Node* n : graph_->nodes()) { DCHECK(n); if (n->type_string() == "Unstage") { AddScopeToAllTransitiveSuccessors(n); } if (n->type_string() == "Stage") { AddScopeToAllTransitivePredecessors(n); } } return absl::OkStatus(); } Status ClusterScopingPassImpl::Run() { if (global_jit_level_ == OptimizerOptions::OFF) { return absl::OkStatus(); } return ScopingForPipelineStages(); } } Status ClusterScopingPass::Run(const GraphOptimizationPassOptions& options) { Graph* graph = options.graph->get(); return ClusterScopingPassImpl{graph, GetGlobalJitLevelForGraph(options)} .Run(); } }
#include "tensorflow/compiler/jit/cluster_scoping_pass.h" #include "absl/container/flat_hash_map.h" #include "tensorflow/compiler/jit/defs.h" #include "tensorflow/compiler/jit/test_util.h" #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/common_runtime/graph_def_builder_util.h" #include "tensorflow/core/framework/node_def_util.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/graph/algorithm.h" #include "tensorflow/core/graph/graph_def_builder.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/public/session_options.h" namespace tensorflow { namespace { Status ClusterScoping(std::unique_ptr<Graph>* graph) { FixupSourceAndSinkEdges(graph->get()); GraphOptimizationPassWrapper wrapper; wrapper.session_options.config.mutable_graph_options() ->mutable_optimizer_options() ->set_global_jit_level(OptimizerOptions::ON_2); GraphOptimizationPassOptions opt_options = wrapper.CreateGraphOptimizationPassOptions(graph); ClusterScopingPass pass; return pass.Run(opt_options); } absl::flat_hash_map<string, string> GetXlaInternalScopes(const Graph& graph) { absl::flat_hash_map<string, string> scopes; for (Node* node : graph.nodes()) { string scope; if (GetNodeAttr(node->attrs(), kXlaInternalScopeAttr, &scope).ok()) { scopes[node->name()] = scope; } } if (VLOG_IS_ON(2)) { VLOG(2) << "_XlaInternalScopes:"; for (const auto& p : scopes) { VLOG(2) << " " << p.first << " -> " << p.second; } } return scopes; } Node* BuildStageNode(GraphDefBuilder& builder, string name, std::initializer_list<DataType> dtypes, absl::Span<const ops::NodeOut> values) { auto opts = builder.opts() .WithName(std::move(name)) .WithAttr("dtypes", std::move(dtypes)); if (opts.HaveError()) { return nullptr; } NodeBuilder node_builder(name, "Stage", opts.op_registry()); node_builder.Input(values); return opts.FinalizeBuilder(&node_builder); } TEST(XlaCompilationTest, StagePipelinePreserved) { std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); { GraphDefBuilder builder(GraphDefBuilder::kFailImmediately); Node* a = ops::SourceOp("Const", builder.opts() .WithName("a") .WithAttr("dtype", DT_FLOAT) .WithAttr("value", Tensor())); Node* b = ops::SourceOp("Const", builder.opts() .WithName("b") .WithAttr("dtype", DT_FLOAT) .WithAttr("value", Tensor())); Node* unstage = ops::SourceOp( "Unstage", builder.opts().WithName("unstage").WithAttr("dtypes", {DT_FLOAT})); Node* add0 = ops::BinaryOp("Add", a, b, builder.opts().WithName("add0")); Node* add1 = ops::BinaryOp("Add", unstage, b, builder.opts().WithName("add1")); Node* relu0 = ops::UnaryOp("Relu", add0, builder.opts().WithName("relu0")); ops::UnaryOp("Relu", add1, builder.opts().WithName("relu1")); BuildStageNode(builder, "stage", {DT_FLOAT}, {relu0}); TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get())); } TF_ASSERT_OK(ClusterScoping(&graph)); auto scopes = GetXlaInternalScopes(*graph); EXPECT_NE(scopes["add0"], scopes["add1"]); EXPECT_EQ(scopes["add0"], scopes["relu0"]); EXPECT_EQ(scopes["add1"], scopes["relu1"]); } TEST(XlaCompilationTest, StagePipelinePreservedAndInitialScopesRespected) { std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); { GraphDefBuilder builder(GraphDefBuilder::kFailImmediately); Node* a = ops::SourceOp("Const", builder.opts() .WithName("a") .WithAttr("dtype", DT_FLOAT) .WithAttr("value", Tensor())); Node* b = ops::SourceOp("Const", builder.opts() .WithName("b") .WithAttr("dtype", DT_FLOAT) .WithAttr("value", 
Tensor())); Node* unstage = ops::SourceOp( "Unstage", builder.opts().WithName("unstage").WithAttr("dtypes", {DT_FLOAT})); Node* add0 = ops::BinaryOp("Add", a, b, builder.opts().WithName("add0").WithAttr( kXlaInternalScopeAttr, "ClusterA")); Node* add1 = ops::BinaryOp("Add", unstage, b, builder.opts().WithName("add1").WithAttr( kXlaInternalScopeAttr, "ClusterA")); Node* relu0 = ops::UnaryOp("Relu", add0, builder.opts().WithName("relu0").WithAttr( kXlaInternalScopeAttr, "ClusterB")); ops::UnaryOp("Relu", add1, builder.opts().WithName("relu1").WithAttr( kXlaInternalScopeAttr, "ClusterD")); BuildStageNode(builder, "stage", {DT_FLOAT}, {relu0}); TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get())); } TF_ASSERT_OK(ClusterScoping(&graph)); auto scopes = GetXlaInternalScopes(*graph); EXPECT_NE(scopes["add0"], scopes["add1"]); EXPECT_NE(scopes["add0"], scopes["relu0"]); EXPECT_NE(scopes["add1"], scopes["relu1"]); } } }
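The scope-combining rule in cluster_scoping_pass.cc above is why the second test expects every pre-assigned scope to change: a freshly generated suffix is appended with "&" rather than overwriting the existing value. Below is a minimal standalone sketch of that string rule; the scope values are invented for illustration.

#include <iostream>
#include <optional>
#include <string>

// Mirrors AddOrAppendXlaInternalScope() from cluster_scoping_pass.cc: a node
// with no _XlaInternalScope attribute receives the generated suffix verbatim,
// while a node that already carries a scope gets "&" plus the suffix
// appended, so user-provided scopes are refined rather than replaced.
std::string AddOrAppendScope(const std::optional<std::string>& current,
                             const std::string& suffix) {
  if (!current.has_value()) return suffix;
  return *current + "&" + suffix;
}

int main() {
  std::cout << AddOrAppendScope(std::nullopt, "_0") << "\n";   // _0
  std::cout << AddOrAppendScope("ClusterA", "_0") << "\n";     // ClusterA&_0
  std::cout << AddOrAppendScope("ClusterA&_0", "_1") << "\n";  // ClusterA&_0&_1
}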
1076
cpp
tensorflow/tensorflow
device_util
tensorflow/compiler/mlir/tensorflow/utils/device_util.cc
tensorflow/compiler/mlir/tensorflow/utils/device_util_test.cc
#ifndef XLA_DEVICE_UTIL_H_ #define XLA_DEVICE_UTIL_H_ #include <string> #include "absl/strings/str_cat.h" #include "xla/stream_executor/stream_executor.h" #include "xla/types.h" namespace xla { inline std::string DeviceIdentifier(se::StreamExecutor* stream_exec) { return absl::StrCat(stream_exec->GetPlatform()->Name(), ":", stream_exec->device_ordinal()); } } #endif #include "tensorflow/compiler/mlir/tensorflow/utils/device_util.h" #include <string> #include "absl/strings/string_view.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/Support/Error.h" #include "llvm/Support/FormatVariadic.h" #include "llvm/Support/Regex.h" #include "mlir/IR/Attributes.h" #include "mlir/IR/Builders.h" #include "mlir/IR/Diagnostics.h" #include "mlir/IR/Location.h" #include "mlir/IR/Operation.h" #include "mlir/Support/LLVM.h" #include "mlir/Support/LogicalResult.h" #include "tensorflow/core/common_runtime/device.h" #include "tensorflow/core/common_runtime/device_set.h" #include "tensorflow/core/util/device_name_utils.h" namespace tensorflow { constexpr char kDevicesAttr[] = "tf.devices"; namespace { mlir::Attribute ParseGpuDeviceMetadata(const Device& device, mlir::Builder* builder) { static auto* r = new llvm::Regex("compute capability: ([0-9]+)\\.([0-9]+)"); llvm::SmallVector<llvm::StringRef, 3> cc; if (r->match(device.attributes().physical_device_desc(), &cc)) { return mlir::TF::GpuDeviceMetadata::get( builder->getContext(), std::stoi(cc[1].str()), std::stoi(cc[2].str())); } return builder->getUnitAttr(); } mlir::LogicalResult GetDevicesFromOp(mlir::Operation* op, mlir::ArrayAttr array_attr, mlir::TF::RuntimeDevices* devices) { DeviceNameUtils::ParsedName device; for (const auto& kv : llvm::enumerate(array_attr)) { const int idx = kv.index(); auto string_attr = mlir::dyn_cast<mlir::StringAttr>(kv.value()); if (!string_attr) return op->emitOpError(llvm::formatv( "bad '{0}' attribute at index {1}, not a string", kDevicesAttr, idx)); if (DeviceNameUtils::ParseFullName(string_attr.getValue().str(), &device)) { devices->AddDevice(device); } else { return op->emitOpError( llvm::formatv("bad '{0}' attribute, '{1}', not a valid device", kDevicesAttr, string_attr.getValue())); } } return mlir::success(); } mlir::LogicalResult GetDevicesFromOp(mlir::Operation* op, mlir::DictionaryAttr dict_attr, mlir::TF::RuntimeDevices* devices) { DeviceNameUtils::ParsedName device; for (auto& kv : dict_attr) { const mlir::StringAttr name = kv.getName(); const mlir::Attribute attr = kv.getValue(); if (!DeviceNameUtils::ParseFullName(name.str(), &device)) return op->emitOpError( llvm::formatv("bad '{0}' attribute, '{1}', not a valid device", kDevicesAttr, name.strref())); if (auto gpu_metadata = mlir::dyn_cast<mlir::TF::GpuDeviceMetadata>(attr)) { devices->AddGpuDevice(device, gpu_metadata); } else { devices->AddDevice(device); } } return mlir::success(); } } void AddDevicesToOp(mlir::Operation* op, const DeviceSet* device_set) { if (!device_set) return; mlir::MLIRContext* ctx = op->getContext(); mlir::Builder builder(ctx); llvm::SmallVector<mlir::NamedAttribute, 8> devices; devices.reserve(device_set->devices().size()); for (Device* device : device_set->devices()) { string name = DeviceNameUtils::ParsedNameToString(device->parsed_name()); if (device->device_type() == DEVICE_GPU) { auto metadata = ParseGpuDeviceMetadata(*device, &builder); devices.push_back(builder.getNamedAttr(name, metadata)); } else { auto metadata = builder.getUnitAttr(); 
devices.push_back(builder.getNamedAttr(name, metadata)); } } op->setAttr(kDevicesAttr, builder.getDictionaryAttr(devices)); } mlir::LogicalResult GetDevicesFromOp(mlir::Operation* op, mlir::TF::RuntimeDevices* devices) { auto devices_attr = op->getAttr(kDevicesAttr); if (!devices_attr) return mlir::success(); if (auto array_attr = mlir::dyn_cast<mlir::ArrayAttr>(devices_attr)) { return GetDevicesFromOp(op, array_attr, devices); } else if (auto dict_attr = mlir::dyn_cast<mlir::DictionaryAttr>(devices_attr)) { return GetDevicesFromOp(op, dict_attr, devices); } return op->emitOpError( llvm::formatv("unsupported '{0}' attribute", kDevicesAttr)); } mlir::LogicalResult GetDeviceOrdinalFromDeviceString(mlir::Location loc, llvm::StringRef device, int64_t* device_ordinal) { DeviceNameUtils::ParsedName parsed_name; if (!DeviceNameUtils::ParseFullName( absl::string_view(device.data(), device.size()), &parsed_name)) return mlir::emitError(loc) << "invalid device '" << device << "'"; if (!parsed_name.has_id) return mlir::emitError(loc) << "device '" << device << "' has no id"; *device_ordinal = parsed_name.id; return mlir::success(); } }
#include "tensorflow/compiler/mlir/tensorflow/utils/device_util.h" #include <memory> #include <string> #include <tuple> #include <utility> #include "llvm/ADT/SmallVector.h" #include "mlir/Dialect/Func/IR/FuncOps.h" #include "mlir/IR/Attributes.h" #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinAttributes.h" #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/Location.h" #include "mlir/IR/MLIRContext.h" #include "mlir/Support/LLVM.h" #include "mlir/Support/LogicalResult.h" #include "tensorflow/core/common_runtime/device.h" #include "tensorflow/core/common_runtime/device_set.h" #include "tensorflow/core/framework/device_attributes.pb.h" #include "tensorflow/core/ir/types/dialect.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/util/device_name_utils.h" namespace tensorflow { namespace { class FakeDevice : public Device { public: explicit FakeDevice(const DeviceAttributes& device_attributes) : Device(nullptr, device_attributes) {} Status Sync() override { return errors::Unimplemented("FakeDevice::Sync()"); } static std::unique_ptr<Device> Make(const string& name, const string& desc = "") { DeviceNameUtils::ParsedName parsed_name; DeviceNameUtils::ParseFullName(name, &parsed_name); DeviceAttributes device_attributes; device_attributes.set_name(name); device_attributes.set_device_type(parsed_name.type); device_attributes.set_physical_device_desc(desc); return std::make_unique<FakeDevice>(device_attributes); } }; TEST(DeviceUtilTest, AddDeviceToOp) { mlir::MLIRContext context; context.loadDialect<mlir::tf_type::TFTypeDialect>(); mlir::OwningOpRef<mlir::ModuleOp> module_ref = mlir::ModuleOp::create(mlir::UnknownLoc::get(&context)); const std::string cpu0 = "/job:worker/replica:0/task:0/device:CPU:0"; const std::string gpu0 = "/job:worker/replica:1/task:2/device:GPU:0"; const std::string gpu1 = "/job:worker/replica:1/task:2/device:GPU:1"; llvm::SmallVector<std::unique_ptr<Device>, 2> devices; devices.push_back(FakeDevice::Make(cpu0)); devices.push_back(FakeDevice::Make(gpu0, "compute capability: 7.0")); devices.push_back(FakeDevice::Make(gpu1)); DeviceSet device_set; for (auto& device : devices) device_set.AddDevice(device.get()); AddDevicesToOp(*module_ref, &device_set); auto devices_attr = (*module_ref)->getAttrOfType<mlir::DictionaryAttr>("tf.devices"); ASSERT_NE(devices_attr, nullptr); ASSERT_EQ(devices_attr.size(), 3); auto device_meta_0 = mlir::dyn_cast<mlir::UnitAttr>(devices_attr.get(cpu0)); ASSERT_NE(device_meta_0, nullptr); auto device_meta_1 = mlir::dyn_cast<mlir::TF::GpuDeviceMetadata>(devices_attr.get(gpu0)); ASSERT_NE(device_meta_1, nullptr); ASSERT_EQ(device_meta_1.getCcMajor(), 7); ASSERT_EQ(device_meta_1.getCcMinor(), 0); auto device_meta_2 = mlir::dyn_cast<mlir::UnitAttr>(devices_attr.get(gpu1)); ASSERT_NE(device_meta_2, nullptr); } TEST(DeviceUtilTest, AddDeviceToOpNullDeviceSet) { mlir::MLIRContext context; mlir::OwningOpRef<mlir::ModuleOp> module_ref = mlir::ModuleOp::create(mlir::UnknownLoc::get(&context)); AddDevicesToOp(*module_ref, nullptr); EXPECT_EQ((*module_ref)->getAttr("tf.devices"), nullptr); } TEST(DeviceUtilTest, GetDevicesFromOpNoDevicesAttribute) { mlir::MLIRContext context; mlir::OwningOpRef<mlir::ModuleOp> module_ref = mlir::ModuleOp::create(mlir::UnknownLoc::get(&context)); mlir::TF::RuntimeDevices devices; EXPECT_TRUE(mlir::succeeded(GetDevicesFromOp(*module_ref, &devices))); } TEST(DeviceUtilTest, GetDevicesFromOpBadDevicesAttributeType) { 
mlir::MLIRContext context; mlir::OwningOpRef<mlir::ModuleOp> module_ref = mlir::ModuleOp::create(mlir::UnknownLoc::get(&context)); mlir::Builder builder(*module_ref); (*module_ref)->setAttr("tf.devices", builder.getBoolAttr(false)); mlir::TF::RuntimeDevices devices; EXPECT_TRUE(mlir::failed(GetDevicesFromOp(*module_ref, &devices))); } TEST(DeviceUtilTest, GetDevicesFromOpBadDevicesAttributeArraySubtype) { mlir::MLIRContext context; mlir::OwningOpRef<mlir::ModuleOp> module_ref = mlir::ModuleOp::create(mlir::UnknownLoc::get(&context)); mlir::Builder builder(*module_ref); (*module_ref)->setAttr("tf.devices", builder.getI32ArrayAttr({8})); mlir::TF::RuntimeDevices devices; EXPECT_TRUE(mlir::failed(GetDevicesFromOp(*module_ref, &devices))); } TEST(DeviceUtilTest, GetDevicesFromOpBadDevicesInDevicesAttribute) { mlir::MLIRContext context; mlir::OwningOpRef<mlir::ModuleOp> module_ref = mlir::ModuleOp::create(mlir::UnknownLoc::get(&context)); mlir::Builder builder(*module_ref); (*module_ref) ->setAttr("tf.devices", builder.getDictionaryAttr(builder.getNamedAttr( "bad_device", builder.getDictionaryAttr({})))); mlir::TF::RuntimeDevices devices; EXPECT_TRUE(mlir::failed(GetDevicesFromOp(*module_ref, &devices))); } TEST(DeviceUtilTest, GetDevicesFromOpValidDeviceInDevicesAttribute) { mlir::MLIRContext context; mlir::OwningOpRef<mlir::ModuleOp> module_ref = mlir::ModuleOp::create(mlir::UnknownLoc::get(&context)); mlir::Builder builder(*module_ref); auto device_dict = builder.getDictionaryAttr( {builder.getNamedAttr("/job:worker/replica:0/task:0/device:CPU:0", builder.getDictionaryAttr({}))}); (*module_ref)->setAttr("tf.devices", device_dict); mlir::TF::RuntimeDevices devices; EXPECT_TRUE(mlir::succeeded(GetDevicesFromOp(*module_ref, &devices))); ASSERT_EQ(devices.NumDevices(), 1); ASSERT_EQ(devices.device_names().size(), 1); ASSERT_EQ(DeviceNameUtils::ParsedNameToString(devices.device_names()[0]), "/job:worker/replica:0/task:0/device:CPU:0"); } TEST(DeviceUtilTest, GetGpuDeviceMetadata) { mlir::MLIRContext context; context.loadDialect<mlir::tf_type::TFTypeDialect>(); mlir::OwningOpRef<mlir::ModuleOp> module_ref = mlir::ModuleOp::create(mlir::UnknownLoc::get(&context)); mlir::Builder builder(*module_ref); const std::string gpu0 = "/job:worker/replica:0/task:0/device:GPU:0"; const std::string gpu1 = "/job:worker/replica:0/task:0/device:GPU:1"; llvm::SmallVector<mlir::NamedAttribute, 2> metadata; metadata.push_back(builder.getNamedAttr( gpu0, mlir::TF::GpuDeviceMetadata::get(module_ref->getContext(), 1, 2))); (*module_ref)->setAttr("tf.devices", builder.getDictionaryAttr(metadata)); mlir::TF::RuntimeDevices devices; EXPECT_TRUE(mlir::succeeded(GetDevicesFromOp(*module_ref, &devices))); DeviceNameUtils::ParsedName parsed_name; DeviceNameUtils::ParseFullName(gpu0, &parsed_name); auto meta_0 = devices.GetGpuDeviceMetadata(parsed_name); ASSERT_TRUE(meta_0.has_value()); ASSERT_EQ(meta_0->getCcMajor(), 1); ASSERT_EQ(meta_0->getCcMinor(), 2); DeviceNameUtils::ParseFullName(gpu1, &parsed_name); auto meta_1 = devices.GetGpuDeviceMetadata(parsed_name); ASSERT_FALSE(meta_1.has_value()); } TEST(DeviceUtilTest, GetDeviceOrdinalFromDeviceString) { const std::string tpu0 = "/job:worker/replica:0/task:0/device:TPU:0"; const std::string tpu1 = "/job:worker/replica:0/task:0/device:TPU:1"; mlir::MLIRContext context; auto unknown_loc = mlir::UnknownLoc::get(&context); int64_t device_ordinal0 = -1; mlir::LogicalResult result0 = GetDeviceOrdinalFromDeviceString(unknown_loc, tpu0, &device_ordinal0); 
EXPECT_TRUE(mlir::succeeded(result0)); EXPECT_EQ(device_ordinal0, 0); int64_t device_ordinal1 = -1; mlir::LogicalResult result1 = GetDeviceOrdinalFromDeviceString(unknown_loc, tpu1, &device_ordinal1); EXPECT_TRUE(mlir::succeeded(result1)); EXPECT_EQ(device_ordinal1, 1); } TEST(DeviceUtilTest, GetDeviceOrdinalFromDeviceStringInvalid) { mlir::MLIRContext context; auto unknown_loc = mlir::UnknownLoc::get(&context); int64_t device_ordinal = -1; mlir::LogicalResult result = GetDeviceOrdinalFromDeviceString( unknown_loc, "bad_device", &device_ordinal); EXPECT_TRUE(mlir::failed(result)); } TEST(DeviceUtilTest, GetDeviceOrdinalFromDeviceStringNoId) { const std::string tpu_no_id = "/job:worker/replica:0/task:0/device:TPU"; mlir::MLIRContext context; auto unknown_loc = mlir::UnknownLoc::get(&context); int64_t device_ordinal = -1; mlir::LogicalResult result = GetDeviceOrdinalFromDeviceString(unknown_loc, tpu_no_id, &device_ordinal); EXPECT_TRUE(mlir::failed(result)); } } }
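The GPU metadata parsing in device_util.cc above hinges on one regular expression applied to the device's physical_device_desc. The standalone sketch below shows that match; it substitutes std::regex for llvm::Regex only to stay self-contained, and the description string is an invented example.

#include <iostream>
#include <regex>
#include <string>

int main() {
  // Same pattern ParseGpuDeviceMetadata() matches against a GPU device's
  // physical_device_desc to recover the compute capability major/minor pair.
  const std::regex re("compute capability: ([0-9]+)\\.([0-9]+)");
  const std::string desc =
      "device: 0, name: Tesla V100, pci bus id: 0000:00:04.0, "
      "compute capability: 7.0";
  std::smatch match;
  if (std::regex_search(desc, match, re)) {
    std::cout << "cc_major=" << std::stoi(match[1].str())
              << " cc_minor=" << std::stoi(match[2].str()) << "\n";  // 7 and 0
  }
}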
1077
cpp
tensorflow/tensorflow
xla_compiler_options_util
tensorflow/compiler/jit/xla_compiler_options_util.cc
tensorflow/compiler/jit/xla_compiler_options_util_test.cc
#ifndef TENSORFLOW_COMPILER_JIT_XLA_COMPILER_OPTIONS_UTIL_H_ #define TENSORFLOW_COMPILER_JIT_XLA_COMPILER_OPTIONS_UTIL_H_ #include "tensorflow/compiler/jit/device_compiler.h" #include "tensorflow/compiler/jit/xla_platform_info.h" #include "tensorflow/compiler/tf2xla/xla_compiler.h" #include "xla/client/local_client.h" #include "xla/pjrt/pjrt_client.h" namespace tensorflow { XlaCompiler::Options GenerateCompilerOptions( const DeviceCompiler<xla::LocalExecutable, xla::LocalClient>& xla_device_compiler, const FunctionLibraryRuntime& function_library, DeviceBase* device, se::Stream* stream, const XlaPlatformInfo& platform_info, bool has_ref_vars); XlaCompiler::Options GenerateCompilerOptionsForTfrtTpu( const DeviceCompiler<xla::LocalExecutable, xla::LocalClient>& xla_device_compiler, const FunctionLibraryRuntime& function_library); XlaCompiler::Options GenerateCompilerOptionsForPjRt( const FunctionLibraryRuntime& function_library, const DeviceBase* device_base, const XlaPlatformInfo& platform_info, const DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>* pjrt_device_compiler); XlaCompiler::Options GenerateCompilerOptionsForPjRt( const FunctionLibraryDefinition* function_library_def, int graph_def_version, const DeviceBase* device_base, const XlaPlatformInfo& platform_info, const DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>* pjrt_device_compiler); XlaCompiler::CompileOptions GenerateCompileOptions( bool has_ref_vars, bool may_alias_resource_update); } #endif #include "tensorflow/compiler/jit/xla_compiler_options_util.h" #include "xla/pjrt/pjrt_client.h" #include "xla/tsl/framework/device_id_utils.h" #include "tensorflow/core/framework/function.h" namespace tensorflow { namespace { using XlaDeviceCompiler = DeviceCompiler<xla::LocalExecutable, xla::LocalClient>; using PjRtDeviceCompiler = DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>; inline void LogOptions(const XlaCompiler::Options& options) { VLOG(2) << "XlaCompiler::Options[device_type=" << options.device_type << ",device_ordinal=" << options.device_ordinal << ",client=" << options.client << ",flib_def=" << options.flib_def << ",graph_def_version=" << options.graph_def_version << ",options.shape_determination_fns.layout_preference_fn?=" << (options.shape_determination_fns.layout_preference_fn != nullptr) << ",options.shape_determination_fns.shape_representation_fn?=" << (options.shape_determination_fns.shape_representation_fn != nullptr) << ",allow_cpu_custom_calls=" << options.allow_cpu_custom_calls << ",populate_resource_manager=" << options.populate_resource_manager << ",alias_passthrough_params=" << options.alias_passthrough_params << ",detailed_logging=" << options.detailed_logging << "]"; } } XlaCompiler::Options GenerateCompilerOptions( const XlaDeviceCompiler& xla_device_compiler, const FunctionLibraryRuntime& function_library, DeviceBase* device, se::Stream* stream, const XlaPlatformInfo& platform_info, bool has_ref_vars) { XlaCompiler::Options options; options.client = static_cast<xla::LocalClient*>(xla_device_compiler.client()); if (stream != nullptr) { options.device_ordinal = stream->parent()->device_ordinal(); } options.device_type = xla_device_compiler.device_type(); options.flib_def = function_library.GetFunctionLibraryDefinition(); options.graph_def_version = function_library.graph_def_version(); options.allow_cpu_custom_calls = (platform_info.platform_id() == se::host::kHostPlatformId); options.device_allocator = GetAllocator(device, stream, platform_info); if 
(platform_info.xla_device_metadata()) { options.shape_determination_fns = platform_info.xla_device_metadata()->default_shape_determination_fns(); } options.alias_passthrough_params = !has_ref_vars && !platform_info.is_on_xla_device(); LogOptions(options); return options; } XlaCompiler::Options GenerateCompilerOptionsForTfrtTpu( const XlaDeviceCompiler& xla_device_compiler, const FunctionLibraryRuntime& function_library) { XlaCompiler::Options options; options.device_type = xla_device_compiler.device_type(); options.flib_def = function_library.GetFunctionLibraryDefinition(); options.graph_def_version = function_library.graph_def_version(); options.allow_cpu_custom_calls = false; options.alias_passthrough_params = false; return options; } XlaCompiler::Options GenerateCompilerOptionsForPjRt( const FunctionLibraryRuntime& function_library, const DeviceBase* device_base, const XlaPlatformInfo& platform_info, const DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>* pjrt_device_compiler) { return GenerateCompilerOptionsForPjRt( function_library.GetFunctionLibraryDefinition(), function_library.graph_def_version(), device_base, platform_info, pjrt_device_compiler); } XlaCompiler::Options GenerateCompilerOptionsForPjRt( const FunctionLibraryDefinition* function_library_def, int graph_def_version, const DeviceBase* device_base, const XlaPlatformInfo& platform_info, const PjRtDeviceCompiler* pjrt_device_compiler) { XlaCompiler::Options options; absl::StatusOr<int> platform_device_id = tsl::GetPlatformDeviceIdFromDeviceParsedName( device_base->parsed_name(), DeviceType(tensorflow::down_cast<const Device*>(device_base) ->device_type())); if (platform_device_id.ok()) { options.device_ordinal = *platform_device_id; } else { options.device_ordinal = device_base->parsed_name().id; } options.flib_def = function_library_def; options.graph_def_version = graph_def_version; if (const auto* metadata = platform_info.xla_device_metadata(); metadata != nullptr) { options.device_type = metadata->jit_device_type(); options.shape_determination_fns = metadata->default_shape_determination_fns(); } else if (const auto* metadata = platform_info.pjrt_device_metadata(); metadata != nullptr) { options.device_type = metadata->jit_device_type(); options.shape_determination_fns = metadata->default_shape_determination_fns(); } else if (pjrt_device_compiler != nullptr) { options.device_type = pjrt_device_compiler->device_type(); } options.allow_cpu_custom_calls = false; options.alias_passthrough_params = false; LogOptions(options); return options; } XlaCompiler::CompileOptions GenerateCompileOptions( bool has_ref_vars, bool may_alias_resource_update) { XlaCompiler::CompileOptions compile_options; compile_options.is_entry_computation = true; compile_options.always_return_tuple = false; compile_options.alias_resource_update = !has_ref_vars && may_alias_resource_update; return compile_options; } }
#include "tensorflow/compiler/jit/xla_compiler_options_util.h" #include <memory> #include <optional> #include <string> #include <utility> #include <vector> #include <gtest/gtest.h> #include "tensorflow/compiler/jit/flags.h" #include "tensorflow/compiler/jit/pjrt_device_compiler_client.h" #include "tensorflow/compiler/jit/test_util.h" #include "tensorflow/compiler/jit/xla_platform_info.h" #include "tensorflow/compiler/tf2xla/xla_compiler.h" #include "tensorflow/compiler/tf2xla/xla_helpers.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/client/client_library.h" #include "xla/pjrt/pjrt_client.h" #include "xla/xla_data.pb.h" #include "tensorflow/core/framework/device.h" #include "tensorflow/core/framework/device_base.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/tpu/tpu_defs.h" namespace tensorflow { namespace { using XlaDeviceCompiler = DeviceCompiler<xla::LocalExecutable, xla::LocalClient>; using XlaDeviceExecutablePersistor = DeviceExecutablePersistor<xla::LocalExecutable, xla::LocalClient>; using PjRtDeviceCompiler = DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>; using PjRtDeviceExecutablePersistor = DeviceExecutablePersistor<xla::PjRtLoadedExecutable, xla::PjRtClient>; XlaDeviceCompiler* CreateXlaDeviceCompiler(DeviceType device_type, xla::LocalClient* local_client) { auto persistor = std::make_unique<XlaDeviceExecutablePersistor>( XlaDeviceExecutablePersistor::Config(), device_type); auto compiler_client = std::make_unique<XlaDeviceCompilerClient>(local_client); return new XlaDeviceCompiler(std::move(persistor), std::move(compiler_client)); } PjRtDeviceCompiler* CreatePjRtDeviceCompiler(DeviceType device_type, xla::PjRtClient* pjrt_client) { auto persistor = std::make_unique<PjRtDeviceExecutablePersistor>( PjRtDeviceExecutablePersistor::Config(), device_type); auto compiler_client = std::make_unique<PjRtDeviceCompilerClient>(pjrt_client); return new PjRtDeviceCompiler(std::move(persistor), std::move(compiler_client)); } std::vector<XlaShapeLayoutHelpers::ShapeDeterminationFns> GetShapeDeterminationFns() { XlaHelpers::ShapeRepresentationFn shape_representation_fn = [](const TensorShape&, DataType, bool, XlaLayoutPreference) { return xla::Shape(); }; XlaShapeLayoutHelpers::LayoutPreferenceFn layout_preference_fn = [](const TensorShape&, DataType, std::optional<XlaArgument::Kind>) { return tensorflow::XlaLayoutPreference::kTpuPreferLinearLayout; }; return {XlaShapeLayoutHelpers::ShapeDeterminationFns{ layout_preference_fn, shape_representation_fn}}; } std::unique_ptr<XlaDevice::Metadata> CreateXlaDeviceMetadata( DeviceType compilation_device_type) { return std::make_unique<XlaDevice::Metadata>( 0, nullptr, compilation_device_type, GetShapeDeterminationFns(), XlaDevice::PaddedShapeFn(), false); } std::unique_ptr<PjRtBaseDevice::Metadata> CreatePjRtDeviceMetadata( DeviceType compilation_device_type) { return std::make_unique<PjRtBaseDevice::Metadata>(compilation_device_type, GetShapeDeterminationFns()); } class XlaCompilerOptionsTest : public ::testing::Test { protected: void SetUp() override { tensorflow::GetXlaDeviceFlags()->tf_xla_enable_xla_devices = true; } DeviceSetup device_setup_; }; TEST_F(XlaCompilerOptionsTest, PjRtOptionsXlaDevice) { device_setup_.AddDevicesAndSetUp({DEVICE_XLA_GPU}); Device* device = device_setup_.GetDevice(DEVICE_XLA_GPU); DeviceType compilation_device_type = DeviceType(DEVICE_GPU_XLA_JIT); se::Platform::Id platform_id = nullptr; auto 
xla_device_metadata = CreateXlaDeviceMetadata(compilation_device_type); std::shared_ptr<se::DeviceMemoryAllocator> custom_allocator; XlaPlatformInfo platform_info( compilation_device_type, platform_id, xla_device_metadata.get(), nullptr, custom_allocator); XlaCompiler::Options options = GenerateCompilerOptionsForPjRt( *device_setup_.flr(), device, platform_info, nullptr); EXPECT_EQ(options.device_type, compilation_device_type); EXPECT_EQ(options.device_ordinal, 0); EXPECT_NE(options.flib_def, nullptr); EXPECT_EQ(options.graph_def_version, TF_GRAPH_DEF_VERSION); EXPECT_FALSE(options.allow_cpu_custom_calls); EXPECT_FALSE(options.alias_passthrough_params); TF_ASSERT_OK_AND_ASSIGN( auto shape, options.shape_determination_fns.shape_representation_fn( TensorShape(), DT_FLOAT, false, tensorflow::XlaLayoutPreference::kTpuPreferLinearLayout)); EXPECT_EQ(shape, xla::Shape()); EXPECT_EQ(options.shape_determination_fns.layout_preference_fn( TensorShape(), DT_FLOAT, std::nullopt), tensorflow::XlaLayoutPreference::kTpuPreferLinearLayout); } TEST_F(XlaCompilerOptionsTest, PjRtOptionsPjRtBaseDevice) { device_setup_.AddDevicesAndSetUp({DEVICE_CPU}); Device* device = device_setup_.GetDevice(DEVICE_CPU); DeviceType compilation_device_type = DeviceType(DEVICE_CPU_XLA_JIT); auto pjrt_device_metadata = CreatePjRtDeviceMetadata(compilation_device_type); XlaPlatformInfo platform_info( compilation_device_type, nullptr, nullptr, pjrt_device_metadata.get(), nullptr); XlaCompiler::Options options = GenerateCompilerOptionsForPjRt( *device_setup_.flr(), device, platform_info, nullptr); EXPECT_EQ(options.device_type, compilation_device_type); EXPECT_EQ(options.device_ordinal, 0); EXPECT_NE(options.flib_def, nullptr); EXPECT_EQ(options.graph_def_version, TF_GRAPH_DEF_VERSION); EXPECT_FALSE(options.allow_cpu_custom_calls); EXPECT_FALSE(options.alias_passthrough_params); TF_ASSERT_OK_AND_ASSIGN( auto shape, options.shape_determination_fns.shape_representation_fn( TensorShape(), DT_FLOAT, false, tensorflow::XlaLayoutPreference::kTpuPreferLinearLayout)); EXPECT_EQ(shape, xla::Shape()); EXPECT_EQ(options.shape_determination_fns.layout_preference_fn( TensorShape(), DT_FLOAT, std::nullopt), tensorflow::XlaLayoutPreference::kTpuPreferLinearLayout); } TEST_F(XlaCompilerOptionsTest, PjRtOptionsNonXlaDevice) { device_setup_.AddDevicesAndSetUp({DEVICE_CPU}); Device* device = device_setup_.GetDevice(DEVICE_CPU); DeviceType compilation_device_type = DeviceType(DEVICE_CPU_XLA_JIT); XlaPlatformInfo platform_info(compilation_device_type, nullptr, nullptr, nullptr, nullptr); auto pjrt_device_compiler = CreatePjRtDeviceCompiler(compilation_device_type, nullptr); core::ScopedUnref pjrt_device_compiler_ref(pjrt_device_compiler); XlaCompiler::Options options = GenerateCompilerOptionsForPjRt( *device_setup_.flr(), device, platform_info, pjrt_device_compiler); EXPECT_EQ(options.device_type, compilation_device_type); EXPECT_EQ(options.device_ordinal, 0); EXPECT_NE(options.flib_def, nullptr); EXPECT_EQ(options.graph_def_version, TF_GRAPH_DEF_VERSION); EXPECT_FALSE(options.allow_cpu_custom_calls); EXPECT_FALSE(options.alias_passthrough_params); TF_ASSERT_OK_AND_ASSIGN( auto shape, options.shape_determination_fns.shape_representation_fn( TensorShape(), DT_FLOAT, false, tensorflow::XlaLayoutPreference::kNoPreference)); xla::ShapeProto shape_proto; shape_proto.set_element_type(xla::PrimitiveType::F32); shape_proto.mutable_layout(); EXPECT_EQ(shape, xla::Shape(shape_proto)); EXPECT_EQ(options.shape_determination_fns.layout_preference_fn( 
TensorShape(), DT_FLOAT, std::nullopt), tensorflow::XlaLayoutPreference::kNoPreference); } TEST_F(XlaCompilerOptionsTest, XlaOptions) { device_setup_.AddDevicesAndSetUp({DEVICE_XLA_GPU}); Device* device = device_setup_.GetDevice(DEVICE_XLA_GPU); xla::LocalClient* client = xla::ClientLibrary::LocalClientOrDie(); DeviceType device_type = DeviceType(DEVICE_XLA_GPU); DeviceType compilation_device_type = DeviceType(DEVICE_GPU_XLA_JIT); auto xla_device_compiler = CreateXlaDeviceCompiler(compilation_device_type, client); core::ScopedUnref xla_device_compiler_ref(xla_device_compiler); se::Platform::Id platform_id = se::host::kHostPlatformId; auto xla_device_metadata = CreateXlaDeviceMetadata(compilation_device_type); std::shared_ptr<se::DeviceMemoryAllocator> custom_allocator; XlaPlatformInfo platform_info( device_type, platform_id, xla_device_metadata.get(), nullptr, custom_allocator); XlaCompiler::Options options = GenerateCompilerOptions(*xla_device_compiler, *device_setup_.flr(), device, nullptr, platform_info, false); EXPECT_EQ(options.device_type, compilation_device_type); EXPECT_NE(options.flib_def, nullptr); EXPECT_EQ(options.graph_def_version, TF_GRAPH_DEF_VERSION); EXPECT_TRUE(options.allow_cpu_custom_calls); EXPECT_NE(options.device_allocator, nullptr); EXPECT_FALSE(options.alias_passthrough_params); TF_ASSERT_OK_AND_ASSIGN( auto shape, options.shape_determination_fns.shape_representation_fn( TensorShape(), DT_FLOAT, false, tensorflow::XlaLayoutPreference::kTpuPreferLinearLayout)); EXPECT_EQ(shape, xla::Shape()); EXPECT_EQ(options.shape_determination_fns.layout_preference_fn( TensorShape(), DT_FLOAT, std::nullopt), tensorflow::XlaLayoutPreference::kTpuPreferLinearLayout); } TEST_F(XlaCompilerOptionsTest, XlaOptionsHasRefVarsNoXlaDeviceMetadata) { device_setup_.AddDevicesAndSetUp({DEVICE_CPU}); Device* device = device_setup_.GetDevice(DEVICE_CPU); xla::LocalClient* client = xla::ClientLibrary::LocalClientOrDie(); DeviceType device_type = DeviceType(DEVICE_CPU); DeviceType compilation_device_type = DeviceType(DEVICE_CPU_XLA_JIT); auto xla_device_compiler = CreateXlaDeviceCompiler(compilation_device_type, client); core::ScopedUnref xla_device_compiler_ref(xla_device_compiler); se::Platform::Id platform_id = se::host::kHostPlatformId; std::shared_ptr<se::DeviceMemoryAllocator> custom_allocator; XlaPlatformInfo platform_info( device_type, platform_id, nullptr, nullptr, custom_allocator); XlaCompiler::Options options = GenerateCompilerOptions(*xla_device_compiler, *device_setup_.flr(), device, nullptr, platform_info, false); EXPECT_EQ(options.device_type, compilation_device_type); EXPECT_NE(options.flib_def, nullptr); EXPECT_EQ(options.graph_def_version, TF_GRAPH_DEF_VERSION); EXPECT_TRUE(options.allow_cpu_custom_calls); EXPECT_NE(options.device_allocator, nullptr); EXPECT_TRUE(options.alias_passthrough_params); TF_ASSERT_OK_AND_ASSIGN( auto shape, options.shape_determination_fns.shape_representation_fn( TensorShape(), DT_FLOAT, false, tensorflow::XlaLayoutPreference::kNoPreference)); xla::ShapeProto shape_proto; shape_proto.set_element_type(xla::PrimitiveType::F32); shape_proto.mutable_layout(); EXPECT_EQ(shape, xla::Shape(shape_proto)); EXPECT_EQ(options.shape_determination_fns.layout_preference_fn( TensorShape(), DT_FLOAT, std::nullopt), tensorflow::XlaLayoutPreference::kNoPreference); } TEST_F(XlaCompilerOptionsTest, TfRtTpuOptions) { device_setup_.AddDevicesAndSetUp({DEVICE_TPU_NODE}); xla::LocalClient* client = xla::ClientLibrary::LocalClientOrDie(); DeviceType compilation_device_type = 
DeviceType(DEVICE_TPU_XLA_JIT); auto xla_device_compiler = CreateXlaDeviceCompiler(compilation_device_type, client); core::ScopedUnref xla_device_compiler_ref(xla_device_compiler); XlaCompiler::Options options = GenerateCompilerOptionsForTfrtTpu( *xla_device_compiler, *device_setup_.flr()); EXPECT_EQ(options.device_type, compilation_device_type); EXPECT_NE(options.flib_def, nullptr); EXPECT_EQ(options.graph_def_version, TF_GRAPH_DEF_VERSION); EXPECT_FALSE(options.allow_cpu_custom_calls); EXPECT_FALSE(options.alias_passthrough_params); } TEST_F(XlaCompilerOptionsTest, GenerateCompileOptions) { XlaCompiler::CompileOptions option1 = GenerateCompileOptions( false, false); EXPECT_TRUE(option1.is_entry_computation); EXPECT_FALSE(option1.always_return_tuple); EXPECT_FALSE(option1.alias_resource_update); XlaCompiler::CompileOptions option2 = GenerateCompileOptions( false, true); EXPECT_TRUE(option2.alias_resource_update); XlaCompiler::CompileOptions option3 = GenerateCompileOptions( true, false); EXPECT_FALSE(option3.alias_resource_update); XlaCompiler::CompileOptions option4 = GenerateCompileOptions( true, true); EXPECT_FALSE(option4.alias_resource_update); } } }
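The aliasing decision in GenerateCompileOptions() above reduces to a single boolean expression; the short standalone sketch below prints its full truth table, matching what the GenerateCompileOptions test checks.

#include <iostream>

int main() {
  // Resource updates may only be aliased when the cluster has no reference
  // variables and the caller permits aliasing.
  for (bool has_ref_vars : {false, true}) {
    for (bool may_alias_resource_update : {false, true}) {
      const bool alias_resource_update =
          !has_ref_vars && may_alias_resource_update;
      std::cout << "has_ref_vars=" << has_ref_vars
                << " may_alias_resource_update=" << may_alias_resource_update
                << " -> alias_resource_update=" << alias_resource_update
                << "\n";
    }
  }
}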
1078
cpp
tensorflow/tensorflow
force_xla_constants_on_host_pass
tensorflow/compiler/jit/force_xla_constants_on_host_pass.cc
tensorflow/compiler/jit/force_xla_constants_on_host_pass_test.cc
#ifndef TENSORFLOW_COMPILER_JIT_FORCE_XLA_CONSTANTS_ON_HOST_PASS_H_ #define TENSORFLOW_COMPILER_JIT_FORCE_XLA_CONSTANTS_ON_HOST_PASS_H_ #include "absl/container/flat_hash_set.h" #include "tensorflow/compiler/jit/compilability_check_util.h" #include "tensorflow/core/common_runtime/optimization_registry.h" namespace tensorflow { class ForceXlaConstantsOnHostPass : public GraphOptimizationPass { public: ForceXlaConstantsOnHostPass() = default; Status Run(const GraphOptimizationPassOptions& options) override; }; } #endif #include "tensorflow/compiler/jit/force_xla_constants_on_host_pass.h" #include "tensorflow/compiler/jit/compilability_check_util.h" #include "tensorflow/compiler/jit/defs.h" #include "tensorflow/core/common_runtime/optimization_registry.h" namespace tensorflow { Status ForceXlaConstantsOnHostPass::Run( const GraphOptimizationPassOptions& options) { Graph* graph = options.graph->get(); OptimizerOptions opts; auto pflr = std::make_unique<ProcessFunctionLibraryRuntime>( nullptr, options.session_options->env, nullptr, TF_GRAPH_DEF_VERSION, options.flib_def, opts); FunctionLibraryRuntime* flr = pflr->GetFLR(ProcessFunctionLibraryRuntime::kDefaultFLRDevice); for (Node* node : graph->nodes()) { if (CanCreateXlaKernel(node->def())) { const FunctionBody* fbody = nullptr; std::vector<int> constant_arg_indices; std::vector<int> resource_arg_indices; NameAttrList function; TF_RETURN_IF_ERROR(NameAndAttrsFromFunctionCall(node->def(), &function)); TF_RETURN_IF_ERROR(GetBodyAndConstantsAndResources( flr, function, &fbody, &constant_arg_indices, &resource_arg_indices)); VLOG(3) << "Found constant arg indices: " << absl::StrJoin(constant_arg_indices, ", "); node->AddAttr("_input_hostmem", constant_arg_indices); } } return absl::OkStatus(); } }
#include "tensorflow/compiler/jit/force_xla_constants_on_host_pass.h" #include "absl/strings/match.h" #include "absl/strings/str_cat.h" #include "tensorflow/cc/framework/ops.h" #include "tensorflow/cc/ops/functional_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/compiler/jit/compilability_check_util.h" #include "tensorflow/compiler/jit/defs.h" #include "tensorflow/compiler/jit/test_util.h" #include "tensorflow/core/common_runtime/function.h" #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/framework/function_testlib.h" #include "tensorflow/core/framework/node_def_util.h" #include "tensorflow/core/graph/graph_def_builder.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/public/session_options.h" #include "tensorflow/core/public/version.h" namespace tensorflow { namespace { Status ForceXlaConstantsOnHost(const Scope& s, FunctionLibraryDefinition* flib_def, std::unique_ptr<Graph>* result) { auto graph = std::make_unique<Graph>(OpRegistry::Global()); GraphOptimizationPassOptions options; SessionOptions session_options; session_options.env = Env::Default(); options.graph = &graph; options.session_options = &session_options; options.flib_def = flib_def; TF_RETURN_IF_ERROR(s.ToGraph(graph.get())); ForceXlaConstantsOnHostPass rewriter; TF_RETURN_IF_ERROR(rewriter.Run(options)); *result = std::move(graph); return absl::OkStatus(); } TEST(ForceXlaConstantsOnHostPassTest, Simple) { GraphDefBuilder b(GraphDefBuilder::kFailImmediately); Scope root = Scope::NewRootScope().ExitOnError(); FunctionDefLibrary library; FunctionDef called_func = FunctionDefHelper::Create("TransposeCall", {"a:float", "b:int32"}, {"c:float"}, {}, {{{"t0"}, "Transpose", {"a", "b"}, { {"T", DT_FLOAT}, {"Tperm", DT_INT32}, }}}, {{"c", "t0:y:0"}}); AttrValue true_attribute; true_attribute.set_b(true); (*called_func.mutable_attr())[kXlaMustCompileAttr] = true_attribute; *library.add_function() = called_func; TF_ASSERT_OK(root.graph()->AddFunctionLibrary(library)); FunctionLibraryDefinition flib_def(OpRegistry::Global(), library); Output in = ops::Placeholder(root, DT_FLOAT); Output perm = ops::Const(root, {3, 1, 2, 0}); NameAttrList b_name_attr; b_name_attr.set_name("TransposeCall"); ops::PartitionedCall call(root.WithOpName("call"), {in, perm}, {DT_FLOAT}, b_name_attr); call.output.front().node()->AddAttr(kXlaMustCompileAttr, true); std::unique_ptr<Graph> graph; TF_ASSERT_OK(ForceXlaConstantsOnHost(root, &flib_def, &graph)); bool found = false; for (Node* node : graph->nodes()) { if (CanCreateXlaKernel(node->def())) { EXPECT_FALSE(found); found = true; std::vector<int32> hostmem_attr; EXPECT_TRUE(TryGetNodeAttr(node->def(), "_input_hostmem", &hostmem_attr)); EXPECT_EQ(hostmem_attr.size(), 1); EXPECT_EQ(hostmem_attr[0], 1); } } EXPECT_TRUE(found); } } }
1079
cpp
tensorflow/tensorflow
xla_compile_util
tensorflow/compiler/jit/xla_compile_util.cc
tensorflow/compiler/jit/xla_compile_util_test.cc
#ifndef TENSORFLOW_COMPILER_JIT_XLA_COMPILE_UTIL_H_ #define TENSORFLOW_COMPILER_JIT_XLA_COMPILE_UTIL_H_ #include <memory> #include <string> #include "tensorflow/compiler/tf2xla/xla_argument.h" #include "tensorflow/core/graph/graph.h" namespace tensorflow { inline constexpr int64_t kNumAsyncDeviceCompilerThreads = 10; enum class DeviceCompileMode { kLazy, kStrict, kAsync, }; enum class DeviceCompileState { kUncompiled, kCompiling, kCompiled, }; absl::StatusOr<std::unique_ptr<Graph>> CreateSingleOpGraph( const NodeDef& node_def, absl::Span<const XlaArgument> args, absl::Span<const DataType> result_types); bool UsePjRtForSingleDeviceCompilation(const DeviceType& device_type); std::string GetPjRtDeviceCompilerResourceName(const DeviceType& device_type); std::string GetPjRtDeviceCompilationProfilerResourceName( const DeviceType& device_type); absl::StatusOr<ResourceMgr*> GetResourceMgrForDeviceCompiler( const OpKernelContext& ctx, const DeviceType& device_type); } #endif #include "tensorflow/compiler/jit/xla_compile_util.h" #include <memory> #include <string> #include <vector> #include "absl/status/status.h" #include "tensorflow/compiler/jit/flags.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/resource_mgr.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/graph/algorithm.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/tfrt/common/global_state.h" #include "tensorflow/core/util/determinism.h" namespace tensorflow { namespace { constexpr const char* kPjRtDeviceCompilerResourceName = "pjrt_device_compiler"; constexpr const char* kPjRtDeviceCompilationProfilerResourceName = "pjrt_device_compilation_profiler"; } absl::StatusOr<std::unique_ptr<Graph>> CreateSingleOpGraph( const NodeDef& node_def, absl::Span<const XlaArgument> args, absl::Span<const DataType> result_types) { std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); TF_ASSIGN_OR_RETURN(Node * main_node, graph->AddNode(node_def)); for (int64_t i = 0, end = args.size(); i < end; ++i) { Node* node; string arg_name = absl::StrCat("_arg", i); Status status = NodeBuilder(arg_name, FunctionLibraryDefinition::kArgOp) .ControlInput(graph->source_node()) .Attr("T", args[i].kind == XlaArgument::kResource ? 
DT_RESOURCE : args[i].type) .Attr("index", i) .Finalize(graph.get(), &node); TF_RETURN_IF_ERROR(status); graph->AddEdge(node, 0, main_node, i); } for (int64_t i = 0, end = result_types.size(); i < end; ++i) { Node* node; string retval_name = absl::StrCat("_retval", i); Status status = NodeBuilder(retval_name, FunctionLibraryDefinition::kRetOp) .Input(main_node, i) .Attr("T", result_types[i]) .Attr("index", i) .Finalize(graph.get(), &node); TF_RETURN_IF_ERROR(status); } FixupSourceAndSinkEdges(graph.get()); return graph; } bool UsePjRtForSingleDeviceCompilation(const DeviceType& device_type) { const auto& rollout_config = GetXlaOpsCommonFlags()->tf_xla_use_device_api; return rollout_config.IsEnabledInXlaLaunchForDevice(device_type) || rollout_config.IsEnabledInXlaCompileOnDemandForDevice(device_type) || rollout_config.IsEnabledInXlaCompileAndRunForDevice(device_type); } std::string GetPjRtDeviceCompilerResourceName(const DeviceType& device_type) { return absl::StrCat(kPjRtDeviceCompilerResourceName, "_", device_type.type_string()); } std::string GetPjRtDeviceCompilationProfilerResourceName( const DeviceType& device_type) { return absl::StrCat(kPjRtDeviceCompilationProfilerResourceName, "_", device_type.type_string()); } absl::StatusOr<ResourceMgr*> GetResourceMgrForDeviceCompiler( const OpKernelContext& ctx, const DeviceType& device_type) { ResourceMgr* rm = nullptr; if (device_type == DEVICE_TPU) { rm = tfrt_global::GetTFGlobalResourceMgr(); } else { rm = ctx.resource_manager(); } if (!rm) { return absl::InternalError("No resource manager found."); } return rm; } }
#include "tensorflow/compiler/jit/xla_compile_util.h" #include <memory> #include <vector> #include <gtest/gtest.h> #include "tensorflow/compiler/jit/flags.h" #include "tensorflow/compiler/tf2xla/xla_compiler.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/tpu/tpu_defs.h" namespace tensorflow { namespace { TEST_F(OpsTestBase, CreateSingleOpGraph) { TF_EXPECT_OK(NodeDefBuilder("identity_op", "Identity") .Input(FakeInput(DT_FLOAT)) .Attr("T", DT_FLOAT) .Finalize(node_def())); TF_EXPECT_OK(InitOp()); AddInputFromArray<float>(TensorShape({1, 2}), {6.9, 4.2}); TF_EXPECT_OK(RunOpKernel()); XlaCompiler::SingleOpCompileArgument single_op_arg(*context_); std::vector<XlaArgument> args(1); args[0].kind = XlaArgument::kConstant; args[0].type = DT_FLOAT; args[0].shape = TensorShape({1, 2}); args[0].constant_value = GetInput(0); args[0].initialized = true; TF_ASSERT_OK_AND_ASSIGN( auto graph, CreateSingleOpGraph(*node_def(), args, single_op_arg.output_dtypes)); const auto& node_name_index = graph->BuildNodeNameIndex(); const Node* identity_node = node_name_index.at("identity_op"); EXPECT_EQ(identity_node->op_def().name(), "Identity"); EXPECT_EQ(identity_node->attrs().FindByString("T")->type(), DT_FLOAT); EXPECT_EQ(identity_node->num_inputs(), 1); const Node* identity_input_node = nullptr; TF_EXPECT_OK(identity_node->input_node(0, &identity_input_node)); EXPECT_EQ(identity_input_node->name(), "_arg0"); const Node* arg_node = node_name_index.at("_arg0"); EXPECT_EQ(arg_node->op_def().name(), "_Arg"); EXPECT_EQ(arg_node->attrs().FindByString("T")->type(), DT_FLOAT); const Node* retval_node = node_name_index.at("_retval0"); EXPECT_EQ(retval_node->op_def().name(), "_Retval"); EXPECT_EQ(retval_node->attrs().FindByString("T")->type(), DT_FLOAT); EXPECT_EQ(identity_node->num_outputs(), 1); EXPECT_EQ(retval_node->num_inputs(), 1); const Node* retval_input_node = nullptr; TF_EXPECT_OK(retval_node->input_node(0, &retval_input_node)); EXPECT_EQ(retval_input_node->name(), "identity_op"); } TEST(XlaCompileUtilTest, PjRtXlaLaunchFlagTest) { EXPECT_FALSE(UsePjRtForSingleDeviceCompilation(DeviceType(DEVICE_CPU))); auto& rollout_config = GetXlaOpsCommonFlags()->tf_xla_use_device_api; rollout_config.enabled_for_xla_launch_ = true; EXPECT_FALSE(UsePjRtForSingleDeviceCompilation(DeviceType(DEVICE_CPU))); rollout_config.AllowForDeviceInXlaLaunch(DeviceType(DEVICE_GPU)); EXPECT_FALSE(UsePjRtForSingleDeviceCompilation(DeviceType(DEVICE_CPU))); rollout_config.AllowForDeviceInXlaLaunch(DeviceType(DEVICE_CPU)); EXPECT_TRUE(UsePjRtForSingleDeviceCompilation(DeviceType(DEVICE_CPU))); rollout_config.enabled_for_xla_launch_ = false; EXPECT_FALSE(UsePjRtForSingleDeviceCompilation(DeviceType(DEVICE_CPU))); } TEST(XlaCompileUtilTest, PjRtXlaCompileOnDemandFlagTest) { EXPECT_FALSE(UsePjRtForSingleDeviceCompilation(DeviceType(DEVICE_CPU))); auto& rollout_config = GetXlaOpsCommonFlags()->tf_xla_use_device_api; rollout_config.enabled_for_compile_on_demand_ = true; EXPECT_FALSE(UsePjRtForSingleDeviceCompilation(DeviceType(DEVICE_CPU))); rollout_config.AllowForDeviceInXlaCompileOnDemand(DeviceType(DEVICE_GPU)); EXPECT_FALSE(UsePjRtForSingleDeviceCompilation(DeviceType(DEVICE_CPU))); rollout_config.AllowForDeviceInXlaCompileOnDemand(DeviceType(DEVICE_CPU)); EXPECT_TRUE(UsePjRtForSingleDeviceCompilation(DeviceType(DEVICE_CPU))); rollout_config.enabled_for_compile_on_demand_ = false; EXPECT_FALSE(UsePjRtForSingleDeviceCompilation(DeviceType(DEVICE_CPU))); } 
TEST(XlaCompileUtilTest, PjRtDeviceCompilerResourceName) { EXPECT_EQ(GetPjRtDeviceCompilerResourceName(DeviceType(DEVICE_TPU)), "pjrt_device_compiler_TPU"); EXPECT_EQ(GetPjRtDeviceCompilerResourceName(DeviceType(DEVICE_TPU_NODE)), "pjrt_device_compiler_TPU"); EXPECT_EQ(GetPjRtDeviceCompilerResourceName(DeviceType(DEVICE_CPU)), "pjrt_device_compiler_CPU"); EXPECT_EQ(GetPjRtDeviceCompilerResourceName(DeviceType(DEVICE_GPU)), "pjrt_device_compiler_GPU"); } TEST(XlaCompileUtilTest, PjRtDeviceCompilationProfilerResourceName) { EXPECT_EQ( GetPjRtDeviceCompilationProfilerResourceName(DeviceType(DEVICE_TPU)), "pjrt_device_compilation_profiler_TPU"); EXPECT_EQ( GetPjRtDeviceCompilationProfilerResourceName(DeviceType(DEVICE_TPU_NODE)), "pjrt_device_compilation_profiler_TPU"); EXPECT_EQ( GetPjRtDeviceCompilationProfilerResourceName(DeviceType(DEVICE_CPU)), "pjrt_device_compilation_profiler_CPU"); EXPECT_EQ( GetPjRtDeviceCompilationProfilerResourceName(DeviceType(DEVICE_GPU)), "pjrt_device_compilation_profiler_GPU"); } } }
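A short sketch of the gating logic exercised by the tests above: UsePjRtForSingleDeviceCompilation only returns true once both the global rollout switch and the per-device allow-list are set. The field and method names are taken from the record above; the test scaffolding is elided and the helper name is illustrative only (code assumed to live inside namespace tensorflow).

// Sketch: both the global enable bit and the per-device allow-list matter.
#include "tensorflow/compiler/jit/flags.h"
#include "tensorflow/compiler/jit/xla_compile_util.h"
#include "tensorflow/core/framework/types.h"

bool CpuUsesPjRtAfterRollout() {
  auto& rollout_config = GetXlaOpsCommonFlags()->tf_xla_use_device_api;
  rollout_config.enabled_for_xla_launch_ = true;                     // global switch
  rollout_config.AllowForDeviceInXlaLaunch(DeviceType(DEVICE_CPU));  // per-device opt-in
  return UsePjRtForSingleDeviceCompilation(DeviceType(DEVICE_CPU));  // now true
}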
1,080
cpp
tensorflow/tensorflow
flags
tensorflow/core/config/flags.cc
tensorflow/core/config/flags_test.cc
#ifndef TENSORFLOW_CORE_COMMON_RUNTIME_NEXT_PLUGGABLE_DEVICE_FLAGS_H_ #define TENSORFLOW_CORE_COMMON_RUNTIME_NEXT_PLUGGABLE_DEVICE_FLAGS_H_ #include "absl/flags/declare.h" ABSL_DECLARE_FLAG(bool, next_pluggable_device_use_c_api); #endif #include "tensorflow/core/common_runtime/next_pluggable_device/flags.h" #include "absl/flags/flag.h" ABSL_FLAG(bool, next_pluggable_device_use_c_api, DEFAULT_TF_NEXT_PLUGGABLE_DEVICE_USE_C_API, "Uses next pluggable device c API.");
#include "tensorflow/core/config/flags.h" #include "tensorflow/core/config/flag_defs.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace { TEST(TFFlags, ReadFlagValue) { EXPECT_TRUE(flags::Global().test_only_experiment_1.value()); EXPECT_FALSE(flags::Global().test_only_experiment_2.value()); } TEST(TFFlags, ResetFlagValue) { EXPECT_TRUE(flags::Global().test_only_experiment_1.value()); flags::Global().test_only_experiment_1.reset(false); EXPECT_FALSE(flags::Global().test_only_experiment_1.value()); } } }
1,081
cpp
tensorflow/tensorflow
pjrt_compile_util
tensorflow/compiler/jit/pjrt_compile_util.cc
tensorflow/compiler/jit/pjrt_compile_util_test.cc
#ifndef TENSORFLOW_COMPILER_JIT_PJRT_COMPILE_UTIL_H_ #define TENSORFLOW_COMPILER_JIT_PJRT_COMPILE_UTIL_H_ #include "tensorflow/compiler/jit/xla_compile_util.h" #include "tensorflow/compiler/jit/xla_platform_info.h" #include "tensorflow/compiler/tf2xla/xla_compiler.h" #include "xla/pjrt/pjrt_client.h" #include "tensorflow/core/framework/device_base.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/resource_mgr.h" #include "tensorflow/core/platform/status.h" namespace tensorflow { Status CompileToPjRtLoadedExecutable( const OpKernelContext& ctx, const XlaPlatformInfo& platform_info, const NameAttrList& function, const std::vector<XlaCompiler::Argument>& args, DeviceCompileMode compile_mode, bool has_ref_vars, bool may_alias_resource_update, const XlaCompiler::CompilationResult** compilation_result, xla::PjRtClient** client, xla::PjRtLoadedExecutable** executable); Status CompileToPjRtLoadedExecutable( const DeviceBase* device, const XlaPlatformInfo& platform_info, const NameAttrList& function, const std::vector<XlaCompiler::Argument>& args, DeviceCompileMode compile_mode, bool has_ref_vars, bool may_alias_resource_update, FunctionLibraryRuntime* flr, ResourceMgr* rm, const XlaCompiler::CompilationResult** compilation_result, xla::PjRtClient** client, xla::PjRtLoadedExecutable** executable); } #endif #include "tensorflow/compiler/jit/pjrt_compile_util.h" #include <vector> #include "tensorflow/compiler/jit/device_compilation_profiler.h" #include "tensorflow/compiler/jit/device_compiler.h" #include "tensorflow/compiler/jit/xla_compile_util.h" #include "tensorflow/compiler/jit/xla_compiler_options_util.h" #include "tensorflow/compiler/jit/xla_platform_info.h" #include "tensorflow/compiler/tf2xla/xla_compiler.h" #include "xla/pjrt/pjrt_client.h" #include "tensorflow/core/framework/device_base.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/resource_mgr.h" #include "tensorflow/core/lib/core/refcount.h" #include "tensorflow/core/platform/status.h" #include "tsl/platform/errors.h" #include "tsl/platform/statusor.h" namespace tensorflow { using PjRtDeviceCompiler = DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>; Status CompileToPjRtLoadedExecutable( const DeviceBase* device, const XlaPlatformInfo& platform_info, const NameAttrList& function, const std::vector<XlaCompiler::Argument>& args, DeviceCompileMode compile_mode, bool has_ref_vars, bool may_alias_resource_update, FunctionLibraryRuntime* flr, ResourceMgr* rm, const XlaCompiler::CompilationResult** compilation_result, xla::PjRtClient** client, xla::PjRtLoadedExecutable** executable) { PjRtDeviceCompiler* pjrt_device_compiler; DeviceCompilationProfiler* profiler; TF_RETURN_IF_ERROR(GetOrCreatePjRtDeviceCompilerAndProfiler( platform_info, rm, flr, &pjrt_device_compiler, &profiler)); core::ScopedUnref pjrt_device_compiler_ref(pjrt_device_compiler); core::ScopedUnref profiler_ref(profiler); *client = pjrt_device_compiler->client(); XlaCompiler::Options options = GenerateCompilerOptionsForPjRt( *flr, device, platform_info, pjrt_device_compiler); XlaCompiler::CompileOptions compile_options = GenerateCompileOptions(has_ref_vars, may_alias_resource_update); return pjrt_device_compiler->CompileIfNeeded( options, function, args, compile_options, compile_mode, profiler, compilation_result, executable); } Status CompileToPjRtLoadedExecutable( const OpKernelContext& ctx, const XlaPlatformInfo& platform_info, const 
NameAttrList& function, const std::vector<XlaCompiler::Argument>& args, DeviceCompileMode compile_mode, bool has_ref_vars, bool may_alias_resource_update, const XlaCompiler::CompilationResult** compilation_result, xla::PjRtClient** client, xla::PjRtLoadedExecutable** executable) { TF_ASSIGN_OR_RETURN(ResourceMgr * rm, GetResourceMgrForDeviceCompiler( ctx, platform_info.device_type())); return CompileToPjRtLoadedExecutable( ctx.device(), platform_info, function, args, compile_mode, has_ref_vars, may_alias_resource_update, ctx.function_library(), rm, compilation_result, client, executable); } }
#if GOOGLE_CUDA || TENSORFLOW_USE_ROCM #include "tensorflow/compiler/jit/pjrt_compile_util.h" #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/cc/framework/scope.h" #include "tensorflow/cc/ops/function_ops.h" #include "tensorflow/cc/ops/math_ops.h" #include "tensorflow/compiler/jit/test_util.h" #include "tensorflow/core/framework/device_base.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/graph_to_functiondef.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/platform/statusor.h" namespace tensorflow { namespace { StatusOr<std::unique_ptr<Graph>> SampleGraphAddXY() { std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); Scope scope = Scope::NewRootScope().ExitOnError(); auto a = ops::_Arg(scope.WithOpName("A"), DT_INT32, 0); auto b = ops::_Arg(scope.WithOpName("B"), DT_INT32, 1); auto c = ops::Add(scope.WithOpName("C"), a, b); auto d = ops::_Retval(scope.WithOpName("D"), c, 0); TF_RETURN_IF_ERROR(scope.ToGraph(graph.get())); return graph; } StatusOr<FunctionDef> SampleFuntionAddXY(const std::string& name) { TF_ASSIGN_OR_RETURN(auto graph, SampleGraphAddXY()); FunctionDef fdef; TF_RETURN_IF_ERROR(GraphToFunctionDef(*graph, name, &fdef)); return fdef; } std::vector<XlaCompiler::Argument> SampleArgsForAddXY() { std::vector<XlaCompiler::Argument> args(2); args[0].kind = XlaCompiler::Argument::kParameter; args[0].type = DT_INT32; args[0].shape = TensorShape({2}); args[1].kind = XlaCompiler::Argument::kParameter; args[1].type = DT_INT32; args[1].shape = TensorShape({2}); return args; } TEST(PjrtCompileUtilTest, CompileToPjRtLoadedExecutable) { DeviceSetup device_setup; TF_ASSERT_OK_AND_ASSIGN(auto fdef, SampleFuntionAddXY("foo")); device_setup.AddDevicesAndSetUp({DEVICE_GPU}, fdef); Device* device = device_setup.GetDevice(DEVICE_GPU); const XlaPlatformInfo platform_info = XlaPlatformInfoFromDevice(device); NameAttrList function; function.set_name("foo"); ResourceMgr resource_mgr(""); const XlaCompiler::CompilationResult* compilation_result = nullptr; xla::PjRtLoadedExecutable* pjrt_executable = nullptr; xla::PjRtClient* pjrt_client = nullptr; TF_EXPECT_OK(CompileToPjRtLoadedExecutable( device, platform_info, function, SampleArgsForAddXY(), DeviceCompileMode::kStrict, true, true, device_setup.flr(), &resource_mgr, &compilation_result, &pjrt_client, &pjrt_executable)); EXPECT_TRUE(compilation_result != nullptr); EXPECT_TRUE(pjrt_executable != nullptr); EXPECT_TRUE(pjrt_client != nullptr); } TEST(PjrtCompileUtilTest, CompileToPjRtLoadedExecutableWithOpKernelContext) { DeviceSetup device_setup; TF_ASSERT_OK_AND_ASSIGN(auto fdef, SampleFuntionAddXY("foo")); device_setup.AddDevicesAndSetUp({DEVICE_GPU}, fdef); Device* device = device_setup.GetDevice(DEVICE_GPU); const XlaPlatformInfo platform_info = XlaPlatformInfoFromDevice(device); NameAttrList function; function.set_name("foo"); ResourceMgr resource_mgr(""); OpKernelContext::Params params; params.resource_manager = &resource_mgr; params.device = device; params.function_library = device_setup.flr(); OpKernelContext ctx(&params, 1); const XlaCompiler::CompilationResult* compilation_result = nullptr; xla::PjRtLoadedExecutable* pjrt_executable = nullptr; xla::PjRtClient* pjrt_client = nullptr; TF_EXPECT_OK(CompileToPjRtLoadedExecutable( ctx, platform_info, 
function, SampleArgsForAddXY(), DeviceCompileMode::kStrict, true, true, &compilation_result, &pjrt_client, &pjrt_executable)); EXPECT_TRUE(compilation_result != nullptr); EXPECT_TRUE(pjrt_executable != nullptr); EXPECT_TRUE(pjrt_client != nullptr); } } } #endif
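A condensed sketch of the OpKernelContext overload exercised by the test above: the caller supplies a context, platform info, and compiler arguments, and receives the compilation result, PjRt client, and loaded executable through out-parameters. The surrounding setup (device, function library, resource manager) is assumed to exist as in the test, and the wrapper name is illustrative (code assumed to live inside namespace tensorflow).

// Sketch only: mirrors the signature in the record above.
#include <vector>
#include "tensorflow/compiler/jit/pjrt_compile_util.h"

Status CompileFooForPjRt(OpKernelContext& ctx,
                         const XlaPlatformInfo& platform_info,
                         const std::vector<XlaCompiler::Argument>& args) {
  NameAttrList function;
  function.set_name("foo");

  const XlaCompiler::CompilationResult* compilation_result = nullptr;
  xla::PjRtClient* pjrt_client = nullptr;
  xla::PjRtLoadedExecutable* pjrt_executable = nullptr;

  // kStrict blocks until compilation finishes, as in the test above.
  return CompileToPjRtLoadedExecutable(
      ctx, platform_info, function, args, DeviceCompileMode::kStrict,
      /*has_ref_vars=*/true, /*may_alias_resource_update=*/true,
      &compilation_result, &pjrt_client, &pjrt_executable);
}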
1,082
cpp
tensorflow/tensorflow
increase_dynamism_for_auto_jit_pass
tensorflow/compiler/jit/increase_dynamism_for_auto_jit_pass.cc
tensorflow/compiler/jit/increase_dynamism_for_auto_jit_pass_test.cc
#ifndef TENSORFLOW_COMPILER_JIT_INCREASE_DYNAMISM_FOR_AUTO_JIT_PASS_H_ #define TENSORFLOW_COMPILER_JIT_INCREASE_DYNAMISM_FOR_AUTO_JIT_PASS_H_ #include "tensorflow/core/common_runtime/optimization_registry.h" namespace tensorflow { class IncreaseDynamismForAutoJitPass : public GraphOptimizationPass { public: Status Run(const GraphOptimizationPassOptions& options) override; }; } #endif #include "tensorflow/compiler/jit/increase_dynamism_for_auto_jit_pass.h" #include <iterator> #include "absl/algorithm/container.h" #include "absl/container/inlined_vector.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_replace.h" #include "absl/types/optional.h" #include "tensorflow/cc/framework/scope_internal.h" #include "tensorflow/cc/ops/array_ops.h" #include "tensorflow/cc/ops/const_op.h" #include "tensorflow/cc/ops/math_ops.h" #include "tensorflow/compiler/jit/flags.h" #include "tensorflow/compiler/jit/xla_cluster_util.h" #include "tensorflow/compiler/tf2xla/cc/ops/xla_ops.h" #include "xla/status_macros.h" #include "tensorflow/core/common_runtime/shape_refiner.h" #include "tensorflow/core/graph/algorithm.h" #include "tensorflow/core/public/session_options.h" #include "tensorflow/core/util/device_name_utils.h" #include "tensorflow/core/util/dump_graph.h" namespace tensorflow { namespace { template <class T> using StatusOrOptional = StatusOr<std::optional<T>>; StatusOrOptional<Tensor> TryToGetTensorFromConstOp(Node* n) { if (n->type_string() != "Const") { return {std::nullopt}; } const TensorProto* proto = nullptr; TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), "value", &proto)); Tensor tensor(proto->dtype()); TF_RET_CHECK(tensor.FromProto(*proto)); return {tensor}; } struct SliceInputs { Output slice_op; Output input; Output begin; Output size; std::vector<int64_t> size_as_vector; }; std::vector<int64_t> IntTensorAsVector(const Tensor& t) { DCHECK(t.dtype() == DT_INT32 || t.dtype() == DT_INT64); std::vector<int64_t> result; result.reserve(t.NumElements()); for (int i = 0; i < t.NumElements(); i++) { int64_t element = t.dtype() == DT_INT32 ? static_cast<int64_t>(t.flat<int32>()(i)) : t.flat<int64_t>()(i); result.push_back(element); } return result; } StatusOrOptional<SliceInputs> GetSliceInputs(Node* slice) { const int kSliceInputIndex = 0; const int kSliceBeginIndex = 1; const int kSliceSizeIndex = 2; const Edge* slice_input_edge; TF_RETURN_IF_ERROR(slice->input_edge(kSliceInputIndex, &slice_input_edge)); const Edge* slice_size_edge; TF_RETURN_IF_ERROR(slice->input_edge(kSliceSizeIndex, &slice_size_edge)); const Edge* slice_begin_edge; TF_RETURN_IF_ERROR(slice->input_edge(kSliceBeginIndex, &slice_begin_edge)); SliceInputs slice_inputs; slice_inputs.input = Output(slice_input_edge->src(), slice_input_edge->src_output()); slice_inputs.begin = Output(slice_begin_edge->src(), slice_begin_edge->src_output()); slice_inputs.size = Output(slice_size_edge->src(), slice_size_edge->src_output()); TF_ASSIGN_OR_RETURN(std::optional<Tensor> tf_slice_size, TryToGetTensorFromConstOp(slice_inputs.size.node())); if (!tf_slice_size.has_value()) { return {std::nullopt}; } if (tf_slice_size->dims() != 1) { return {std::nullopt}; } slice_inputs.size_as_vector = IntTensorAsVector(*tf_slice_size); return {slice_inputs}; } Output MakeInt64(const Scope& host_scope, absl::string_view name, const Output& x) { return x.type() == DT_INT64 ? 
x : ops::Cast(host_scope.WithOpName(name, "_s64"), x, DT_INT64); } SliceInputs MakeSliceIndexAndSizeInt64(const Scope& host_scope, const SliceInputs& slice_inputs) { SliceInputs result; result.input = slice_inputs.input; result.begin = MakeInt64(host_scope, "begin", slice_inputs.begin); result.size = MakeInt64(host_scope, "size", slice_inputs.size); result.size_as_vector = slice_inputs.size_as_vector; return result; } class ConstantCache { public: explicit ConstantCache(const Scope& s, const std::vector<const Edge*>& control_deps) : scope_(s), control_deps_(control_deps) {} Output Get1DHostConstant(int64_t constant) { auto it = cache_.find(constant); if (it == cache_.end()) { Output new_const = ops::Const(scope_.WithOpName("const_", constant), {constant}); it = cache_.insert({constant, new_const}).first; for (const Edge* e : control_deps_) { scope_.graph()->AddControlEdge(e->src(), new_const.node()); } } return it->second; } private: Scope scope_; std::unordered_map<int, Output> cache_; std::vector<const Edge*> control_deps_; }; Status ComputeSliceSize(const Scope& host_scope, const SliceInputs& slice_inputs, std::vector<const Edge*> control_deps, Output* size) { if (absl::c_all_of(slice_inputs.size_as_vector, [](int64_t i) { return i >= 0; })) { *size = slice_inputs.size; return absl::OkStatus(); } Output input_shape = ops::Shape(host_scope.WithOpName("input_shape"), slice_inputs.input, ops::Shape::OutType(DT_INT64)); ConstantCache constant_pool(host_scope, control_deps); std::vector<Output> slice_size; for (int i = 0, end = slice_inputs.size_as_vector.size(); i < end; i++) { if (slice_inputs.size_as_vector[i] >= 0) { slice_size.push_back( constant_pool.Get1DHostConstant(slice_inputs.size_as_vector[i])); continue; } DCHECK_EQ(slice_inputs.size_as_vector[i], -1); Output begin_i = ops::Slice( host_scope.WithOpName("begin_", i), slice_inputs.begin, constant_pool.Get1DHostConstant(i), constant_pool.Get1DHostConstant(1)); Output input_shape_i = ops::Slice( host_scope.WithOpName("input_shape_", i), input_shape, constant_pool.Get1DHostConstant(i), constant_pool.Get1DHostConstant(1)); slice_size.push_back(ops::Sub(host_scope.WithOpName("slice_size_", i), input_shape_i, begin_i)); DCHECK_EQ(slice_size.back().type(), DT_INT64); } if (slice_size.size() == 1) { *size = slice_size[0]; } else { auto concat_axis = ops::Const(host_scope.WithOpName("concat_axis"), 0); for (const Edge* e : control_deps) { host_scope.graph()->AddControlEdge(e->src(), concat_axis.node()); } *size = ops::Concat(host_scope.WithOpName("slice_size"), slice_size, concat_axis); } return absl::OkStatus(); } Status ConvertTensorFlowSliceToStaticShapedSlice( Graph* g, Node* slice, const SliceInputs& slice_inputs, absl::string_view cluster_name, Node** result) { string host_name; TF_RETURN_IF_ERROR(DeviceNameUtils::DeviceNameToCpuDeviceName( slice->assigned_device_name(), &host_name)); Status status; Scope main_scope = NewInternalScope(g, &status, nullptr) .WithXlaCluster(string(cluster_name)) .NewSubScope(absl::StrCat(slice->name(), "/static_shaped_slice")); Scope host_scope = main_scope.WithAssignedDevice(host_name); SliceInputs slice_inputs_int64 = MakeSliceIndexAndSizeInt64(host_scope, slice_inputs); Node* old_size; std::vector<const Edge*> old_size_ctrl_deps; TF_RETURN_IF_ERROR(slice->input_node(2, &old_size)); absl::c_copy_if(old_size->in_edges(), std::back_inserter(old_size_ctrl_deps), [](const Edge* e) { return e->IsControlEdge(); }); Output slice_size; TF_RETURN_IF_ERROR(ComputeSliceSize(host_scope, slice_inputs_int64, 
old_size_ctrl_deps, &slice_size)); *result = ops::Slice(main_scope.WithAssignedDevice(slice->assigned_device_name()) .WithOpName("static_shaped_slice"), slice_inputs_int64.input, slice_inputs_int64.begin, slice_size) .node(); TF_RETURN_IF_ERROR(main_scope.status()); std::vector<string> compile_time_const_inputs; compile_time_const_inputs.push_back("size"); (*result)->AddAttr(kXlaCompileTimeConstantInputsAttr, compile_time_const_inputs); return status; } void ReplaceTensorFlowSliceWithStaticShapedSlice(Graph* g, Node* slice, Node* static_shaped_slice) { absl::InlinedVector<const Edge*, 6> edges_to_remove; std::vector<const Edge*> slice_out_edges; absl::c_copy(slice->out_edges(), std::back_inserter(slice_out_edges)); for (const Edge* e : slice_out_edges) { DCHECK(e->src_output() == 0 || e->src_output() == Graph::kControlSlot); int src_output = e->src_output(); int dst_input = e->dst_input(); Node* dst = e->dst(); g->RemoveEdge(e); g->AddEdge(static_shaped_slice, src_output, dst, dst_input); } for (const Edge* e : slice->in_edges()) { if (e->IsControlEdge()) { g->AddControlEdge(e->src(), static_shaped_slice); } } g->RemoveNode(slice); } Status RewriteSlice(Graph* g, Node* slice, const SliceInputs& slice_inputs, absl::string_view cluster_name) { VLOG(3) << "Rewriting slice " << slice->name() << " to a \"static shaped\" Slice"; Node* static_shaped_slice; TF_RETURN_IF_ERROR(ConvertTensorFlowSliceToStaticShapedSlice( g, slice, slice_inputs, cluster_name, &static_shaped_slice)); ReplaceTensorFlowSliceWithStaticShapedSlice(g, slice, static_shaped_slice); return absl::OkStatus(); } absl::StatusOr<bool> ShouldRewriteSlice(Node* n) { if (n->type_string() != "Slice") { return false; } if (!GetXlaClusterForNode(*n).has_value()) { return false; } TF_ASSIGN_OR_RETURN(std::optional<SliceInputs> slice_inputs, GetSliceInputs(n)); if (!slice_inputs.has_value()) { return false; } bool slice_size_has_error = absl::c_all_of(slice_inputs->size_as_vector, [](int64_t size_i) { return size_i >= -1; }); if (!slice_size_has_error) { return false; } return !slice_inputs->begin.node()->IsConstant(); } Status FindAndRewriteSlices(Graph* g, bool* changed) { std::vector<Node*> slices_to_rewrite; for (Node* n : g->nodes()) { TF_ASSIGN_OR_RETURN(bool is_rewritable, ShouldRewriteSlice(n)); if (is_rewritable) { slices_to_rewrite.push_back(n); } } for (Node* n : slices_to_rewrite) { TF_ASSIGN_OR_RETURN(std::optional<SliceInputs> slice_inputs, GetSliceInputs(n)); TF_RET_CHECK(slice_inputs.has_value()); TF_RETURN_IF_ERROR( RewriteSlice(g, n, *slice_inputs, *GetXlaClusterForNode(*n))); } if (!slices_to_rewrite.empty()) { FixupSourceAndSinkEdges(g); } *changed = !slices_to_rewrite.empty(); return absl::OkStatus(); } } Status IncreaseDynamismForAutoJitPass::Run( const GraphOptimizationPassOptions& options) { MarkForCompilationPassFlags* flags = GetMarkForCompilationPassFlags(); if (flags->tf_xla_clustering_debug) { DumpGraphToFile("before_increase_dynamism_for_auto_jit_pass", **options.graph, options.flib_def); } bool changed; TF_RETURN_IF_ERROR(FindAndRewriteSlices(options.graph->get(), &changed)); if (changed && flags->tf_xla_clustering_debug) { DumpGraphToFile("increase_dynamism_for_auto_jit_pass", **options.graph, options.flib_def); } return absl::OkStatus(); } }
#include "tensorflow/compiler/jit/increase_dynamism_for_auto_jit_pass.h" #include "tensorflow/cc/framework/ops.h" #include "tensorflow/cc/ops/array_ops.h" #include "tensorflow/cc/ops/const_op.h" #include "tensorflow/compiler/jit/node_matchers.h" #include "tensorflow/compiler/jit/xla_cluster_util.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/public/session_options.h" namespace tensorflow { namespace { using ::testing::_; using testing::matchers::AssignedDevice; using testing::matchers::Attr; using testing::matchers::Const; using testing::matchers::CtrlDeps; using testing::matchers::Inputs; using testing::matchers::Name; using testing::matchers::NodeWith; using testing::matchers::Op; using testing::matchers::Out; class FakeDevice : public Device { public: explicit FakeDevice(const DeviceAttributes& device_attributes) : Device(nullptr, device_attributes) {} Status Sync() override { return errors::Unimplemented("FakeDevice::Sync()"); } Allocator* GetAllocator(AllocatorAttributes attr) override { return nullptr; } static std::unique_ptr<Device> Make(const string& name, const string& type) { DeviceAttributes device_attributes; device_attributes.set_name(name); device_attributes.set_device_type(DeviceType(type).type()); return std::make_unique<FakeDevice>(device_attributes); } }; const char* kHostName = "/job:worker/replica:0/task:0/device:CPU:0"; const char* kDeviceName = "/job:worker/replica:0/task:0/device:GPU:0"; Status IncreaseDynamismForAutoJit(const Scope& s, std::unique_ptr<Graph>* result) { std::vector<std::unique_ptr<Device>> devices; devices.push_back(FakeDevice::Make(kDeviceName, DEVICE_GPU)); devices.push_back(FakeDevice::Make(kHostName, DEVICE_CPU)); std::unique_ptr<DeviceSet> device_set(new DeviceSet()); for (auto& device : devices) { device_set->AddDevice(device.get()); } auto graph = std::make_unique<Graph>(OpRegistry::Global()); SessionOptions session_options; session_options.config.mutable_graph_options() ->mutable_optimizer_options() ->set_global_jit_level(OptimizerOptions::ON_2); GraphOptimizationPassOptions options; options.graph = &graph; options.device_set = device_set.get(); options.session_options = &session_options; std::unordered_map<string, string> assigned_device_names; for (Node* n : s.graph()->nodes()) { assigned_device_names[n->name()] = n->assigned_device_name(); } TF_RETURN_IF_ERROR(s.ToGraph(graph.get())); for (Node* n : graph->nodes()) { n->set_assigned_device_name(assigned_device_names[n->name()]); } IncreaseDynamismForAutoJitPass rewriter; TF_RETURN_IF_ERROR(rewriter.Run(options)); *result = std::move(graph); return absl::OkStatus(); } TEST(SliceToDynamicSliceRewriteTest, Basic) { Scope root = Scope::NewRootScope() .ExitOnError() .WithAssignedDevice(kDeviceName) .WithXlaCluster("cluster_0"); Output input = ops::Placeholder(root.WithOpName("input"), DT_FLOAT); Output begin = ops::Placeholder(root.WithOpName("begin"), DT_INT32); Output size = ops::Const(root.WithOpName("size"), {-1, 500}); Output slice = ops::Slice(root.WithOpName("slice"), input, begin, size); std::unique_ptr<Graph> result; TF_ASSERT_OK(IncreaseDynamismForAutoJit(root, &result)); const int64_t zero_64 = 0; const int32_t zero_32 = 0; const int64_t one_64 = 1; auto m_input = Out(NodeWith(Op("Placeholder"), Name("input"))); auto m_begin_s64 = Out(NodeWith( Op("Cast"), Inputs(Out(NodeWith(Op("Placeholder"), Name("begin")))))); auto m_input_shape = Out(NodeWith(Op("Shape"), Inputs(m_input))); auto m_slice_size_0 = 
Out(NodeWith( Op("Sub"), AssignedDevice(kHostName), Inputs( Out(NodeWith(Op("Slice"), AssignedDevice(kHostName), Inputs(m_input_shape, Const(zero_64), Const(one_64)))), Out(NodeWith(Op("Slice"), AssignedDevice(kHostName), Inputs(m_begin_s64, Const(zero_64), Const(one_64))))))); auto m_dynamic_slice_size = Out(NodeWith(Op("ConcatV2"), AssignedDevice(kHostName), Inputs(m_slice_size_0, Const(static_cast<int64_t>(500)), Const(zero_32)))); std::vector<string> compile_time_constant_inputs; compile_time_constant_inputs.push_back("size"); auto m_dynamic_slice = NodeWith( Op("Slice"), AssignedDevice(kDeviceName), Attr(kXlaCompileTimeConstantInputsAttr, compile_time_constant_inputs), Inputs(m_input, m_begin_s64, m_dynamic_slice_size)); Node* static_shaped_slice = testing::FindNodeByName( result.get(), "slice/static_shaped_slice/static_shaped_slice"); ASSERT_NE(static_shaped_slice, nullptr); EXPECT_THAT(static_shaped_slice, m_dynamic_slice); } TEST(SliceToDynamicSliceRewriteTest, SliceFromVector) { Scope root = Scope::NewRootScope() .ExitOnError() .WithAssignedDevice(kDeviceName) .WithXlaCluster("cluster_0"); Output input = ops::Placeholder(root.WithOpName("input"), DT_FLOAT); Output begin = ops::Placeholder(root.WithOpName("begin"), DT_INT32); Output size = ops::Const(root.WithOpName("size"), {-1}); Output slice = ops::Slice(root.WithOpName("slice"), input, begin, size); std::unique_ptr<Graph> result; TF_ASSERT_OK(IncreaseDynamismForAutoJit(root, &result)); Node* static_shaped_slice = testing::FindNodeByName( result.get(), "slice/static_shaped_slice/static_shaped_slice"); EXPECT_NE(static_shaped_slice, nullptr); EXPECT_THAT(result->nodes(), Not(Contains(NodeWith(Op("ConcatV2"))))); } TEST(SliceToDynamicSliceRewriteTest, ControlDependencePreserved) { Scope root = Scope::NewRootScope() .ExitOnError() .WithAssignedDevice(kDeviceName) .WithXlaCluster("cluster_0"); Output input = ops::Placeholder(root.WithOpName("input"), DT_FLOAT); Output begin = ops::Placeholder(root.WithOpName("begin"), DT_INT32); Output size = ops::Const(root.WithOpName("size"), {-1, 500}); Output control_pred = ops::Placeholder(root.WithOpName("control"), DT_BOOL); Output slice = ops::Slice(root.WithOpName("slice"), input, begin, size); root.graph()->AddControlEdge(control_pred.node(), slice.node()); std::unique_ptr<Graph> result; TF_ASSERT_OK(IncreaseDynamismForAutoJit(root, &result)); Node* static_shaped_slice = testing::FindNodeByName( result.get(), "slice/static_shaped_slice/static_shaped_slice"); ASSERT_NE(static_shaped_slice, nullptr); EXPECT_THAT(static_shaped_slice, NodeWith(Op("Slice"), CtrlDeps(NodeWith(Op("Placeholder"), Name("control"))))); } int64_t ToInt64(int v) { return static_cast<int64_t>(v); } TEST(SliceToDynamicSliceRewriteTest, Int64Indices) { Scope root = Scope::NewRootScope() .ExitOnError() .WithAssignedDevice(kDeviceName) .WithXlaCluster("cluster_0"); Output input = ops::Placeholder(root.WithOpName("input"), DT_FLOAT); Output begin = ops::Placeholder(root.WithOpName("begin"), DT_INT64); Output size = ops::Const(root.WithOpName("size"), {ToInt64(-1), ToInt64(500)}); Output slice = ops::Slice(root.WithOpName("slice"), input, begin, size); std::unique_ptr<Graph> result; TF_ASSERT_OK(IncreaseDynamismForAutoJit(root, &result)); EXPECT_THAT(result->nodes(), Not(Contains(NodeWith(Op("Cast"))))); } TEST(SliceToDynamicSliceRewriteTest, DontRewriteInvalidSlice) { Scope root = Scope::NewRootScope() .ExitOnError() .WithAssignedDevice(kDeviceName) .WithXlaCluster("cluster_0"); Output input = 
ops::Placeholder(root.WithOpName("input"), DT_FLOAT); Output begin = ops::Placeholder(root.WithOpName("begin"), DT_INT32); Output size_placeholder = ops::Placeholder(root.WithOpName("size_placeholder"), DT_INT32); Output slice = ops::Slice(root.WithOpName("slice"), input, begin, size_placeholder); Output size = ops::Const(root.WithOpName("size"), {-8, 500}); TF_ASSERT_OK(root.graph()->UpdateEdge(size.node(), 0, slice.node(), 2)); std::unique_ptr<Graph> result; TF_ASSERT_OK(IncreaseDynamismForAutoJit(root, &result)); EXPECT_THAT(result->nodes(), Not(Contains(NodeWith(Op("Slice"), Attr(kXlaCompileTimeConstantInputsAttr))))); } TEST(SliceToDynamicSliceRewriteTest, DontRewriteUnclusteredSlice) { Scope root = Scope::NewRootScope().ExitOnError().WithAssignedDevice(kDeviceName); Output input = ops::Placeholder(root.WithOpName("input"), DT_FLOAT); Output begin = ops::Placeholder(root.WithOpName("begin"), DT_INT32); Output size = ops::Const(root.WithOpName("size"), {-1, 500}); Output slice = ops::Slice(root.WithOpName("slice"), input, begin, size); std::unique_ptr<Graph> result; TF_ASSERT_OK(IncreaseDynamismForAutoJit(root, &result)); EXPECT_THAT(result->nodes(), Not(Contains(NodeWith(Op("Slice"), Attr(kXlaCompileTimeConstantInputsAttr))))); } TEST(SliceToDynamicSliceRewriteTest, DontRewriteSliceWithNonConstSize) { Scope root = Scope::NewRootScope() .ExitOnError() .WithAssignedDevice(kDeviceName) .WithXlaCluster("cluster_0"); Output input = ops::Placeholder(root.WithOpName("input"), DT_FLOAT); Output begin = ops::Placeholder(root.WithOpName("begin"), DT_INT64); Output size = ops::Placeholder(root.WithOpName("size"), DT_INT64); Output slice = ops::Slice(root.WithOpName("slice"), input, begin, size); std::unique_ptr<Graph> result; TF_ASSERT_OK(IncreaseDynamismForAutoJit(root, &result)); EXPECT_THAT(result->nodes(), Not(Contains(NodeWith(Op("Slice"), Attr(kXlaCompileTimeConstantInputsAttr))))); } TEST(SliceToDynamicSliceRewriteTest, ScalarSlice) { Scope root = Scope::NewRootScope() .ExitOnError() .WithAssignedDevice(kDeviceName) .WithXlaCluster("cluster_0"); Output input = ops::Placeholder(root.WithOpName("input"), DT_FLOAT); Output begin = ops::Placeholder(root.WithOpName("begin"), DT_INT64); Output size = ops::Const<int64_t>(root.WithOpName("size"), {}); Output slice = ops::Slice(root.WithOpName("slice"), input, begin, size); std::unique_ptr<Graph> result; TF_ASSERT_OK(IncreaseDynamismForAutoJit(root, &result)); Node* static_shaped_slice = testing::FindNodeByName( result.get(), "slice/static_shaped_slice/static_shaped_slice"); ASSERT_NE(static_shaped_slice, nullptr); EXPECT_THAT(static_shaped_slice, NodeWith(Op("Slice"), Attr(kXlaCompileTimeConstantInputsAttr), Inputs(_, _, Out(NodeWith(Name(size.node()->name())))))); } TEST(SliceToDynamicSliceRewriteTest, IndicesNotVector) { Scope root = Scope::NewRootScope() .ExitOnError() .WithAssignedDevice(kDeviceName) .WithXlaCluster("cluster_0"); auto ToInt64 = [](int v) { return static_cast<int64_t>(v); }; Output input = ops::Placeholder(root.WithOpName("input"), DT_FLOAT); Output begin = ops::Placeholder(root.WithOpName("begin"), DT_INT64); Output size_placeholder = ops::Placeholder(root.WithOpName("size"), DT_INT64); Output slice = ops::Slice(root.WithOpName("slice"), input, begin, size_placeholder); Output size = ops::Const(root.WithOpName("size"), {{ToInt64(-1)}, {ToInt64(500)}}); TF_ASSERT_OK(root.graph()->UpdateEdge(size.node(), 0, slice.node(), 2)); std::unique_ptr<Graph> result; TF_ASSERT_OK(IncreaseDynamismForAutoJit(root, &result)); 
EXPECT_THAT(result->nodes(), Not(Contains(NodeWith(Op("Slice"), Attr(kXlaCompileTimeConstantInputsAttr))))); } TEST(SliceToDynamicSliceRewriteTest, SliceWithSliceInput) { Scope root = Scope::NewRootScope() .ExitOnError() .WithAssignedDevice(kDeviceName) .WithXlaCluster("cluster_0"); Output input = ops::Placeholder(root.WithOpName("input"), DT_FLOAT); Output begin = ops::Placeholder(root.WithOpName("begin"), DT_INT32); Output size_a = ops::Const(root.WithOpName("size_a"), {-1, 500}); Output slice = ops::Slice(root.WithOpName("slice"), input, begin, size_a); Output size_b = ops::Const(root.WithOpName("size_a"), {-1, 200}); Output slice_with_slice_input = ops::Slice( root.WithOpName("slice_with_slice_input"), slice, begin, size_b); std::unique_ptr<Graph> result; TF_ASSERT_OK(IncreaseDynamismForAutoJit(root, &result)); Node* static_shaped_slice = testing::FindNodeByName( result.get(), "slice_with_slice_input/static_shaped_slice/static_shaped_slice"); ASSERT_NE(static_shaped_slice, nullptr); EXPECT_EQ(static_shaped_slice->output_type(0), DT_FLOAT) << "Expected DT_FLOAT, was " << DataType_Name(static_shaped_slice->output_type(0)); EXPECT_THAT( static_shaped_slice, NodeWith( Op("Slice"), Inputs(Out(NodeWith( Op("Slice"), Name("slice/static_shaped_slice/static_shaped_slice"))), _, _))); } TEST(SliceToDynamicSliceRewriteTest, SliceWithSliceBegin) { Scope root = Scope::NewRootScope() .ExitOnError() .WithAssignedDevice(kDeviceName) .WithXlaCluster("cluster_0"); Output input_float = ops::Placeholder(root.WithOpName("input_float"), DT_FLOAT); Output input_i64 = ops::Placeholder(root.WithOpName("input_i64"), DT_INT64); Output begin_begin = ops::Placeholder(root.WithOpName("begin_begin"), DT_INT32); Output begin_size = ops::Const(root.WithOpName("begin_size"), {-1}); Output begin = ops::Slice(root.WithOpName("begin"), input_i64, begin_begin, begin_size); Output size = ops::Const(root.WithOpName("size"), {ToInt64(-1), ToInt64(200)}); Output slice_with_slice_begin = ops::Slice( root.WithOpName("slice_with_slice_begin"), input_float, begin, size); std::unique_ptr<Graph> result; TF_ASSERT_OK(IncreaseDynamismForAutoJit(root, &result)); Node* static_shaped_slice = testing::FindNodeByName( result.get(), "slice_with_slice_begin/static_shaped_slice/static_shaped_slice"); ASSERT_NE(static_shaped_slice, nullptr); EXPECT_EQ(static_shaped_slice->output_type(0), DT_FLOAT) << "Expected DT_FLOAT, was " << DataType_Name(static_shaped_slice->output_type(0)); EXPECT_THAT( static_shaped_slice, NodeWith( Op("Slice"), Inputs(_, Out(NodeWith( Op("Slice"), Name("begin/static_shaped_slice/static_shaped_slice"))), _))); } TEST(SliceToDynamicSliceRewriteTest, WithControlDepsToConstant) { Scope root = Scope::NewRootScope() .ExitOnError() .WithAssignedDevice(kDeviceName) .WithXlaCluster("cluster_0"); Output input = ops::Placeholder(root.WithOpName("input"), DT_FLOAT); Output begin = ops::Placeholder(root.WithOpName("begin"), DT_INT32); Output size = ops::Const(root.WithOpName("size"), {-1}); Output slice = ops::Slice(root.WithOpName("slice"), input, begin, size); Output dependency = ops::Placeholder(root.WithOpName("dependency"), DT_BOOL); root.graph()->AddControlEdge(dependency.node(), size.node()); std::unique_ptr<Graph> result; TF_ASSERT_OK(IncreaseDynamismForAutoJit(root, &result)); Node* const_0 = testing::FindNodeByName(result.get(), "slice/static_shaped_slice/const_0"); EXPECT_NE(const_0, nullptr); EXPECT_THAT(const_0, NodeWith(Op("Const"), CtrlDeps(NodeWith(Op("Placeholder"), Name("dependency"))))); } 
TEST(SliceToDynamicSliceRewriteTest, DontRewriteSliceWithConstBegin) { Scope root = Scope::NewRootScope() .ExitOnError() .WithAssignedDevice(kDeviceName) .WithXlaCluster("cluster_0"); Output input = ops::Placeholder(root.WithOpName("input"), DT_FLOAT); Output begin = ops::Const(root.WithOpName("begin"), {10, 10}); Output size = ops::Const(root.WithOpName("size"), {-1, 500}); Output slice = ops::Slice(root.WithOpName("slice"), input, begin, size); std::unique_ptr<Graph> result; TF_ASSERT_OK(IncreaseDynamismForAutoJit(root, &result)); Node* slice_node = testing::FindNodeByName(result.get(), "slice"); EXPECT_THAT(slice_node, NodeWith(Op("Slice"), Inputs(Out(NodeWith(Op("Placeholder"))), Out(NodeWith(Op("Const"))), Out(NodeWith(Op("Const")))))); } } }
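A small sketch of the graph shape this pass targets, following the tests above: a Slice assigned to an XLA cluster whose size operand is a constant containing -1 and whose begin operand is not constant. Under the assumption that the pass runs in the optimization pipeline, such a node is rewritten into the "static_shaped_slice" form the tests check for. The device string, cluster name, and function name are placeholders (code assumed to live inside namespace tensorflow).

// Sketch: constructing the kind of Slice the pass rewrites.
#include "tensorflow/cc/framework/scope.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"

void BuildRewritableSlice(const Scope& root) {
  Scope clustered =
      root.WithAssignedDevice("/job:worker/replica:0/task:0/device:GPU:0")
          .WithXlaCluster("cluster_0");
  Output input = ops::Placeholder(clustered.WithOpName("input"), DT_FLOAT);
  Output begin = ops::Placeholder(clustered.WithOpName("begin"), DT_INT32);
  // A -1 size entry means "to the end of the dimension"; a non-constant begin
  // plus a constant size containing -1 is what makes the node eligible.
  Output size = ops::Const(clustered.WithOpName("size"), {-1, 500});
  ops::Slice(clustered.WithOpName("slice"), input, begin, size);
}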
1,083
cpp
tensorflow/tensorflow
deadness_analysis
tensorflow/compiler/jit/deadness_analysis.cc
tensorflow/compiler/jit/deadness_analysis_test.cc
#ifndef TENSORFLOW_COMPILER_JIT_DEADNESS_ANALYSIS_H_ #define TENSORFLOW_COMPILER_JIT_DEADNESS_ANALYSIS_H_ #include "tensorflow/core/graph/graph.h" namespace tensorflow { class DeadnessAnalysis { public: struct DeadnessPredicate { public: DeadnessPredicate(const DeadnessPredicate&) = default; DeadnessPredicate(DeadnessPredicate&&) = default; DeadnessPredicate& operator=(const DeadnessPredicate&) = default; DeadnessPredicate& operator=(DeadnessPredicate&&) = default; bool operator==(const DeadnessPredicate& other) const { return other.pred_ == pred_; } bool operator!=(const DeadnessPredicate& other) const { return other.pred_ != pred_; } private: explicit DeadnessPredicate(void* pred) : pred_(pred) {} void* pred_; friend class DeadnessAnalysis; }; virtual absl::StatusOr<DeadnessPredicate> GetPredicateFor(Node* n, int oidx) const = 0; virtual void Print() const = 0; virtual ~DeadnessAnalysis(); string DebugString(DeadnessPredicate predicate) const; static Status Run(const Graph& graph, std::unique_ptr<DeadnessAnalysis>* result); protected: static DeadnessPredicate MakeDeadnessPredicate(void* pred) { return DeadnessPredicate(pred); } }; } #endif #include "tensorflow/compiler/jit/deadness_analysis.h" #include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/strings/str_join.h" #include "absl/strings/string_view.h" #include "tensorflow/compiler/jit/deadness_analysis_internal.h" #include "tensorflow/compiler/jit/xla_cluster_util.h" #include "xla/status_macros.h" #include "tensorflow/core/framework/tensor.pb.h" #include "tensorflow/core/graph/algorithm.h" #include "tensorflow/core/graph/control_flow.h" #include "tensorflow/core/graph/graph_node_util.h" #include "tensorflow/core/graph/tensor_id.h" #include "tensorflow/core/lib/hash/hash.h" namespace tensorflow { namespace { using tsl::StatusOr; class Predicate { public: enum class Kind { kAnd, kOr, kNot, kAndRecurrence, kSymbol, kIntSymbol }; virtual string ToString() const = 0; int64_t id() const { return id_; } virtual absl::Span<Predicate* const> GetOperands() const = 0; virtual Kind kind() const = 0; virtual ~Predicate() {} template <typename FunctionTy> static void Visit(Predicate* p, const FunctionTy& func); protected: explicit Predicate(int64_t id) : id_(id) {} private: const int64_t id_; Predicate(const Predicate&) = delete; void operator=(const Predicate&) = delete; }; class AndPredicate : public Predicate { public: explicit AndPredicate(int64_t id, std::vector<Predicate*> operands) : Predicate(id), operands_(std::move(operands)) {} string ToString() const override { if (operands().empty()) { return "#true"; } std::vector<string> operands_str; std::transform(operands().begin(), operands().end(), std::back_inserter(operands_str), [](Predicate* pred) { return pred->ToString(); }); return absl::StrCat("(", absl::StrJoin(operands_str, " & "), ")"); } Kind kind() const override { return Kind::kAnd; } absl::Span<Predicate* const> GetOperands() const override { return operands_; } absl::Span<Predicate* const> operands() const { return operands_; } private: std::vector<Predicate*> operands_; }; class OrPredicate : public Predicate { public: explicit OrPredicate(int64_t id, std::vector<Predicate*> operands) : Predicate(id), operands_(std::move(operands)) {} string ToString() const override { if (operands().empty()) { return "#false"; } std::vector<string> operands_str; std::transform(operands().begin(), operands().end(), std::back_inserter(operands_str), [](Predicate* 
pred) { return pred->ToString(); }); return absl::StrCat("(", absl::StrJoin(operands_str, " | "), ")"); } Kind kind() const override { return Kind::kOr; } absl::Span<Predicate* const> GetOperands() const override { return operands_; } absl::Span<Predicate* const> operands() const { return operands_; } private: std::vector<Predicate*> operands_; }; class NotPredicate : public Predicate { public: explicit NotPredicate(int64_t id, Predicate* operand) : Predicate(id), operands_({operand}) {} string ToString() const override { return absl::StrCat("~", operand()->ToString()); } Kind kind() const override { return Kind::kNot; } Predicate* operand() const { return operands_[0]; } absl::Span<Predicate* const> GetOperands() const override { return operands_; } private: std::array<Predicate*, 1> operands_; }; class AndRecurrencePredicate : public Predicate { public: explicit AndRecurrencePredicate(int64_t id, Predicate* start, Predicate* step, std::vector<string> frame) : Predicate(id), operands_({start, step}), frame_(std::move(frame)) {} Predicate* start() const { return operands_[0]; } Predicate* step() const { return operands_[1]; } absl::Span<const string> frame() const { return frame_; } string ToString() const override { return absl::StrCat("{", start()->ToString(), ",&,", step()->ToString(), "}<", absl::StrJoin(frame(), ";"), ">"); } Kind kind() const override { return Kind::kAndRecurrence; } absl::Span<Predicate* const> GetOperands() const override { return operands_; } private: std::array<Predicate*, 2> operands_; std::vector<string> frame_; }; class SymbolPredicate : public Predicate { public: explicit SymbolPredicate(int64_t id, TensorId tensor_id, bool must_be_true) : Predicate(id), tensor_id_(std::move(tensor_id)), must_be_true_(must_be_true) {} string ToString() const override { return must_be_true() ? absl::StrCat("*", tensor_id_.ToString()) : tensor_id_.ToString(); } Kind kind() const override { return Kind::kSymbol; } absl::Span<Predicate* const> GetOperands() const override { return {}; } TensorId tensor_id() const { return tensor_id_; } bool must_be_true() const { return must_be_true_; } private: TensorId tensor_id_; bool must_be_true_; }; class IntSymbolPredicate : public Predicate { public: explicit IntSymbolPredicate(int64_t id, TensorId tensor_id, std::optional<int> must_have_value) : Predicate(id), tensor_id_(std::move(tensor_id)), must_have_value_(must_have_value) {} string ToString() const override { return must_have_value().has_value() ? 
absl::StrCat(tensor_id_.ToString(), "=", *must_have_value_) : tensor_id_.ToString(); } Kind kind() const override { return Kind::kIntSymbol; } absl::Span<Predicate* const> GetOperands() const override { return {}; } TensorId tensor_id() const { return tensor_id_; } const std::optional<int>& must_have_value() const { return must_have_value_; } private: TensorId tensor_id_; std::optional<int> must_have_value_; }; template <typename FunctionTy> void Predicate::Visit(Predicate* p, const FunctionTy& func) { absl::flat_hash_set<Predicate*> visited; std::vector<Predicate*> stack; stack.push_back(p); visited.insert(p); while (!stack.empty()) { Predicate* current = stack.back(); stack.pop_back(); bool done = func(current); if (done) { return; } for (Predicate* op : current->GetOperands()) { if (visited.insert(op).second) { stack.push_back(op); } } } } class PredicateFactory { public: Predicate* MakeAndPredicate(absl::Span<Predicate* const> operands) { return MakeAndOrImpl(operands, true); } Predicate* MakeOrPredicate(absl::Span<Predicate* const> operands) { return MakeAndOrImpl(operands, false); } Predicate* MakeNotPredicate(Predicate* pred) { auto it = make_not_predicate_cache_.find(pred); if (it != make_not_predicate_cache_.end()) { return it->second; } Predicate* result = MakeNotPredicateImpl(pred); bool insert_successful = make_not_predicate_cache_.insert({pred, result}).second; (void)insert_successful; DCHECK(insert_successful); return result; } Predicate* MakeAndRecurrencePredicate(Predicate* start, Predicate* step, std::vector<string> frame) { SignatureForAndRec signature(start, step, std::move(frame)); auto it = interned_and_rec_instances_.find(signature); if (it != interned_and_rec_instances_.end()) { return it->second.get(); } std::unique_ptr<Predicate> new_pred = Make<AndRecurrencePredicate>( std::get<0>(signature), std::get<1>(signature), std::get<2>(signature)); Predicate* new_pred_ptr = new_pred.get(); bool inserted = interned_and_rec_instances_.emplace(signature, std::move(new_pred)) .second; (void)inserted; DCHECK(inserted); return new_pred_ptr; } Status MakeSymbolPredicate(Node* node, int output_idx, bool must_be_true, Predicate** predicate) { TensorId tensor_id(node->name(), output_idx); bool is_boolean_tensor = BaseType(node->output_type(tensor_id.index())) == DT_BOOL; TF_RET_CHECK(!must_be_true || is_boolean_tensor); if (node->type_string() == "Const" && must_be_true) { const TensorProto* proto = nullptr; TF_RETURN_IF_ERROR(GetNodeAttr(node->def(), "value", &proto)); Tensor tensor(proto->dtype()); TF_RET_CHECK(tensor.FromProto(*proto)); *predicate = tensor.scalar<bool>()() ? 
MakeTrue() : MakeFalse(); return absl::OkStatus(); } SignatureForSymbol signature = {tensor_id, must_be_true}; auto it = interned_symbol_instances_.find(signature); if (it == interned_symbol_instances_.end()) { std::unique_ptr<Predicate> new_pred = Make<SymbolPredicate>(tensor_id, must_be_true); Predicate* new_pred_ptr = new_pred.get(); interned_symbol_instances_.emplace(std::move(signature), std::move(new_pred)); *predicate = new_pred_ptr; } else { *predicate = it->second.get(); } return absl::OkStatus(); } Status MakeSymbolPredicate(Node* node, int output_idx, std::optional<int> must_have_value, Predicate** predicate) { TensorId tensor_id(node->name(), output_idx); TF_RET_CHECK(BaseType(node->output_type(tensor_id.index())) == DT_INT32); if (must_have_value.has_value() && node->type_string() == "Const") { const TensorProto* proto = nullptr; TF_RETURN_IF_ERROR(GetNodeAttr(node->def(), "value", &proto)); Tensor tensor(proto->dtype()); TF_RET_CHECK(tensor.FromProto(*proto)); *predicate = tensor.scalar<int32>()() == *must_have_value ? MakeTrue() : MakeFalse(); return absl::OkStatus(); } SignatureForIntSymbol signature = {tensor_id, must_have_value}; auto it = interned_int_symbol_instances_.find(signature); if (it == interned_int_symbol_instances_.end()) { std::unique_ptr<Predicate> new_pred = Make<IntSymbolPredicate>(tensor_id, must_have_value); Predicate* new_pred_ptr = new_pred.get(); interned_int_symbol_instances_.emplace(std::move(signature), std::move(new_pred)); *predicate = new_pred_ptr; } else { *predicate = it->second.get(); } return absl::OkStatus(); } Predicate* MakeTrue() { return MakeAndPredicate({}); } Predicate* MakeFalse() { return MakeOrPredicate({}); } ~PredicateFactory() { DCHECK_EQ(stack_depth_, 0) << "Unnested IncrementStackDepth?"; } private: Predicate* MakeNotPredicateImpl(Predicate* pred) { IncrementStackDepth stack_frame(this); if (!stack_frame.HasOverflowed()) { if (Predicate* simplified = SimplifyUsingDeMorgan(pred)) { return simplified; } if (auto* not_pred = dynamic_cast<NotPredicate*>(pred)) { return not_pred->operand(); } } SignatureForNot signature = pred; auto it = interned_not_instances_.find(signature); if (it == interned_not_instances_.end()) { std::unique_ptr<Predicate> new_pred = Make<NotPredicate>(pred); Predicate* new_pred_ptr = new_pred.get(); interned_not_instances_.emplace(signature, std::move(new_pred)); return new_pred_ptr; } else { return it->second.get(); } } Predicate* SimplifyUsingDeMorgan(Predicate* pred) { Predicate::Kind kind = pred->kind(); if (kind == Predicate::Kind::kAnd || kind == Predicate::Kind::kOr) { std::vector<Predicate*> new_operands; absl::c_transform(pred->GetOperands(), std::back_inserter(new_operands), [&](Predicate* p) { return MakeNotPredicate(p); }); return kind == Predicate::Kind::kOr ? MakeAndPredicate(new_operands) : MakeOrPredicate(new_operands); } return nullptr; } template <typename PredicateT, typename... Args> std::unique_ptr<Predicate> Make(Args&&... 
args) { return std::unique_ptr<PredicateT>( new PredicateT(id_counter_++, std::forward<Args>(args)...)); } Predicate* MakeAndOrImpl(absl::Span<Predicate* const> operands, bool is_and); Predicate* MakeInternedAndOr(std::vector<Predicate*> simplified_ops, Predicate::Kind pred_kind); using SignatureForAndOr = std::pair<Predicate::Kind, absl::Span<Predicate* const>>; using SignatureForNot = Predicate*; using SignatureForAndRec = std::tuple<Predicate*, Predicate*, std::vector<string>>; using SignatureForSymbol = std::pair<SafeTensorId, bool>; using SignatureForIntSymbol = std::pair<SafeTensorId, std::optional<int32>>; struct HashSignatureForAndOr { size_t operator()(const SignatureForAndOr& signature) const { size_t hash = ::tensorflow::hash<Predicate::Kind>()(signature.first); for (Predicate* p : signature.second) { hash = Hash64Combine(hash, ::tensorflow::hash<Predicate*>()(p)); } return hash; } }; struct HashSignatureForSymbol { size_t operator()(const SignatureForSymbol& signature) const { return Hash64Combine(SafeTensorId::Hasher()(signature.first), ::tensorflow::hash<bool>()(signature.second)); } }; struct HashSignatureForIntSymbol { size_t operator()(const SignatureForIntSymbol& signature) const { return Hash64Combine( SafeTensorId::Hasher()(signature.first), Hash64Combine( ::tensorflow::hash<bool>()(signature.second.has_value()), ::tensorflow::hash<int32>()( signature.second.has_value() ? *signature.second : 0))); } }; class IncrementStackDepth { public: explicit IncrementStackDepth(PredicateFactory* parent) : parent_(parent) { parent_->stack_depth_++; } bool HasOverflowed() const { const int kMaxStackDepth = 8; return parent_->stack_depth_ >= kMaxStackDepth; } ~IncrementStackDepth() { parent_->stack_depth_--; } private: PredicateFactory* parent_; }; absl::flat_hash_map<Predicate*, Predicate*> make_not_predicate_cache_; absl::flat_hash_map<SignatureForAndOr, std::unique_ptr<Predicate>, HashSignatureForAndOr> interned_and_or_instances_; absl::flat_hash_map<SignatureForNot, std::unique_ptr<Predicate>> interned_not_instances_; absl::flat_hash_map<SignatureForAndRec, std::unique_ptr<Predicate>> interned_and_rec_instances_; absl::flat_hash_map<SignatureForSymbol, std::unique_ptr<Predicate>, HashSignatureForSymbol> interned_symbol_instances_; absl::flat_hash_map<SignatureForIntSymbol, std::unique_ptr<Predicate>, HashSignatureForIntSymbol> interned_int_symbol_instances_; int64_t id_counter_ = 0; int stack_depth_ = 0; }; Predicate* PredicateFactory::MakeInternedAndOr( std::vector<Predicate*> simplified_ops, Predicate::Kind pred_kind) { std::stable_sort( simplified_ops.begin(), simplified_ops.end(), [](Predicate* a, Predicate* b) { return a->id() < b->id(); }); auto it = interned_and_or_instances_.find({pred_kind, simplified_ops}); if (it != interned_and_or_instances_.end()) { return it->second.get(); } simplified_ops.shrink_to_fit(); absl::Span<Predicate* const> operands_slice = simplified_ops; std::unique_ptr<Predicate> new_pred = pred_kind == Predicate::Kind::kAnd ? Make<AndPredicate>(std::move(simplified_ops)) : Make<OrPredicate>(std::move(simplified_ops)); Predicate* new_pred_ptr = new_pred.get(); interned_and_or_instances_.emplace( SignatureForAndOr(pred_kind, operands_slice), std::move(new_pred)); return new_pred_ptr; } Predicate* PredicateFactory::MakeAndOrImpl( absl::Span<Predicate* const> operands, bool is_and) { Predicate::Kind pred_kind = is_and ? 
Predicate::Kind::kAnd : Predicate::Kind::kOr; IncrementStackDepth stack_frame(this); if (stack_frame.HasOverflowed()) { return MakeInternedAndOr( std::vector<Predicate*>(operands.begin(), operands.end()), pred_kind); } Predicate::Kind other_pred_kind = is_and ? Predicate::Kind::kOr : Predicate::Kind::kAnd; absl::flat_hash_set<Predicate*> simplified_ops_set; std::vector<Predicate*> simplified_ops; for (Predicate* op : operands) { if (!simplified_ops_set.insert(op).second) { continue; } if (op->kind() == pred_kind) { for (Predicate* subop : op->GetOperands()) { if (simplified_ops_set.insert(subop).second) { simplified_ops.push_back(subop); } } } else { simplified_ops.push_back(op); } } if (simplified_ops.size() == 1) { return simplified_ops[0]; } absl::flat_hash_set<Predicate*> negated_ops; for (Predicate* op : simplified_ops) { if (negated_ops.count(op)) { return is_and ? MakeFalse() : MakeTrue(); } Predicate* negated_op = MakeNotPredicate(op); if (negated_op->kind() == pred_kind) { if (absl::c_all_of(negated_op->GetOperands(), [&](Predicate* p) { return simplified_ops_set.contains(p); })) { return is_and ? MakeFalse() : MakeTrue(); } } negated_ops.insert(negated_op); } if (is_and) { absl::flat_hash_set<Predicate*> to_remove; std::vector<Predicate*> to_add; for (Predicate* op : simplified_ops) { if (op->kind() == Predicate::Kind::kAndRecurrence) { auto* and_rec = static_cast<AndRecurrencePredicate*>(op); if (negated_ops.contains(and_rec->step())) { to_remove.insert(and_rec); to_remove.insert(MakeNotPredicate(and_rec->step())); to_add.push_back(and_rec->start()); } } } auto it = simplified_ops.begin(); while (it != simplified_ops.end()) { if (to_remove.contains(*it)) { it = simplified_ops.erase(it); } else { ++it; } } simplified_ops.insert(simplified_ops.end(), to_add.begin(), to_add.end()); } std::vector<Predicate*> common_inner_operands; absl::flat_hash_set<Predicate*> common_inner_operands_set; for (Predicate* op : simplified_ops) { if (op->kind() != other_pred_kind) { common_inner_operands.clear(); break; } if (common_inner_operands.empty()) { common_inner_operands.insert(common_inner_operands.end(), op->GetOperands().begin(), op->GetOperands().end()); } else { common_inner_operands.clear(); absl::c_copy_if(op->GetOperands(), std::back_inserter(common_inner_operands), [&](Predicate* sub_op) { return common_inner_operands_set.count(sub_op) == 1; }); } if (common_inner_operands.empty()) break; common_inner_operands_set.clear(); common_inner_operands_set.insert(common_inner_operands.begin()
#include "tensorflow/compiler/jit/deadness_analysis.h" #include "tensorflow/cc/framework/ops.h" #include "tensorflow/cc/ops/array_ops.h" #include "tensorflow/cc/ops/control_flow_ops_internal.h" #include "tensorflow/cc/ops/function_ops.h" #include "tensorflow/cc/ops/sendrecv_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/compiler/jit/deadness_analysis_internal.h" #include "tensorflow/compiler/jit/defs.h" #include "tensorflow/compiler/tf2xla/xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/framework/node_def_util.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/graph/algorithm.h" #include "tensorflow/core/graph/graph_def_builder.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace { absl::StatusOr<bool> HasInputsWithMismatchingDeadness( const DeadnessAnalysis& deadness_analysis, const Node& n) { std::optional<DeadnessAnalysis::DeadnessPredicate> pred; for (const Edge* edge : n.in_edges()) { TF_ASSIGN_OR_RETURN( DeadnessAnalysis::DeadnessPredicate this_pred, deadness_analysis.GetPredicateFor(edge->src(), edge->src_output())); if (pred && *pred != this_pred) { return true; } pred = this_pred; } return false; } using deadness_analysis_internal::ComputePredicates; using deadness_analysis_internal::PredicateMapTy; Status AnalyzeDeadness(Graph* graph, std::unique_ptr<DeadnessAnalysis>* result) { FixupSourceAndSinkEdges(graph); return DeadnessAnalysis::Run(*graph, result); } ops::Switch CreateSwitch(const Scope& root, const string& prefix) { Output value = ops::Placeholder(root.WithOpName(prefix + "/value"), DT_FLOAT); Output predicate = ops::Placeholder(root.WithOpName(prefix + "/pred"), DT_BOOL); return ops::Switch(root.WithOpName(prefix + "/switch"), value, predicate); } TensorId ControlOutputFor(const Output& o) { return {o.node()->name(), Graph::kControlSlot}; } void VLogGraphIfAsked(const Graph& graph) { if (VLOG_IS_ON(3)) { GraphDef graph_def; graph.ToGraphDef(&graph_def); string serialized; ::tensorflow::protobuf::TextFormat::PrintToString(graph_def, &serialized); LOG(INFO) << serialized; } } struct InductionVarInfo { Output induction_var; Output loop_cond; }; InductionVarInfo CreateInductionVariable(const Scope& root, const string& prefix, const string& frame_name, const Output& initial_value) { Output enter_initial_value = ops::internal::Enter( root.WithOpName(prefix + "/enter"), initial_value, frame_name); ops::Merge iv(root.WithOpName(prefix + "/iv"), {enter_initial_value, enter_initial_value}); Output increment_by = ops::Const(root.WithOpName(prefix + "/incr"), 1); Output final_value = ops::Const(root.WithOpName(prefix + "/final"), 10); Output loop_cond_expr = ops::Less(root.WithOpName(prefix + "/cond"), iv.output, final_value); ops::Switch latch(root.WithOpName(prefix + "/latch"), iv.output, loop_cond_expr); ops::internal::Exit exit(root.WithOpName(prefix + "/exit"), latch.output_false); Output iv_next = ops::Add(root.WithOpName(prefix + "/ivnext"), latch.output_true, increment_by); Output next_iteration = ops::NextIteration(root.WithOpName(prefix + "/next_iteration"), iv_next); CHECK(root.graph() ->UpdateEdge(next_iteration.node(), 0, iv.output.node(), 1) .ok()); root.graph()->AddControlEdge(iv.output.node(), increment_by.node()); root.graph()->AddControlEdge(iv.output.node(), final_value.node()); return {iv.output, loop_cond_expr}; } 
InductionVarInfo CreateInductionVariable(const Scope& root, const string& prefix, const string& frame_name, int32_t init) { return CreateInductionVariable( root, prefix, frame_name, ops::Const(root.WithOpName(prefix + "/init"), init)); } struct DependentInductionVar { Output induction_var; ops::Switch latch; }; DependentInductionVar CreateDependentLoopInvariantValue( const Scope& root, const string& prefix, const string& frame_name, const Output& loop_cond, const Output& value) { Output enter_value = ops::internal::Enter(root.WithOpName(prefix + "/enter"), value, frame_name); ops::Merge iv(root.WithOpName(prefix + "/iv"), {enter_value, enter_value}); ops::Switch latch(root.WithOpName(prefix + "/latch"), iv.output, loop_cond); ops::internal::Exit exit(root.WithOpName(prefix + "/exit"), latch.output_false); Output next_iteration = ops::NextIteration( root.WithOpName(prefix + "/next_iteration"), latch.output_true); CHECK(root.graph() ->UpdateEdge(next_iteration.node(), 0, iv.output.node(), 1) .ok()); return {iv.output, latch}; } DependentInductionVar CreateDependentLoopInvariantValue( const Scope& root, const string& prefix, const string& frame_name, const Output& loop_cond, int32_t value) { return CreateDependentLoopInvariantValue( root, prefix, frame_name, loop_cond, ops::Const(root.WithOpName(prefix + "/init"), value)); } TEST(DeadnessAnalysisTest, BasicPositive) { Scope root = Scope::NewRootScope().ExitOnError(); ops::Switch sw = CreateSwitch(root, "0"); Output add = ops::Add(root.WithOpName("add"), sw.output_true, sw.output_false); std::unique_ptr<DeadnessAnalysis> result; TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result)); TF_ASSERT_OK_AND_ASSIGN( bool has_inputs_with_mismatching_deadness, HasInputsWithMismatchingDeadness(*result, *add.node())); EXPECT_TRUE(has_inputs_with_mismatching_deadness); } TEST(DeadnessAnalysisTest, BasicNegative) { Scope root = Scope::NewRootScope().ExitOnError(); Output a = ops::Placeholder(root.WithOpName("a"), DT_FLOAT); Output b = ops::Placeholder(root.WithOpName("b"), DT_FLOAT); Output add = ops::Add(root.WithOpName("add"), a, b); std::unique_ptr<DeadnessAnalysis> result; TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result)); TF_ASSERT_OK_AND_ASSIGN( bool has_inputs_with_mismatching_deadness, HasInputsWithMismatchingDeadness(*result, *add.node())); EXPECT_FALSE(has_inputs_with_mismatching_deadness); } TEST(DeadnessAnalysisTest, AndIsCommutative) { Scope root = Scope::NewRootScope().ExitOnError(); ops::Switch sw_0 = CreateSwitch(root, "0"); ops::Switch sw_1 = CreateSwitch(root, "1"); Output a0 = ops::Add(root.WithOpName("a0"), sw_0.output_false, sw_1.output_false); Output a1 = ops::Add(root.WithOpName("a1"), sw_1.output_false, sw_0.output_false); Output b0 = ops::Add(root.WithOpName("b0"), sw_0.output_false, sw_1.output_true); Output b1 = ops::Add(root.WithOpName("b1"), sw_1.output_true, sw_0.output_false); Output live0 = ops::Add(root.WithOpName("live0"), a0, a1); Output live1 = ops::Add(root.WithOpName("live1"), b0, b1); Output halfdead0 = ops::Add(root.WithOpName("halfdead0"), a0, b0); Output halfdead1 = ops::Add(root.WithOpName("halfdead1"), a1, b1); std::unique_ptr<DeadnessAnalysis> result; TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result)); bool has_inputs_with_mismatching_deadness; TF_ASSERT_OK_AND_ASSIGN( has_inputs_with_mismatching_deadness, HasInputsWithMismatchingDeadness(*result, *live0.node())); EXPECT_FALSE(has_inputs_with_mismatching_deadness); TF_ASSERT_OK_AND_ASSIGN( has_inputs_with_mismatching_deadness, 
HasInputsWithMismatchingDeadness(*result, *live1.node())); EXPECT_FALSE(has_inputs_with_mismatching_deadness); TF_ASSERT_OK_AND_ASSIGN( has_inputs_with_mismatching_deadness, HasInputsWithMismatchingDeadness(*result, *halfdead0.node())); EXPECT_TRUE(has_inputs_with_mismatching_deadness); TF_ASSERT_OK_AND_ASSIGN( has_inputs_with_mismatching_deadness, HasInputsWithMismatchingDeadness(*result, *halfdead1.node())); EXPECT_TRUE(has_inputs_with_mismatching_deadness); } TEST(DeadnessAnalysisTest, AndIsAssociative) { Scope root = Scope::NewRootScope().ExitOnError(); ops::Switch sw_0 = CreateSwitch(root, "0"); ops::Switch sw_1 = CreateSwitch(root, "1"); ops::Switch sw_2 = CreateSwitch(root, "2"); Output a0 = ops::Add(root.WithOpName("a0"), sw_0.output_false, sw_1.output_false); Output a1 = ops::Add(root.WithOpName("a1"), a0, sw_2.output_false); Output b0 = ops::Add(root.WithOpName("b0"), sw_1.output_false, sw_2.output_false); Output b1 = ops::Add(root.WithOpName("b1"), sw_0.output_false, b0); Output add = ops::Add(root.WithOpName("add"), a1, b1); std::unique_ptr<DeadnessAnalysis> result; TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result)); TF_ASSERT_OK_AND_ASSIGN( bool has_inputs_with_mismatching_deadness, HasInputsWithMismatchingDeadness(*result, *add.node())); EXPECT_FALSE(has_inputs_with_mismatching_deadness); } TEST(DeadnessAnalysisTest, OrIsCommutative) { Scope root = Scope::NewRootScope().ExitOnError(); ops::Switch sw_0 = CreateSwitch(root, "0"); ops::Switch sw_1 = CreateSwitch(root, "1"); ops::Merge m0(root.WithOpName("m0"), {sw_0.output_false, sw_1.output_false}); ops::Merge m1(root.WithOpName("m1"), {sw_1.output_false, sw_0.output_false}); ops::Merge m2(root.WithOpName("m2"), {sw_0.output_false, sw_1.output_true}); ops::Merge m3(root.WithOpName("m3"), {sw_1.output_true, sw_0.output_false}); Output live0 = ops::Add(root.WithOpName("live0"), m0.output, m1.output); Output live1 = ops::Add(root.WithOpName("live1"), m2.output, m3.output); Output halfdead0 = ops::Add(root.WithOpName("halfdead0"), m0.output, m2.output); Output halfdead1 = ops::Add(root.WithOpName("halfdead1"), m1.output, m3.output); std::unique_ptr<DeadnessAnalysis> result; TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result)); bool has_inputs_with_mismatching_deadness; TF_ASSERT_OK_AND_ASSIGN( has_inputs_with_mismatching_deadness, HasInputsWithMismatchingDeadness(*result, *live0.node())); EXPECT_FALSE(has_inputs_with_mismatching_deadness); TF_ASSERT_OK_AND_ASSIGN( has_inputs_with_mismatching_deadness, HasInputsWithMismatchingDeadness(*result, *live1.node())); EXPECT_FALSE(has_inputs_with_mismatching_deadness); TF_ASSERT_OK_AND_ASSIGN( has_inputs_with_mismatching_deadness, HasInputsWithMismatchingDeadness(*result, *halfdead0.node())); EXPECT_TRUE(has_inputs_with_mismatching_deadness); TF_ASSERT_OK_AND_ASSIGN( has_inputs_with_mismatching_deadness, HasInputsWithMismatchingDeadness(*result, *halfdead1.node())); EXPECT_TRUE(has_inputs_with_mismatching_deadness); } TEST(DeadnessAnalysisTest, OrIsAssociative) { Scope root = Scope::NewRootScope().ExitOnError(); ops::Switch sw_0 = CreateSwitch(root, "0"); ops::Switch sw_1 = CreateSwitch(root, "1"); ops::Switch sw_2 = CreateSwitch(root, "2"); ops::Merge m0(root.WithOpName("m0"), {sw_0.output_false, sw_1.output_false}); ops::Merge m1(root.WithOpName("m1"), {m0.output, sw_2.output_false}); ops::Merge m2(root.WithOpName("m2"), {sw_1.output_false, sw_2.output_false}); ops::Merge m3(root.WithOpName("m3"), {sw_0.output_false, m2.output}); Output add = ops::Add(root.WithOpName("add"), m1.output, 
m3.output); std::unique_ptr<DeadnessAnalysis> result; TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result)); TF_ASSERT_OK_AND_ASSIGN( bool has_inputs_with_mismatching_deadness, HasInputsWithMismatchingDeadness(*result, *add.node())); EXPECT_FALSE(has_inputs_with_mismatching_deadness); } TEST(DeadnessAnalysisTest, AndOfOr) { Scope root = Scope::NewRootScope().ExitOnError(); ops::Switch sw_0 = CreateSwitch(root, "0"); ops::Switch sw_1 = CreateSwitch(root, "1"); ops::Switch sw_2 = CreateSwitch(root, "2"); ops::Switch sw_3 = CreateSwitch(root, "3"); ops::Merge m0(root.WithOpName("m0"), {sw_0.output_false, sw_1.output_false}); ops::Merge m1(root.WithOpName("m1"), {sw_2.output_false, sw_3.output_false}); Output add0 = ops::Add(root.WithOpName("add0"), m0.output, m1.output); Output add1 = ops::Add(root.WithOpName("add1"), m0.output, m1.output); Output add2 = ops::Add(root.WithOpName("add2"), add0, add1); std::unique_ptr<DeadnessAnalysis> result; TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result)); TF_ASSERT_OK_AND_ASSIGN( bool has_inputs_with_mismatching_deadness, HasInputsWithMismatchingDeadness(*result, *add2.node())); EXPECT_FALSE(has_inputs_with_mismatching_deadness); } TEST(DeadnessAnalysisTest, OrOfAnd) { Scope root = Scope::NewRootScope().ExitOnError(); ops::Switch sw_0 = CreateSwitch(root, "0"); ops::Switch sw_1 = CreateSwitch(root, "1"); ops::Switch sw_2 = CreateSwitch(root, "2"); ops::Switch sw_3 = CreateSwitch(root, "3"); Output add0 = ops::Add(root.WithOpName("add0"), sw_0.output_false, sw_1.output_false); Output add1 = ops::Add(root.WithOpName("add1"), sw_2.output_false, sw_3.output_false); ops::Merge m0(root.WithOpName("m0"), {add0, add1}); ops::Merge m1(root.WithOpName("m1"), {add0, add1}); Output add2 = ops::Add(root.WithOpName("add2"), m0.output, m1.output); std::unique_ptr<DeadnessAnalysis> result; TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result)); TF_ASSERT_OK_AND_ASSIGN( bool has_inputs_with_mismatching_deadness, HasInputsWithMismatchingDeadness(*result, *add2.node())); EXPECT_FALSE(has_inputs_with_mismatching_deadness); } TEST(DeadnessAnalysisTest, AndOrDistributiveSimplified) { Scope root = Scope::NewRootScope().ExitOnError(); ops::Switch sw_0 = CreateSwitch(root, "A"); ops::Switch sw_1 = CreateSwitch(root, "B"); Output add0 = ops::Add(root.WithOpName("and0"), sw_0.output_false, sw_1.output_true); Output add1 = ops::Add(root.WithOpName("and1"), sw_0.output_false, sw_1.output_false); ops::Merge or2(root.WithOpName("or2"), {add0, add1}); Output add3 = ops::Add(root.WithOpName("and3"), or2.output, sw_0.output_false); ops::Merge or4(root.WithOpName("or4"), {add3, sw_0.output_true}); std::unique_ptr<DeadnessAnalysis> result; TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result)); PredicateMapTy predicate_map; TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map)); EXPECT_EQ(predicate_map[ControlOutputFor(or4.output)], "#true"); } TEST(DeadnessAnalysisTest, AndOrDistributive) { Scope root = Scope::NewRootScope().ExitOnError(); ops::Switch sw_0 = CreateSwitch(root, "0"); ops::Switch sw_1 = CreateSwitch(root, "1"); ops::Switch sw_2 = CreateSwitch(root, "2"); ops::Merge m0(root.WithOpName("m0"), {sw_0.output_false, sw_1.output_false}); Output add0 = ops::Add(root.WithOpName("add0"), m0.output, sw_2.output_false); Output add1 = ops::Add(root.WithOpName("add1"), sw_0.output_false, sw_2.output_false); Output add2 = ops::Add(root.WithOpName("add2"), sw_1.output_false, sw_2.output_false); ops::Merge m1(root.WithOpName("m1"), {add1, add2}); Output add3 = 
ops::Add(root.WithOpName("add3"), add0, m1.output); std::unique_ptr<DeadnessAnalysis> result; TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result)); TF_ASSERT_OK_AND_ASSIGN( bool has_inputs_with_mismatching_deadness, HasInputsWithMismatchingDeadness(*result, *add3.node())); EXPECT_FALSE(has_inputs_with_mismatching_deadness); } TEST(DeadnessAnalysisTest, Ternary) { Scope root = Scope::NewRootScope().ExitOnError(); Output predicate = ops::Placeholder(root.WithOpName("predicate"), DT_BOOL); Output true_value = ops::Placeholder(root.WithOpName("true_value"), DT_FLOAT); Output false_value = ops::Placeholder(root.WithOpName("false_value"), DT_FLOAT); ops::Switch predicated_true(root.WithOpName("predicated_true"), true_value, predicate); ops::Switch predicated_false(root.WithOpName("predicated_false"), true_value, predicate); ops::Merge merge(root.WithOpName("ternary"), {predicated_true.output_true, predicated_false.output_false}); Output addend = ops::Placeholder(root.WithOpName("addend"), DT_FLOAT); Output add = ops::Add(root.WithOpName("add"), merge.output, addend); std::unique_ptr<DeadnessAnalysis> result; TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result)); TF_ASSERT_OK_AND_ASSIGN( bool has_inputs_with_mismatching_deadness, HasInputsWithMismatchingDeadness(*result, *add.node())); EXPECT_FALSE(has_inputs_with_mismatching_deadness); } TEST(DeadnessAnalysisTest, Recv) { Scope root = Scope::NewRootScope().ExitOnError(); Output recv_a = ops::_Recv(root.WithOpName("recv_a"), DT_FLOAT, "tensor_a", "sender", 0, "receiver"); Output recv_b = ops::_Recv(root.WithOpName("recv_b"), DT_FLOAT, "tensor_b", "sender", 0, "receiver"); Output add = ops::Add(root.WithOpName("add"), recv_a, recv_b); std::unique_ptr<DeadnessAnalysis> result; TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result)); TF_ASSERT_OK_AND_ASSIGN( bool has_inputs_with_mismatching_deadness, HasInputsWithMismatchingDeadness(*result, *add.node())); EXPECT_TRUE(has_inputs_with_mismatching_deadness); } TEST(DeadnessAnalysisTest, HostRecv) { Scope root = Scope::NewRootScope().ExitOnError(); Output recv_a = ops::_HostRecv(root.WithOpName("recv_a"), DT_FLOAT, "tensor_a", "sender", 0, "receiver"); Output recv_b = ops::_HostRecv(root.WithOpName("recv_b"), DT_FLOAT, "tensor_b", "sender", 0, "receiver"); Output add = ops::Add(root.WithOpName("add"), recv_a, recv_b); std::unique_ptr<DeadnessAnalysis> result; TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result)); TF_ASSERT_OK_AND_ASSIGN( bool has_inputs_with_mismatching_deadness, HasInputsWithMismatchingDeadness(*result, *add.node())); EXPECT_TRUE(has_inputs_with_mismatching_deadness); } TEST(DeadnessAnalysisTest, Loop) { Scope root = Scope::NewRootScope().ExitOnError(); Output iv0 = CreateInductionVariable(root, "iv0", "fr0", 0).induction_var; Output iv1 = CreateInductionVariable(root, "iv1", "fr0", 0).induction_var; Output iv2 = CreateInductionVariable(root, "iv2", "fr0", 1).induction_var; Output add0 = ops::Add(root.WithOpName("add0"), iv0, iv1); Output add1 = ops::Add(root.WithOpName("add1"), iv1, iv2); VLogGraphIfAsked(*root.graph()); { std::unique_ptr<DeadnessAnalysis> result; TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result)); bool has_inputs_with_mismatching_deadness; TF_ASSERT_OK_AND_ASSIGN( has_inputs_with_mismatching_deadness, HasInputsWithMismatchingDeadness(*result, *add0.node())); EXPECT_TRUE(has_inputs_with_mismatching_deadness); TF_ASSERT_OK_AND_ASSIGN( has_inputs_with_mismatching_deadness, HasInputsWithMismatchingDeadness(*result, *add1.node())); 
EXPECT_TRUE(has_inputs_with_mismatching_deadness); } { PredicateMapTy predicate_map; TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map)); EXPECT_EQ(predicate_map[ControlOutputFor(iv0)], "{#true,&,*iv0/cond:0}<fr0>"); EXPECT_EQ(predicate_map[ControlOutputFor(iv1)], "{#true,&,*iv1/cond:0}<fr0>"); EXPECT_EQ(predicate_map[ControlOutputFor(iv2)], "{#true,&,*iv2/cond:0}<fr0>"); EXPECT_EQ(predicate_map[ControlOutputFor(add0)], "({#true,&,*iv0/cond:0}<fr0> & {#true,&,*iv1/cond:0}<fr0>)"); EXPECT_EQ(predicate_map[ControlOutputFor(add1)], "({#true,&,*iv1/cond:0}<fr0> & {#true,&,*iv2/cond:0}<fr0>)"); } } TEST(DeadnessAnalysisTest, ControlEquivalentLoopBodies) { Scope root = Scope::NewRootScope().ExitOnError(); InductionVarInfo iv = CreateInductionVariable(root, "iv0", "loop", 0); Output dependent_iv0 = CreateDependentLoopInvariantValue(root, "div0", "loop", iv.loop_cond, 0) .induction_var; Output dependent_iv1 = CreateDependentLoopInvariantValue(root, "div1", "loop", iv.loop_cond, 0) .induction_var; Output add0 = ops::Add(root.WithOpName("add0"), dependent_iv0, dependent_iv1); VLogGraphIfAsked(*root.graph()); { std::unique_ptr<DeadnessAnalysis> result; TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result)); TF_ASSERT_OK_AND_ASSIGN( bool has_inputs_with_mismatching_deadness, HasInputsWithMismatchingDeadness(*result, *add0.node())); EXPECT_FALSE(has_inputs_with_mismatching_deadness); } { PredicateMapTy predicate_map; TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map, true)); EXPECT_EQ(predicate_map[ControlOutputFor(iv.induction_var)], "{#true,&,*iv0/cond:0}<loop>"); EXPECT_EQ(predicate_map[ControlOutputFor(dependent_iv0)], predicate_map[ControlOutputFor(iv.induction_var)]); EXPECT_EQ(predicate_map[ControlOutputFor(dependent_iv1)], predicate_map[ControlOutputFor(iv.induction_var)]); EXPECT_EQ(predicate_map[ControlOutputFor(add0)], predicate_map[ControlOutputFor(iv.induction_var)]); } { PredicateMapTy predicate_map; TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map, false)); EXPECT_EQ(predicate_map[ControlOutputFor(iv.induction_var)], "{#true,&,*iv0/cond:0}<loop>"); EXPECT_EQ(predicate_map[ControlOutputFor(dependent_iv0)], "{#true,&,(iv0/iv:0 & *iv0/cond:0)}<loop>"); EXPECT_EQ(predicate_map[ControlOutputFor(dependent_iv1)], "{#true,&,(iv0/iv:0 & *iv0/cond:0)}<loop>"); EXPECT_EQ(predicate_map[ControlOutputFor(add0)], "{#true,&,(iv0/iv:0 & *iv0/cond:0)}<loop>"); } } TEST(DeadnessAnalysisTest, LoopInvariantPredicateOnBackedge) { Scope root = Scope::NewRootScope().ExitOnError(); InductionVarInfo iv = CreateInductionVariable(root, "iv0", "frame", 0); DependentInductionVar dependent_iv = CreateDependentLoopInvariantValue(root, "div0", "frame", iv.loop_cond, 0); FixupSourceAndSinkEdges(root.graph()); TF_ASSERT_OK(root.graph()->UpdateEdge( iv.induction_var.node(), 0, dependent_iv.latch.output_true.node(), 0)); VLogGraphIfAsked(*root.graph()); { PredicateMapTy predicate_map; TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map, true)); EXPECT_EQ(predicate_map[ControlOutputFor(dependent_iv.induction_var)], "{#true,&,*iv0/cond:0}<frame>"); } { PredicateMapTy predicate_map; TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map, false)); EXPECT_EQ(predicate_map[ControlOutputFor(dependent_iv.induction_var)], "div0/iv:0"); } } TEST(DeadnessAnalysisTest, ControlEquivalentNestedLoopBodies) { Scope root = Scope::NewRootScope().ExitOnError(); InductionVarInfo iv_outer = CreateInductionVariable(root, "iv_outer", "outer_loop", 0); Output enter_constant_outer_loop = 
ops::internal::Enter( root.WithOpName("constant_enter_outer_loop"), ops::Const(root.WithOpName("constant"), 5), "outer_loop", ops::internal::Enter::Attrs().IsConstant(true)); ops::Switch inner_value(root.WithOpName("outer_is_live"), enter_constant_outer_loop, iv_outer.loop_cond); InductionVarInfo iv_inner = CreateInductionVariable( root, "iv_inner", "inner_loop", inner_value.output_true); Output dependent_outer_iv0 = CreateDependentLoopInvariantValue(root, "dependent_outer_iv0", "outer_loop", iv_outer.loop_cond, 0) .induction_var; Output dependent_outer_iv1 = CreateDependentLoopInvariantValue(root, "dependent_outer_iv1", "outer_loop", iv_outer.loop_cond, 0) .induction_var; Output dependent_inner_iv0 = CreateDependentLoopInvariantValue( root, "dependent_inner_iv0", "inner_loop", iv_inner.loop_cond, dependent_outer_iv0) .induction_var; Output dependent_inner_iv1 = CreateDependentLoopInvariantValue( root, "dependent_inner_iv1", "inner_loop", iv_inner.loop_cond, dependent_outer_iv1) .induction_var; Output add0 = ops::Add(root.WithOpName("add0"), dependent_inner_iv0, dependent_inner_iv1); VLogGraphIfAsked(*root.graph()); { std::unique_ptr<DeadnessAnalysis> result; TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result)); TF_ASSERT_OK_AND_ASSIGN( bool has_inputs_with_mismatching_deadness, HasInputsWithMismatchingDeadness(*result, *add0.node())); EXPECT_FALSE(has_inputs_with_mismatching_deadness); } { PredicateMapTy predicate_map; TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map, true)); EXPECT_EQ(predicate_map[ControlOutputFor(iv_outer.induction_var)], "{#true,&,*iv_outer/cond:0}<outer_loop>"); EXPECT_EQ(predicate_map[ControlOutputFor(iv_inner.induction_var)], "{(*iv_outer/cond:0 & " "{#true,&,*iv_outer/cond:0}<outer_loop>),&,*iv_inner/" "cond:0}<inner_loop;outer_loop>"); EXPECT_EQ(predicate_map[ControlOutputFor(dependent_inner_iv0)], "{{#true,&,(iv_outer/iv:0 & " "*iv_outer/cond:0)}<outer_loop>,&,(*iv_inner/cond:0 & " "iv_inner/iv:0)}<inner_loop;outer_loop>"); EXPECT_EQ(predicate_map[ControlOutputFor(dependent_inner_iv1)], predicate_map[ControlOutputFor(dependent_inner_iv0)]); EXPECT_EQ(predicate_map[ControlOutputFor(add0)], predicate_map[ControlOutputFor(dependent_inner_iv0)]); } { PredicateMapTy predicate_map; TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map, false)); EXPECT_EQ(predicate_map[ControlOutputFor(iv_outer.induction_var)], "{#true,&,*iv_outer/cond:0}<outer_loop>"); EXPECT_EQ(predicate_map[ControlOutputFor(iv_inner.induction_var)], "{(*iv_outer/cond:0 & " "{#true,&,*iv_outer/cond:0}<outer_loop>),&,*iv_inner/" "cond:0}<inner_loop;outer_loop>"); EXPECT_EQ(predicate_map[ControlOutputFor(dependent_inner_iv0)], "{{#true,&,(iv_outer/iv:0 & " "*iv_outer/cond:0)}<outer_loop>,&,(iv_inner/iv:0 & " "*iv_inner/cond:0)}<inner_loop;outer_loop>"); EXPECT_EQ(predicate_map[ControlOutputFor(dependent_inner_iv1)], predicate_map[ControlOutputFor(dependent_inner_iv0)]); EXPECT_EQ(predicate_map[ControlOutputFor(add0)], predicate_map[ControlOutputFor(dependent_inner_iv0)]); } } TEST(DeadnessAnalysisTest, ControlNonEquivalentNestedLoopBodies) { Scope root = Scope::NewRootScope().ExitOnError(); std::array<Output, 2> outer_iv; std::array<Output, 2> inner_iv; for (int i : {0, 1}) { InductionVarInfo iv_outer = CreateInductionVariable(root, "iv_outer", "outer_loop", 0); Output enter_constant_outer_loop = ops::internal::Enter( root.WithOpName("constant_enter_outer_loop"), ops::Const(root.WithOpName("constant"), 5), "outer_loop", ops::internal::Enter::Attrs().IsConstant(true)); ops::Switch 
inner_value(root.WithOpName("outer_is_live"), enter_constant_outer_loop, iv_outer.loop_cond); InductionVarInfo iv_inner = CreateInductionVariable( root, "iv_inner", "inner_loop", inner_value.output_true); outer_iv[i] = iv_outer.induction_var; inner_iv[i] = iv_inner.induction_var; } Output add0 = ops::Add(root.WithOpName("add0"), inner_iv[0], inner_iv[1]); VLogGraphIfAsked(*root.graph()); { std::unique_ptr<DeadnessAnalysis> result; TF_ASSERT_OK(AnalyzeDeadness(root.graph(), &result)); TF_ASSERT_OK_AND_ASSIGN( bool has_inputs_with_mismatching_deadness, HasInputsWithMismatch
1,084
cpp
tensorflow/tensorflow
mark_for_compilation_pass
tensorflow/compiler/jit/mark_for_compilation_pass.cc
tensorflow/compiler/jit/mark_for_compilation_pass_test.cc
#ifndef TENSORFLOW_COMPILER_JIT_MARK_FOR_COMPILATION_PASS_H_ #define TENSORFLOW_COMPILER_JIT_MARK_FOR_COMPILATION_PASS_H_ #include "absl/container/flat_hash_set.h" #include "tensorflow/compiler/jit/compilability_check_util.h" #include "tensorflow/core/common_runtime/optimization_registry.h" namespace tensorflow { extern const char* const kXlaClusterAttr; class MarkForCompilationPass : public GraphOptimizationPass { public: MarkForCompilationPass() = default; Status Run(const GraphOptimizationPassOptions& options) override; private: Status RunForTest(const GraphOptimizationPassOptions& options, bool disable_deadness_analysis, bool deterministic_cluster_names); friend class MarkForCompilationPassTestHelper; }; absl::flat_hash_map<string, std::vector<string>>* GetAllowlistTable(); namespace testing { void ResetClusterSequenceNumber(); absl::flat_hash_set<string> GetKnownXLAAllowlistOp(); } } #endif #include "tensorflow/compiler/jit/mark_for_compilation_pass.h" #include <algorithm> #include <atomic> #include <deque> #include <iterator> #include <limits> #include <map> #include <memory> #include <optional> #include <set> #include <string> #include <unordered_map> #include <unordered_set> #include <utility> #include <vector> #include "absl/base/call_once.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/status/statusor.h" #include "absl/strings/str_join.h" #include "tensorflow/compiler/jit/compilability_check_util.h" #include "tensorflow/compiler/jit/deadness_analysis.h" #include "tensorflow/compiler/jit/defs.h" #include "tensorflow/compiler/jit/device_util.h" #include "tensorflow/compiler/jit/flags.h" #include "tensorflow/compiler/jit/resource_operation_safety_analysis.h" #include "tensorflow/compiler/jit/xla_cluster_util.h" #include "tensorflow/compiler/tf2xla/const_analysis.h" #include "tensorflow/compiler/tf2xla/resource_operation_table.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/service/graphcycles/graphcycles.h" #include "xla/union_find.h" #include "xla/util.h" #include "tensorflow/core/common_runtime/function.h" #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/framework/bounds_check.h" #include "tensorflow/core/framework/graph_def_util.h" #include "tensorflow/core/framework/memory_types.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/tensor.pb.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/graph/algorithm.h" #include "tensorflow/core/graph/control_flow.h" #include "tensorflow/core/lib/gtl/cleanup.h" #include "tensorflow/core/lib/gtl/flatmap.h" #include "tensorflow/core/lib/strings/stringprintf.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/mutex.h" #include "tensorflow/core/platform/statusor.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/public/version.h" #include "tensorflow/core/util/dump_graph.h" namespace tensorflow { namespace { using DeadnessPredicate = DeadnessAnalysis::DeadnessPredicate; using jit::DeviceId; using jit::DeviceSet; const char* kXlaAlreadyClustered = "_XlaAlreadyClustered"; class MarkForCompilationPassImpl { public: struct DebugOptions { bool ignore_deadness_checks; bool ignore_resource_variable_checks; bool ignore_xla_compile_attr; bool deterministic_cluster_names; int max_cluster_size; int min_cluster_size; std::atomic<int64_t>* fuel; bool dump_graphs; }; 
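// Debug knobs for the pass. ignore_deadness_checks skips running
// DeadnessAnalysis in Initialize(), and deterministic_cluster_names makes
// Initialize() fingerprint the graph so cluster names can be derived
// deterministically; the remaining fields presumably bound cluster sizes,
// cap work via the shared fuel counter, and control graph dumping.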
MarkForCompilationPassImpl(DebugOptions debug_options, Graph* graph, FunctionLibraryDefinition* flib_def, Env* env, OptimizerOptions::GlobalJitLevel global_jit_level, bool cpu_global_jit, std::string cluster_name_prefix) : debug_options_(debug_options), graph_(graph), graph_fingerprint_(0), flib_def_(flib_def), env_(env), global_jit_level_(global_jit_level), cpu_global_jit_(cpu_global_jit), cluster_name_prefix_(cluster_name_prefix) {} Status Run(); private: class Cluster { public: Cluster(int tf_graph_node_id, int effective_cluster_size, bool has_functional_control_flow, DeviceSet devices, std::optional<DeviceId> resource_op_device, std::optional<int> resource_var_operation_node_id, std::optional<DeadnessPredicate> deadness_predicate, bool is_xla_compile_attr_true, std::optional<string> xla_scope) : cycles_graph_node_id_(tf_graph_node_id), effective_cluster_size_(effective_cluster_size), has_functional_control_flow_(has_functional_control_flow), devices_(std::move(devices)), resource_op_device_(resource_op_device), deadness_predicate_(deadness_predicate), is_xla_compile_attr_true_(is_xla_compile_attr_true), xla_scope_(std::move(xla_scope)) { if (resource_var_operation_node_id.has_value()) { resource_var_operation_node_ids_.push_back( *resource_var_operation_node_id); } } void Merge(Cluster* other); int GetIdOfOnlyNode() const { DCHECK_EQ(cluster_size(), 1); return cycles_graph_node_id(); } int cluster_size() const { return cluster_size_; } int cycles_graph_node_id() const { return cycles_graph_node_id_; } void set_cycles_graph_node_id(int cycles_graph_node_id) { cycles_graph_node_id_ = cycles_graph_node_id; } int effective_cluster_size() const { return effective_cluster_size_; } bool has_functional_control_flow() const { return has_functional_control_flow_; } const DeviceSet& devices() const { return devices_; } const std::optional<DeviceId>& resource_op_device() const { return resource_op_device_; } const std::optional<DeadnessPredicate>& deadness_predicate() const { return deadness_predicate_; } bool is_xla_compile_attr_true() const { return is_xla_compile_attr_true_; } const std::optional<string>& xla_scope() const { return xla_scope_; } absl::Span<const int> resource_var_operation_node_ids() const { return resource_var_operation_node_ids_; } string DebugString(const Graph& graph) const { Node* node = graph.FindNodeId(cycles_graph_node_id()); if (!node) { return absl::StrCat("NULL NODE IN #", cycles_graph_node_id()); } if (cluster_size() == 1) { return absl::StrCat("<", node->name(), " #", cycles_graph_node_id(), ">"); } return absl::StrCat("<", node->name(), " + ", cluster_size() - 1, " others #", cycles_graph_node_id(), ">"); } private: int cluster_size_ = 1; int cycles_graph_node_id_; int effective_cluster_size_; bool has_functional_control_flow_; DeviceSet devices_; std::optional<DeviceId> resource_op_device_; std::optional<DeadnessPredicate> deadness_predicate_; bool is_xla_compile_attr_true_; std::optional<string> xla_scope_; std::vector<int> resource_var_operation_node_ids_; Cluster(const Cluster&) = delete; void operator=(const Cluster&) = delete; }; Node* GetOnlyNodeIn(const Cluster& cluster); bool IsSinkLike(const Cluster& cluster); bool IsScalarIntegerResourceOperation(const Cluster& cluster); absl::StatusOr<bool> Initialize(); template <typename FnTy> absl::StatusOr<bool> ForEachEdgeInPostOrder(FnTy fn); Status RunEdgeContractionLoop(); Status DeclusterNodes(); Status CreateClusters(); Status DumpDebugInfo(); bool IsCompilationCandidate(Node* n) const { return 
compilation_candidates_.find(n) != compilation_candidates_.end(); } absl::StatusOr<bool> TryToContractEdge(Cluster* from, Cluster* to); Status FindCompilationCandidates(); bool CompilationDisallowedByXlaCompileAttr(Node* node); Status BuildInitialClusterSet(); absl::StatusOr<bool> ShouldCompileClusterImpl(const Cluster& cluster); absl::StatusOr<bool> ShouldCompileCluster(const Cluster& cluster); absl::StatusOr<bool> ClusteringWillIntroduceInterDeviceDependency( const Cluster& from, const Cluster& to); bool ShouldCompile(bool is_xla_compile_attr_true, const DeviceType& device_type, XlaOpRegistry::AutoclusteringPolicy policy) { return is_xla_compile_attr_true || policy == XlaOpRegistry::AutoclusteringPolicy::kAlways || (policy == XlaOpRegistry::AutoclusteringPolicy::kIfEnabledGlobally && global_jit_level_ != OptimizerOptions::OFF) || (device_type.type_string() == DEVICE_CPU && policy == XlaOpRegistry::AutoclusteringPolicy::kIfExplicitlyRequested && cpu_global_jit_); } absl::StatusOr<bool> AreDevicesCompatible(const Cluster& cluster_a, const Cluster& cluster_b); void DumpPostClusteringGraphs(); void VLogClusteringSummary(); Cluster* MakeNewCluster(int cycles_graph_node_id, int effective_cluster_size, bool has_functional_control_flow, const DeviceSet& device_set, std::optional<DeviceId> resource_op_device, std::optional<int> resource_var_operation_node_id, std::optional<DeadnessPredicate> deadness_predicate, bool is_xla_compile_attr_true, std::optional<string> xla_scope) { cluster_storage_.push_back(std::make_unique<Cluster>( cycles_graph_node_id, effective_cluster_size, has_functional_control_flow, device_set, resource_op_device, resource_var_operation_node_id, deadness_predicate, is_xla_compile_attr_true, xla_scope)); return cluster_storage_.back().get(); } std::optional<string> GetXlaScope(Node* n); Cluster* GetClusterForNode(Node* n) { return cluster_for_node_[n->id()].Get(); } Cluster* GetClusterForCyclesGraphNode(int node_id) { if (node_id >= graph_->num_node_ids() || graph_->FindNodeId(node_id) == nullptr) { return nullptr; } Cluster* cluster = cluster_for_node_[node_id].Get(); if (cluster) { DCHECK_EQ(cluster->cycles_graph_node_id(), node_id); } return cluster; } bool LogNotContractableAndReturnFalse(Cluster* from, Cluster* to, absl::string_view reason); std::vector<int> FindAlternatePathForDebugging(int from, int to); string DebugStringForCyclesGraphNode(int node_id, bool* found_unclustered); string DescribePotentialCycle(int from, int to); bool MergeClusters(Cluster* cluster_from, Cluster* cluster_to) { int from = cluster_from->cycles_graph_node_id(); int to = cluster_to->cycles_graph_node_id(); auto optional_merged_node = cycles_graph_.ContractEdge(from, to); if (!optional_merged_node.has_value()) { VLOG(3) << "Could not contract " << cluster_from->DebugString(*graph_) << " -> " << cluster_to->DebugString(*graph_) << " because contracting the edge would create a cycle via " << DescribePotentialCycle(from, to) << "."; return false; } cluster_from->Merge(cluster_to); cluster_from->set_cycles_graph_node_id(optional_merged_node.value()); cluster_for_node_[from].Merge(&cluster_for_node_[to]); return true; } string EdgeContractionFailureMsg(Cluster* from, Cluster* to, absl::string_view reason) { return absl::StrCat("Could not contract ", from->DebugString(*graph_), " -> ", to->DebugString(*graph_), " because ", reason, "."); } DebugOptions debug_options_; Graph* graph_; uint64 graph_fingerprint_; FunctionLibraryDefinition* flib_def_; Env* env_; OptimizerOptions::GlobalJitLevel 
global_jit_level_; bool cpu_global_jit_; const std::string cluster_name_prefix_; absl::flat_hash_map<const Cluster*, bool> should_compile_cluster_cache_; jit::DeviceInfoCache device_info_cache_; bool initialized_ = false; bool edges_contracted_ = false; bool clusters_created_ = false; std::vector<std::unique_ptr<Cluster>> cluster_storage_; std::vector<UnionFind<Cluster*>> cluster_for_node_; absl::flat_hash_set<const Node*> declustered_nodes_; GraphCycles cycles_graph_; OrderedNodeSet compilation_candidates_; std::unique_ptr<DeadnessAnalysis> deadness_analysis_; int64_t iteration_count_ = 0; absl::flat_hash_set<std::pair<int, int>> unsafe_resource_deps_; }; std::vector<int> MarkForCompilationPassImpl::FindAlternatePathForDebugging( int from, int to) { std::vector<int> rpo = cycles_graph_.AllNodesInPostOrder(); absl::c_reverse(rpo); absl::flat_hash_map<int, int> best_pred_for_node; best_pred_for_node[from] = -1; int rpo_index = 0, current_rpo_node; do { current_rpo_node = rpo[rpo_index++]; std::optional<int> some_pred, preferred_pred; for (int pred : cycles_graph_.Predecessors(current_rpo_node)) { if (!best_pred_for_node.contains(pred)) { continue; } if (current_rpo_node == to && pred == from) { continue; } some_pred = pred; if (GetClusterForCyclesGraphNode(pred) == nullptr) { preferred_pred = pred; } } if (some_pred || preferred_pred) { best_pred_for_node[current_rpo_node] = preferred_pred.has_value() ? *preferred_pred : *some_pred; } } while (current_rpo_node != to); auto get_best_pred = [&](int n) { auto it = best_pred_for_node.find(n); CHECK(it != best_pred_for_node.end()); return it->second; }; std::vector<int> path; int current_path_node = get_best_pred(to); while (current_path_node != from) { path.push_back(current_path_node); current_path_node = get_best_pred(current_path_node); } absl::c_reverse(path); return path; } string MarkForCompilationPassImpl::DebugStringForCyclesGraphNode( int cycles_graph_node_id, bool* found_unclustered) { Cluster* cluster = GetClusterForCyclesGraphNode(cycles_graph_node_id); if (cluster) { return cluster->DebugString(*graph_); } *found_unclustered = true; if (cycles_graph_node_id >= graph_->num_node_ids()) { return absl::StrCat("<oob #", cycles_graph_node_id, ">"); } Node* node = graph_->FindNodeId(cycles_graph_node_id); if (!node) { return absl::StrCat("<bad #", cycles_graph_node_id, ">"); } return node->name(); } string MarkForCompilationPassImpl::DescribePotentialCycle(int from, int to) { std::vector<string> path_str; bool found_unclustered = false; absl::c_transform(FindAlternatePathForDebugging(from, to), std::back_inserter(path_str), [&](int node_id) { return DebugStringForCyclesGraphNode(node_id, &found_unclustered); }); return absl::StrCat(!found_unclustered ? 
"(all clusters) " : "", "[", absl::StrJoin(path_str, ","), "]"); } void MarkForCompilationPassImpl::Cluster::Merge(Cluster* other) { cluster_size_ += other->cluster_size_; effective_cluster_size_ += other->effective_cluster_size_; has_functional_control_flow_ |= other->has_functional_control_flow_; devices_.UnionWith(other->devices_); DCHECK(!(resource_op_device_.has_value() && other->resource_op_device_.has_value()) || *resource_op_device_ == *other->resource_op_device_) << "AreDevicesCompatible should have returned false otherwise!"; if (!resource_op_device_.has_value()) { resource_op_device_ = other->resource_op_device_; } is_xla_compile_attr_true_ |= other->is_xla_compile_attr_true_; if (!xla_scope_.has_value()) { xla_scope_ = std::move(other->xla_scope_); } resource_var_operation_node_ids_.reserve( resource_var_operation_node_ids_.size() + other->resource_var_operation_node_ids_.size()); absl::c_copy(other->resource_var_operation_node_ids_, std::back_inserter(resource_var_operation_node_ids_)); other->resource_var_operation_node_ids_.clear(); } Status IgnoreResourceOpForSafetyAnalysis( jit::DeviceInfoCache* device_info_cache, const Node& n, bool* ignore) { if (n.assigned_device_name().empty()) { *ignore = false; return absl::OkStatus(); } TF_ASSIGN_OR_RETURN( const XlaOpRegistry::DeviceRegistration* registration, device_info_cache->GetCompilationDevice(n.assigned_device_name())); if (!registration) { *ignore = true; } else { *ignore = registration->cluster_resource_variable_ops_unsafely; } return absl::OkStatus(); } absl::StatusOr<bool> MarkForCompilationPassImpl::Initialize() { TF_RET_CHECK(!initialized_ && !edges_contracted_ && !clusters_created_); initialized_ = true; TF_RETURN_IF_ERROR(FindCompilationCandidates()); if (compilation_candidates_.empty()) { VLOG(2) << "No compilable candidates"; return false; } TF_ASSIGN_OR_RETURN(bool cycle_detection_graph_ok, CreateCycleDetectionGraph(graph_, &cycles_graph_)); if (!cycle_detection_graph_ok) { VLOG(2) << "Could not form cycle detection graph"; return false; } if (!debug_options_.ignore_deadness_checks) { XLA_SCOPED_LOGGING_TIMER_LEVEL("DeadnessAnalysis", 1); TF_RETURN_IF_ERROR(DeadnessAnalysis::Run(*graph_, &deadness_analysis_)); } if (debug_options_.deterministic_cluster_names) { TF_ASSIGN_OR_RETURN(graph_fingerprint_, FingerprintGraph(*graph_)); } TF_RETURN_IF_ERROR(BuildInitialClusterSet()); return true; } template <typename FnTy> absl::StatusOr<bool> MarkForCompilationPassImpl::ForEachEdgeInPostOrder( FnTy fn) { bool changed = false; for (int32_t node : cycles_graph_.AllNodesInPostOrder()) { Cluster* cluster_from = GetClusterForCyclesGraphNode(node); if (!cluster_from) { continue; } std::vector<int32> successors_copy = cycles_graph_.SuccessorsCopy(cluster_from->cycles_graph_node_id()); for (int to : successors_copy) { iteration_count_++; Cluster* cluster_to = GetClusterForCyclesGraphNode(to); if (!cluster_to) { continue; } TF_ASSIGN_OR_RETURN(bool contracted_edge, fn(cluster_from, cluster_to)); changed |= contracted_edge; } } return changed; } Node* MarkForCompilationPassImpl::GetOnlyNodeIn(const Cluster& cluster) { return cluster.cluster_size() == 1 ? 
graph_->FindNodeId(cluster.GetIdOfOnlyNode()) : nullptr; } bool MarkForCompilationPassImpl::IsSinkLike(const Cluster& cluster) { if (Node* n = GetOnlyNodeIn(cluster)) { return n->type_string() == "NoOp" && n->out_edges().size() == 1 && (*n->out_edges().begin())->dst()->IsSink(); } return false; } bool MarkForCompilationPassImpl::IsScalarIntegerResourceOperation( const Cluster& cluster) { Node* n = GetOnlyNodeIn(cluster); if (!n) { return false; } if (n->type_string() != "AssignAddVariableOp" && n->type_string() != "AssignSubVariableOp") { return false; } DataType dtype; if (!TryGetNodeAttr(n->def(), "dtype", &dtype) || !DataTypeIsInteger(dtype)) { return false; } Node* const_input = nullptr; for (const Edge* e : n->in_edges()) { if (!e->IsControlEdge() && e->src()->IsConstant()) { const_input = e->src(); break; } } if (!const_input) { return false; } const TensorProto* proto = nullptr; if (!TryGetNodeAttr(const_input->def(), "value", &proto)) { return false; } return TensorShapeUtils::IsScalar(proto->tensor_shape()); } Status MarkForCompilationPassImpl::RunEdgeContractionLoop() { TF_RET_CHECK(initialized_ && !edges_contracted_ && !clusters_created_); edges_contracted_ = true;
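MergeClusters above only fuses two clusters when GraphCycles::ContractEdge succeeds, that is, when merging the two endpoints cannot close a cycle, and FindAlternatePathForDebugging reconstructs the offending alternate path for the error message. The following standalone sketch illustrates that rule in plain C++; it is not the real GraphCycles API, and ContractionWouldCreateCycle plus the adjacency-list graph are illustrative names only. Contracting an edge u->v is unsafe exactly when some other path from u to v exists.

#include <cstdio>
#include <queue>
#include <vector>

// Returns true if `to` is still reachable from `from` after ignoring the
// direct edge from->to. If so, contracting from->to would merge two nodes
// that remain connected through another path, creating a cycle.
bool ContractionWouldCreateCycle(const std::vector<std::vector<int>>& succ,
                                 int from, int to) {
  std::vector<bool> seen(succ.size(), false);
  std::queue<int> work;
  for (int s : succ[from]) {
    if (s != to && !seen[s]) {
      seen[s] = true;
      work.push(s);
    }
  }
  while (!work.empty()) {
    int n = work.front();
    work.pop();
    if (n == to) return true;
    for (int s : succ[n]) {
      if (!seen[s]) {
        seen[s] = true;
        work.push(s);
      }
    }
  }
  return false;
}

int main() {
  // Graph: 0 -> 1, 1 -> 2, 0 -> 2.
  std::vector<std::vector<int>> succ = {{1, 2}, {2}, {}};
  // Contracting 0->2 is unsafe: the path 0 -> 1 -> 2 would become a cycle.
  std::printf("contract 0->2 creates cycle: %d\n",
              ContractionWouldCreateCycle(succ, 0, 2));
  // Contracting 1->2 is safe: no other path connects 1 to 2.
  std::printf("contract 1->2 creates cycle: %d\n",
              ContractionWouldCreateCycle(succ, 1, 2));
  return 0;
}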
#include "tensorflow/compiler/jit/mark_for_compilation_pass.h" #include <algorithm> #include <memory> #include <set> #include <string> #include <unordered_map> #include <utility> #include <vector> #include "absl/container/flat_hash_map.h" #include "absl/memory/memory.h" #include "absl/strings/match.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_split.h" #include "absl/strings/string_view.h" #include "tensorflow/cc/framework/ops.h" #include "tensorflow/cc/ops/array_ops.h" #include "tensorflow/cc/ops/control_flow_ops_internal.h" #include "tensorflow/cc/ops/function_ops.h" #include "tensorflow/cc/ops/functional_ops.h" #include "tensorflow/cc/ops/list_ops.h" #include "tensorflow/cc/ops/resource_variable_ops.h" #include "tensorflow/cc/ops/sendrecv_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/compiler/jit/defs.h" #include "tensorflow/compiler/jit/mark_for_compilation_pass_test_helper.h" #include "tensorflow/compiler/jit/node_matchers.h" #include "tensorflow/compiler/jit/xla_cluster_util.h" #include "tensorflow/compiler/tf2xla/xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/common_runtime/graph_def_builder_util.h" #include "tensorflow/core/framework/node_def_util.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/graph/algorithm.h" #include "tensorflow/core/graph/graph_def_builder.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/test.h" using ::tensorflow::testing::FindNodeByName; namespace tensorflow { namespace { static bool Initialized = [] { tensorflow::GetXlaDeviceFlags()->tf_xla_enable_xla_devices = true; return true; }(); REGISTER_OP("UncompilableNullary").Output("o: float"); REGISTER_OP("UncompilableUnary").Input("a: float").Output("o: float"); std::unordered_map<string, string> GetClusters(const Graph& graph) { std::unordered_map<string, string> ids; for (Node* node : graph.nodes()) { string cluster; if (TryGetNodeAttr(node->attrs(), kXlaClusterAttr, &cluster)) { CHECK(!cluster.empty()); ids[node->name()] = cluster; } } if (VLOG_IS_ON(2)) { VLOG(2) << "Clusters:"; for (const auto& p : ids) { VLOG(2) << " " << p.first << " -> " << p.second; } } return ids; } std::set<string> GetClusterNames(const Graph& graph) { std::set<string> names; for (Node* node : graph.nodes()) { string cluster; if (TryGetNodeAttr(node->attrs(), kXlaClusterAttr, &cluster)) { CHECK(!cluster.empty()); names.insert(cluster); } } return names; } absl::flat_hash_map<string, std::vector<string>> GetClusterSets( const Graph& g, std::vector<string>* cluster_names = nullptr) { CHECK(cluster_names == nullptr || cluster_names->empty()); absl::flat_hash_map<string, std::vector<string>> cluster_sets; for (const auto& p : GetClusters(g)) { cluster_sets[p.second].push_back(p.first); } for (auto& p : cluster_sets) { if (cluster_names != nullptr) { cluster_names->push_back(p.first); } std::sort(p.second.begin(), p.second.end()); } if (cluster_names != nullptr) { std::sort(cluster_names->begin(), cluster_names->end()); } return cluster_sets; } TEST(XlaCompilationTest, Chains) { std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); { GraphDefBuilder builder(GraphDefBuilder::kFailImmediately); Node* a = ops::SourceOp("UncompilableNullary", builder.opts().WithName("A")); Node* b = ops::UnaryOp("Relu", a, builder.opts().WithName("B")); Node* c = ops::UnaryOp("Relu", 
b, builder.opts().WithName("C")); Node* d = ops::UnaryOp("UncompilableUnary", c, builder.opts().WithName("D")); Node* e = ops::UnaryOp("Relu", d, builder.opts().WithName("E")); ops::UnaryOp("Relu", e, builder.opts().WithName("F")); TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get())); } TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph)); auto clusters = GetClusters(*graph); EXPECT_EQ(4, clusters.size()); EXPECT_EQ(clusters["B"], clusters["C"]); EXPECT_EQ(clusters["E"], clusters["F"]); EXPECT_NE(clusters["B"], clusters["E"]); EXPECT_TRUE(clusters.find("A") == clusters.cend()); EXPECT_TRUE(clusters.find("D") == clusters.cend()); } TEST(XlaCompilationTest, UncompilableCycles) { std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); { GraphDefBuilder builder(GraphDefBuilder::kFailImmediately); Node* a = ops::SourceOp("Const", builder.opts() .WithName("A") .WithAttr("dtype", DT_FLOAT) .WithAttr("value", Tensor())); Node* b = ops::UnaryOp("UncompilableUnary", a, builder.opts().WithName("B")); ops::BinaryOp("MatMul", a, b, builder.opts().WithName("C")); TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get())); } TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph)); auto clusters = GetClusters(*graph); EXPECT_TRUE(clusters.empty()); } TEST(XlaCompilationTest, CompilableCycles) { std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); { GraphDefBuilder builder(GraphDefBuilder::kFailImmediately); Node* a = ops::SourceOp("Const", builder.opts() .WithName("A") .WithAttr("dtype", DT_FLOAT) .WithAttr("value", Tensor())); Node* b = ops::UnaryOp("Relu", a, builder.opts().WithName("B")); ops::BinaryOp("MatMul", a, b, builder.opts().WithName("C")); TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get())); } TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph)); auto clusters = GetClusters(*graph); EXPECT_EQ(3, clusters.size()); EXPECT_EQ(clusters["A"], clusters["B"]); EXPECT_EQ(clusters["A"], clusters["C"]); } TEST(XlaCompilationTest, StringUnsupported) { std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); { GraphDefBuilder builder(GraphDefBuilder::kFailImmediately); Node* a = ops::SourceOp( "Const", builder.opts() .WithName("A") .WithAttr("dtype", DT_STRING) .WithAttr("value", Tensor(DT_STRING, TensorShape()))); Node* b = ops::UnaryOp("EncodeBase64", a, builder.opts().WithName("B")); ops::BinaryOp("StringSplit", a, b, builder.opts().WithName("C")); TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get())); } TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph)); auto clusters = GetClusters(*graph); EXPECT_TRUE(clusters.empty()); } TEST(XlaCompilationTest, WhereUnsupported) { std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); { GraphDefBuilder builder(GraphDefBuilder::kFailImmediately); Node* a = ops::SourceOp("Const", builder.opts() .WithName("A") .WithAttr("dtype", DT_INT32) .WithAttr("value", Tensor())); Node* b = ops::UnaryOp("Where", a, builder.opts().WithName("B")); ops::BinaryOp("Gather", b, a, builder.opts().WithName("C")); TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get())); } TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph)); auto clusters = GetClusters(*graph); EXPECT_TRUE(!clusters.empty()); } TEST(XlaCompilationTest, HalfSupported) { std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); { GraphDefBuilder builder(GraphDefBuilder::kFailImmediately); Tensor t(DT_HALF, TensorShape()); t.scalar<Eigen::half>()() = 
static_cast<Eigen::half>(0.0f); Node* a = ops::SourceOp("Const", builder.opts() .WithName("A") .WithAttr("dtype", DT_HALF) .WithAttr("value", t)); Node* b = ops::UnaryOp("Neg", a, builder.opts().WithName("B")); ops::BinaryOp("MatMul", a, b, builder.opts().WithName("C")); TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get())); } TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph)); auto clusters = GetClusters(*graph); EXPECT_FALSE(clusters.empty()); } TEST(XlaCompilationTest, PartitionedCallUnsupported) { FunctionDef compilable = FunctionDefHelper::Define( "CompilableFn", {"n_a:float", "n_b:float"}, {"n_c:float"}, {}, {{{"n_c"}, "Add", {"n_a", "n_b"}, {{"T", DT_FLOAT}}}}); FunctionDef uncompilable = FunctionDefHelper::Define("UncompilableFn", {"n_a:float"}, {"n_c:float"}, {}, {{{"n_c"}, "UncompilableUnary", {"n_a"}}}); FunctionDefLibrary flib; *flib.add_function() = compilable; *flib.add_function() = uncompilable; FunctionLibraryDefinition flib_def(OpRegistry::Global(), flib); std::unique_ptr<Graph> graph(new Graph(&flib_def)); Scope root = Scope::NewRootScope().ExitOnError(); Output a = ops::Placeholder(root.WithOpName("A"), DT_FLOAT); NameAttrList b_name_attr; b_name_attr.set_name("CompilableFn"); ops::PartitionedCall b(root.WithOpName("B"), {a, a}, {DT_FLOAT}, b_name_attr); NameAttrList c_name_attr; c_name_attr.set_name("UncompilableFn"); ops::PartitionedCall c(root.WithOpName("C"), {a}, {DT_FLOAT}, c_name_attr); Output d = ops::Add(root.WithOpName("D"), b.output.front(), c.output.front()); TF_ASSERT_OK(root.ToGraph(graph.get())); TF_ASSERT_OK( MarkForCompilationPassTestHelper::MarkForCompilation(&graph, &flib_def)); auto clusters = GetClusters(*graph); EXPECT_EQ(2, clusters.size()); EXPECT_FALSE(clusters["B"].empty()); EXPECT_TRUE(clusters["C"].empty()); EXPECT_EQ(clusters["B"], clusters["D"]); } TEST(XlaCompilationTest, FunctionCalls) { FunctionDef compilable = FunctionDefHelper::Define( "CompilableFn", {"n_a:float", "n_b:float"}, {"n_c:float"}, {}, {{{"n_c"}, "Add", {"n_a", "n_b"}, {{"T", DT_FLOAT}}}}); FunctionDef uncompilable = FunctionDefHelper::Define("UncompilableFn", {"n_a:float"}, {"n_c:float"}, {}, {{{"n_c"}, "UncompilableUnary", {"n_a"}}}); FunctionDef noinline = compilable; noinline.mutable_signature()->set_name("NoInlineFn"); AddAttr("_noinline", static_cast<bool>(true), noinline.mutable_attr()); FunctionDefLibrary flib; *flib.add_function() = compilable; *flib.add_function() = uncompilable; *flib.add_function() = noinline; FunctionLibraryDefinition flib_def(OpRegistry::Global(), flib); std::unique_ptr<Graph> graph(new Graph(&flib_def)); { GraphDefBuilder builder(GraphDefBuilder::kFailImmediately, &flib_def); Node* a = ops::SourceOp("UncompilableNullary", builder.opts().WithName("A")); Node* b = ops::BinaryOp("CompilableFn", a, a, builder.opts().WithName("B")); Node* c = ops::UnaryOp("Relu", b, builder.opts().WithName("C")); ops::UnaryOp("UncompilableFn", c, builder.opts().WithName("D")); ops::BinaryOp("NoInlineFn", c, c, builder.opts().WithName("E")); TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get())); } TF_ASSERT_OK( MarkForCompilationPassTestHelper::MarkForCompilation(&graph, &flib_def)); auto clusters = GetClusters(*graph); EXPECT_EQ(2, clusters.size()); EXPECT_FALSE(clusters["C"].empty()); EXPECT_EQ(clusters["C"], clusters["E"]); EXPECT_TRUE(clusters.find("A") == clusters.cend()); EXPECT_TRUE(clusters.find("B") == clusters.cend()); EXPECT_TRUE(clusters.find("D") == clusters.cend()); } TEST(XlaCompilationTest, 
CallXlaDeviceFuncWithResourceOp) { FunctionDef compilable = FunctionDefHelper::Define( "FnWithResourceOp", {"var:resource", "val:float"}, {"retval:float"}, {}, {{{"assign_op"}, "AssignVariableOp", {"var", "val"}, {{"dtype", DT_FLOAT}}}, {{"retval"}, "Identity", {"val"}, {{"T", DT_FLOAT}}, {"assign_op"}}}); FunctionDefLibrary flib; *flib.add_function() = compilable; FunctionLibraryDefinition flib_def(OpRegistry::Global(), flib); std::unique_ptr<Graph> graph(new Graph(&flib_def)); { GraphDefBuilder builder(GraphDefBuilder::kFailImmediately, &flib_def); Node* resource = ops::SourceOp("VarHandleOp", builder.opts() .WithName("varhandle") .WithAttr("dtype", DT_FLOAT) .WithAttr("shape", TensorShape({}))); Tensor const_tensor(DT_FLOAT, TensorShape({})); const_tensor.scalar<float>()() = 42.0f; Node* value = ops::SourceOp("Const", builder.opts() .WithName("const") .WithAttr("value", const_tensor) .WithAttr("dtype", DT_FLOAT)); Node* call = ops::BinaryOp("FnWithResourceOp", resource, value, builder.opts().WithName("A")); Node* tanh0 = ops::UnaryOp("Tanh", call, builder.opts().WithName("tanh0")); Node* tanh1 = ops::UnaryOp("Tanh", tanh0, builder.opts().WithName("tanh1")); ops::UnaryOp("Tanh", tanh1, builder.opts().WithName("tanh2")); TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get())); } string xla_cpu_device = "/job:worker/replica:0/task:0/device:XLA_CPU:0"; testing::FindNodeByName(graph.get(), "A") ->set_assigned_device_name(xla_cpu_device); testing::FindNodeByName(graph.get(), "tanh0") ->set_assigned_device_name(xla_cpu_device); testing::FindNodeByName(graph.get(), "tanh1") ->set_assigned_device_name(xla_cpu_device); testing::FindNodeByName(graph.get(), "tanh2") ->set_assigned_device_name(xla_cpu_device); TF_ASSERT_OK( MarkForCompilationPassTestHelper::MarkForCompilation(&graph, &flib_def)); auto clusters = GetClusters(*graph); EXPECT_NE(clusters["A"], ""); } static Status GradForUnaryCwise(FunctionDef* g, std::vector<FunctionDefHelper::Node> nodes) { for (auto& n : nodes) { if (n.attr.empty()) { n.attr = {{"T", DT_FLOAT}}; } } *g = FunctionDefHelper::Define( {"x: float", "dy: float"}, {"dx: float"}, {}, nodes); return absl::OkStatus(); } Status SupportedGrad(const AttrSlice& attrs, FunctionDef* g) { return GradForUnaryCwise(g, { {{"y"}, "Tanh", {"x"}}, {{"y2"}, "Square", {"y"}, {}, {"dy"}}, FunctionDefHelper::Const("one", 1.0f), {{"a"}, "Sub", {"one", "y2"}}, {{"dx"}, "Mul", {"dy", "a"}}, }); } REGISTER_OP_GRADIENT("Supported", SupportedGrad); Status UnsupportedGrad(const AttrSlice& attrs, FunctionDef* g) { return GradForUnaryCwise(g, { {{"y"}, "Tanh", {"x"}}, {{"y2"}, "UncompilableUnary", {"y"}, {}, {"dy"}}, FunctionDefHelper::Const("one", 1.0f), {{"a"}, "Sub", {"one", "y2"}}, {{"dx"}, "Mul", {"dy", "a"}}, }); } REGISTER_OP_GRADIENT("Unsupported", UnsupportedGrad); TEST(XlaCompilationTest, SymbolicGradients) { std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); { GraphDefBuilder builder(GraphDefBuilder::kFailImmediately); Node* a = ops::SourceOp("UncompilableNullary", builder.opts().WithName("A")); NodeBuilder b_builder("B", "SymbolicGradient", builder.opts().op_registry()); NameAttrList b_name_attr; b_name_attr.set_name("Supported"); b_builder.Attr("f", b_name_attr); b_builder.Attr("Tin", {DT_FLOAT, DT_FLOAT}); b_builder.Attr("Tout", {DT_FLOAT}); b_builder.Input({a, a}); Node* b = builder.opts().FinalizeBuilder(&b_builder); Node* c = ops::UnaryOp("Relu", b, builder.opts().WithName("C")); NodeBuilder d_builder("D", "SymbolicGradient", builder.opts().op_registry()); NameAttrList 
d_name_attr; d_name_attr.set_name("Unsupported"); d_builder.Attr("f", d_name_attr); d_builder.Attr("Tin", {DT_FLOAT, DT_FLOAT}); d_builder.Attr("Tout", {DT_FLOAT}); d_builder.Input({c, c}); builder.opts().FinalizeBuilder(&d_builder); TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get())); } TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph)); auto clusters = GetClusters(*graph); EXPECT_EQ(2, clusters.size()); EXPECT_FALSE(clusters["B"].empty()); EXPECT_EQ(clusters["B"], clusters["C"]); EXPECT_TRUE(clusters.find("A") == clusters.cend()); EXPECT_TRUE(clusters.find("D") == clusters.cend()); } TEST(XlaCompilationTest, Loops) { Scope root = Scope::NewRootScope().ExitOnError(); auto a = ops::Placeholder(root.WithOpName("A"), DT_FLOAT); auto b = ops::Placeholder(root.WithOpName("B"), DT_FLOAT); auto c = ops::Add(root.WithOpName("C"), a, b); auto enter = ops::internal::Enter(root, c, "aframe"); auto next_iter = ops::NextIteration(root, enter); auto exit = ops::internal::Exit(root, next_iter); auto d = ops::Add(root.WithOpName("D"), c, exit); std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); TF_EXPECT_OK(root.ToGraph(graph.get())); TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph)); auto clusters = GetClusters(*graph); EXPECT_EQ(0, clusters.size()); } TEST(XlaCompilationTest, CyclesWithAllDifferentScopesGlobalJitOverridden) { std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); { GraphDefBuilder builder(GraphDefBuilder::kFailImmediately); Node* a = ops::SourceOp("Const", builder.opts() .WithName("A") .WithAttr("dtype", DT_FLOAT) .WithAttr("value", Tensor()) .WithAttr(kXlaScopeAttr, "ScopeA")); Node* b = ops::UnaryOp( "Relu", a, builder.opts().WithName("B").WithAttr(kXlaScopeAttr, "ScopeB")); ops::BinaryOp( "MatMul", a, b, builder.opts().WithName("C").WithAttr(kXlaScopeAttr, "ScopeC")); TF_CHECK_OK(GraphDefBuilderToGraph(builder, graph.get())); } FunctionDefLibrary flib; FunctionLibraryDefinition flib_def(graph->op_registry(), flib); TF_ASSERT_OK( MarkForCompilationPassTestHelper::MarkForCompilation(&graph, &flib_def)); auto clusters = GetClusters(*graph); EXPECT_EQ(3, clusters.size()); EXPECT_EQ(clusters["A"], clusters["B"]); EXPECT_EQ(clusters["A"], clusters["C"]); } TEST(XlaCompilationTest, CyclesWithAllDifferentScopes) { std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); { GraphDefBuilder builder(GraphDefBuilder::kFailImmediately); Node* a = ops::SourceOp("Const", builder.opts() .WithName("A") .WithAttr("dtype", DT_FLOAT) .WithAttr("value", Tensor()) .WithAttr(kXlaScopeAttr, "ScopeA")); Node* b = ops::UnaryOp( "Relu", a, builder.opts().WithName("B").WithAttr(kXlaScopeAttr, "ScopeB")); ops::BinaryOp( "MatMul", a, b, builder.opts().WithName("C").WithAttr(kXlaScopeAttr, "ScopeC")); TF_CHECK_OK(GraphDefBuilderToGraph(builder, graph.get())); } TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation( &graph, MarkForCompilationPassTestHelper::Options().WithNoGlobalJit())); auto clusters = GetClusters(*graph); EXPECT_EQ(0, clusters.size()); } TEST(XlaCompilationTest, CyclesWithSplittingScopes) { std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); { GraphDefBuilder builder(GraphDefBuilder::kFailImmediately); Node* a = ops::SourceOp("Const", builder.opts() .WithName("A") .WithAttr("dtype", DT_FLOAT) .WithAttr("value", Tensor()) .WithAttr(kXlaCompileAttr, true) .WithAttr(kXlaScopeAttr, "Scope1")); Node* b = ops::UnaryOp("Relu", a, builder.opts() .WithName("B") .WithAttr(kXlaCompileAttr, true) 
.WithAttr(kXlaScopeAttr, "Scope1")); Node* c = ops::BinaryOp("MatMul", a, b, builder.opts() .WithName("C") .WithAttr(kXlaCompileAttr, true) .WithAttr(kXlaScopeAttr, "Scope2")); ops::BinaryOp("Add", b, c, builder.opts() .WithName("D") .WithAttr(kXlaCompileAttr, true) .WithAttr(kXlaScopeAttr, "Scope2")); TF_CHECK_OK(GraphDefBuilderToGraph(builder, graph.get())); } TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation( &graph, MarkForCompilationPassTestHelper::Options().WithNoGlobalJit())); auto clusters = GetClusters(*graph); EXPECT_EQ(4, clusters.size()); EXPECT_EQ(clusters["A"], clusters["B"]); EXPECT_NE(clusters["A"], clusters["C"]); EXPECT_EQ(clusters["C"], clusters["D"]); } TEST(XlaCompilationTest, CyclesWithDifferentScopesAndBridge) { std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); { GraphDefBuilder builder(GraphDefBuilder::kFailImmediately); Node* a = ops::SourceOp("Const", builder.opts() .WithName("A") .WithAttr("dtype", DT_FLOAT) .WithAttr("value", Tensor()) .WithAttr(kXlaCompileAttr, true) .WithAttr(kXlaScopeAttr, "ScopeA")); Node* b = ops::UnaryOp("Relu", a, builder.opts() .WithName("B") .WithAttr(kXlaCompileAttr, true) .WithAttr(kXlaScopeAttr, "ScopeB")); ops::BinaryOp("MatMul", a, b, builder.opts().WithName("C")); TF_CHECK_OK(GraphDefBuilderToGraph(builder, graph.get())); } TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation( &graph, MarkForCompilationPassTestHelper::Options().WithNoGlobalJit())); auto clusters = GetClusters(*graph); EXPECT_EQ(2, clusters.size()); EXPECT_NE(clusters["A"], clusters["B"]); EXPECT_NE(clusters["B"], clusters["C"]); } TEST(XlaCompilationTest, DontClusterNodesWithMismatchingDeadness) { Scope root = Scope::NewRootScope().ExitOnError(); Output cond_a = ops::Placeholder(root.WithOpName("cond_a"), DT_BOOL); Output cond_b = ops::Placeholder(root.WithOpName("cond_b"), DT_BOOL); Output value = ops::Placeholder(root.WithOpName("value"), DT_FLOAT); ops::Switch switch_a(root.WithOpName("switch_a"), value, cond_a); ops::Switch switch_b(root.WithOpName("switch_b"), value, cond_b); Output tanh_a0 = ops::Tanh(root.WithOpName("tan_a0"), switch_a.output_true); Output tanh_a1 = ops::Tanh(root.WithOpName("tan_a1"), tanh_a0); Output tanh_b0 = ops::Tanh(root.WithOpName("tan_b0"), switch_b.output_true); Output tanh_b1 = ops::Tanh(root.WithOpName("tan_b1"), tanh_b0); Output add = ops::Add(root.WithOpName("add"), tanh_a1, tanh_b1); std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); TF_EXPECT_OK(root.ToGraph(graph.get())); TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation( &graph, MarkForCompilationPassTestHelper::Options().WithDeadnessAnalysis())); auto clusters = GetClusters(*graph); EXPECT_NE(clusters["tan_a0"], ""); EXPECT_NE(clusters["tan_a1"], ""); EXPECT_NE(clusters["tan_b0"], ""); EXPECT_NE(clusters["tan_b1"], ""); EXPECT_EQ(clusters["tan_a0"], clusters["tan_a1"]); EXPECT_EQ(clusters["tan_b0"], clusters["tan_b1"]); EXPECT_NE(clusters["tan_a0"], clusters["tan_b0"]); } TEST(XlaCompilationTest, ClusterNodesWithMismatchingInputDeadness) { Scope root = Scope::NewRootScope().ExitOnError(); Output cond_a = ops::Placeholder(root.WithOpName("cond_a"), DT_BOOL); Output cond_b = ops::Placeholder(root.WithOpName("cond_b"), DT_BOOL); Output value = ops::Placeholder(root.WithOpName("value"), DT_FLOAT); ops::Switch switch_a(root.WithOpName("switch_a"), value, cond_a); ops::Switch switch_b(root.WithOpName("switch_b"), value, cond_b); Output add_a = ops::Add(root.WithOpName("add_a"), switch_a.output_true, 
switch_b.output_true); Output add_b = ops::Add(root.WithOpName("add_b"), switch_a.output_true, switch_b.output_true); Output add = ops::Add(root.WithOpName("add_c"), add_a, add_b); std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); TF_EXPECT_OK(root.ToGraph(graph.get())); TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation( &graph, MarkForCompilationPassTestHelper::Options().WithDeadnessAnalysis())); auto clusters = GetClusters(*graph); EXPECT_NE(clusters["add_a"], ""); EXPECT_NE(clusters["add_b"], ""); EXPECT_NE(clusters["add_c"], ""); EXPECT_EQ(clusters["add_a"], clusters["add_b"]); EXPECT_EQ(clusters["add_b"], clusters["add_c"]); } namespace { Node* MakeRead(const Scope& scope, const string& id, Node** var_handle_op = nullptr) { Output var_handle = ops::VarHandleOp(scope.WithOpName("Var" + id), DT_FLOAT, TensorShape({})); Output read = ops::ReadVariableOp(scope.WithOpName("Read" + id), var_handle, DT_FLOAT); if (var_handle_op) { *var_handle_op = var_handle.node(); } return read.node(); } Node* MakeWrite(const Scope& scope, const string& id) { Output var_handle = ops::VarHandleOp(scope.WithOpName("Var" + id), DT_FLOAT, TensorShape({})); Output value_to_write = ops::Const(scope.WithOpName("ValueToAssign" + id), 1.0f); ops::AssignVariableOp assign_op(scope.WithOpName("Assignment" + id), var_handle, value_to_write); return assign_op.operation.node(); } Node* MakeNeutral(const Scope& scope, const string& id) { return ops::Const(scope.WithOpName("Const" + id), 42.0f).node(); } } TEST(XlaCompilationTest, ResourcesClusteringAllowed) { Scope root = Scope::NewRootScope().ExitOnError(); Node* read = MakeRead(root, "R"); Node* write = MakeWrite(root, "W"); root.graph()->AddControlEdge(read, write); FixupSourceAndSinkEdges(root.graph()); std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); TF_EXPECT_OK(root.ToGraph(graph.get())); TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph)); absl::flat_hash_map<string, std::vector<string>> cluster_sets = GetClusterSets(*graph); ASSERT_EQ(cluster_sets.size(), 1); std::vector<string> expected_clustered_nodes = {"AssignmentW", "ReadR", "ValueToAssignW"}; ASSERT_EQ(cluster_sets.begin()->second, expected_clustered_nodes); } TEST(XlaCompilationTest, ResourcesClusteringDisallowed) { Scope root = Scope::NewRootScope().ExitOnError(); Node* read = MakeRead(root, "R"); Node* write = MakeWrite(root, "W"); root.graph()->AddControlEdge(write, read); FixupSourceAndSinkEdges(root.graph()); std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); TF_EXPECT_OK(root.ToGraph(graph.get())); TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph)); absl::flat_hash_map<string, std::vector<string>> cluster_sets = GetClusterSets(*graph); ASSERT_EQ(cluster_sets.size(), 0); } TEST(XlaCompilationTest, ChainOfOps) { Scope root = Scope::NewRootScope().ExitOnError(); Node* write_0 = MakeWrite(root, "W0"); Node* neutral_0 = MakeNeutral(root, "N0"); Node* read_0 = MakeRead(root, "R0"); Node* write_1 = MakeWrite(root, "W1"); Node* neutral_1 = MakeNeutral(root, "N1"); Node* read_1 = MakeRead(root, "R1"); root.graph()->AddControlEdge(write_0, neutral_0); root.graph()->AddControlEdge(neutral_0, read_0); root.graph()->AddControlEdge(read_0, write_1); root.graph()->AddControlEdge(write_1, neutral_1); root.graph()->AddControlEdge(neutral_1, read_1); FixupSourceAndSinkEdges(root.graph()); std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); TF_EXPECT_OK(root.ToGraph(graph.get())); 
TF_ASSERT_OK(MarkForCompilationPassTestHelper::MarkForCompilation(&graph)); std::vector<string> cluster_names; absl::flat_hash_map<string, std::vector<string>> cluster_sets = GetClusterSets(*graph, &cluster_names); ASSERT_EQ(cluster_sets.size(), 1); std::vector<string> expected_clustered_nodes_a = { "AssignmentW1", "ConstN0", "ReadR0", "ValueToAssignW1"}; ASSERT_EQ(cluster_sets[cluster_names[0]], expected_clustered_nodes_a); } TEST(XlaCompilationTest, IllegalCycle_UsefulErrorMessage) { std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); Scope root = Scope::NewRootScope().ExitOnError(); { auto BuildNoopNode = [](absl::string_view name, Graph* graph) { NodeDefBuilder builder(name, "NoOp"); NodeDef def; TF_CHECK_OK(builder.Finalize(&def)); Status status; Node* node = graph->AddNode(def, &status); TF_CHECK_OK(status); return node; }; Node* a = BuildNoopNode("a", graph.get()); Node* b = BuildNoopNode("b", graph.get()); Node* c = BuildNoopNode("c", graph.get()); graph->AddControlEdge(a, b); graph->AddControlEdge(b, c); graph->AddControlEdge(c, a); } TF_EXPECT_OK(root.ToGraph(graph.get())); Status status = MarkForCompilationPassTestHelper::MarkForCompilation(&graph); EXPECT_FALSE(status.ok()); EXPECT_TRUE(absl::StrContains(status.ToString(), "Edge from c to a would create a cycle.\n" "+-> a\n" "| b\n" "+-- c\n")); } TEST(XlaCompilationTest, Retval) { std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); { GraphDefBuilder builder(GraphDefBuilder::kFailImmediately); Node* a = ops::SourceOp("Const", builder.opts() .WithName("A")
1,085
cpp
tensorflow/tensorflow
xla_kernel_creator
tensorflow/compiler/jit/xla_kernel_creator.cc
tensorflow/compiler/jit/xla_kernel_creator_test.cc
#ifndef TENSORFLOW_COMPILER_JIT_XLA_KERNEL_CREATOR_H_ #define TENSORFLOW_COMPILER_JIT_XLA_KERNEL_CREATOR_H_ #include <memory> #include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/node_properties.h" #include "tensorflow/core/lib/core/status.h" namespace tensorflow { class FunctionLibraryRuntime; class OpKernel; class XlaKernelCreator : public CustomKernelCreator { public: bool CanCreateKernel( const FunctionLibraryRuntime& flr, const std::shared_ptr<const NodeProperties>& props) const override; Status CreateKernel(FunctionLibraryRuntime* flr, const std::shared_ptr<const NodeProperties>& props, std::unique_ptr<OpKernel>* kernel) const override; }; bool RegisterLaunchOpCreator(); } #endif #include "tensorflow/compiler/jit/xla_kernel_creator.h" #include <memory> #include <vector> #include "absl/memory/memory.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_format.h" #include "tensorflow/compiler/jit/compilability_check_util.h" #include "tensorflow/compiler/jit/defs.h" #include "tensorflow/compiler/jit/flags.h" #include "tensorflow/compiler/jit/kernels/xla_ops.h" #include "tensorflow/compiler/tf2xla/const_analysis.h" #include "tensorflow/compiler/tf2xla/mlir_bridge_pass.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "tensorflow/core/common_runtime/function.h" #include "tensorflow/core/common_runtime/function_body.h" #include "tensorflow/core/common_runtime/function_utils.h" #include "tensorflow/core/framework/allocator.h" #include "tensorflow/core/framework/device.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/node_def_util.h" #include "tensorflow/core/framework/node_properties.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/core/status.h" #include "tsl/platform/errors.h" namespace tensorflow { bool XlaKernelCreator::CanCreateKernel( const FunctionLibraryRuntime& flr, const std::shared_ptr<const NodeProperties>& props) const { return CanCreateXlaKernel(props->node_def) && !XlaOpRegistry::IsCompilationDevice(flr.device()->device_type()); } static Status CreateXlaKernel(FunctionLibraryRuntime* flr, const NodeDef& node_def, std::unique_ptr<OpKernel>* kernel) { if (!CanCreateXlaKernel(node_def)) { return errors::Internal("Invalid node: ", node_def.ShortDebugString()); } VLOG(3) << "Attempting to create XlaLaunchOp for " << node_def.DebugString(); XlaOpRegistry::RegisterCompilationKernels(); NameAttrList function; TF_RETURN_IF_ERROR(NameAndAttrsFromFunctionCall(node_def, &function)); const FunctionBody* fbody = nullptr; std::vector<int> constant_arg_indices; std::vector<int> resource_arg_indices; TF_RETURN_IF_ERROR(GetBodyAndConstantsAndResources( flr, function, &fbody, &constant_arg_indices, &resource_arg_indices)); MemoryTypeVector input_memory_types = GetInputMemoryTypes(fbody, constant_arg_indices, resource_arg_indices); MemoryTypeVector output_memory_types = GetOutputMemoryTypes(fbody); Device* dev = flr->device(); Status s; auto props = std::make_shared<NodeProperties>( &fbody->record->fdef().signature(), node_def, fbody->arg_types, fbody->ret_types); OpKernelConstruction construction(DeviceType(dev->device_type()), dev, dev->GetAllocator(AllocatorAttributes()), flr, dev->resource_manager(), props, input_memory_types, output_memory_types, flr->graph_def_version(), &s); *kernel = 
std::make_unique<XlaLocalLaunchBase>( &construction, constant_arg_indices, resource_arg_indices, function, false); return s; } Status XlaKernelCreator::CreateKernel( FunctionLibraryRuntime* flr, const std::shared_ptr<const NodeProperties>& props, std::unique_ptr<OpKernel>* kernel) const { return CreateXlaKernel(flr, props->node_def, kernel); } bool RegisterLaunchOpCreator() { XlaKernelCreator* xla_kernel_creator = new XlaKernelCreator(); RegisterDefaultCustomKernelCreator(xla_kernel_creator); return true; } static bool register_me = RegisterLaunchOpCreator(); }
#include "tensorflow/compiler/jit/xla_kernel_creator.h" #include "absl/memory/memory.h" #include "absl/status/status.h" #include "tensorflow/core/common_runtime/device_factory.h" #include "tensorflow/core/common_runtime/function.h" #include "tensorflow/core/framework/function_testlib.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/public/session_options.h" #include "tensorflow/core/public/version.h" namespace tensorflow { std::shared_ptr<NodeProperties> ToNodeProperties(const string& text) { NodeDef node_def; DataTypeVector dummy; EXPECT_TRUE(protobuf::TextFormat::MergeFromString(text, &node_def)); return std::make_shared<NodeProperties>(nullptr, std::move(node_def), dummy, dummy); } FunctionDef XTimesY() { return FunctionDefHelper::Define( "XTimesY", {"x: float", "y: resource"}, {"z: float"}, {}, { {{"y0"}, "ReadVariableOp", {"y"}, {{"dtype", DT_FLOAT}}}, {{"z"}, "Mul", {"x", "y0"}, {{"T", DT_FLOAT}}}, }); } class XlaKernelCreatorTest : public ::testing::Test { protected: void Init(const std::vector<FunctionDef>& flib) { SessionOptions options; auto* device_count = options.config.mutable_device_count(); device_count->insert({"CPU", 1}); std::vector<std::unique_ptr<Device>> devices; TF_CHECK_OK(DeviceFactory::AddDevices( options, "/job:localhost/replica:0/task:0", &devices)); FunctionDefLibrary proto; for (const auto& fdef : flib) { *(proto.add_function()) = fdef; } lib_def_ = std::make_unique<FunctionLibraryDefinition>( OpRegistry::Global(), proto); OptimizerOptions opts; device_mgr_ = std::make_unique<StaticDeviceMgr>(std::move(devices)); pflr_ = std::make_unique<ProcessFunctionLibraryRuntime>( device_mgr_.get(), Env::Default(), nullptr, TF_GRAPH_DEF_VERSION, lib_def_.get(), opts, nullptr, nullptr); flr_ = pflr_->GetFLR("/job:localhost/replica:0/task:0/cpu:0"); } FunctionLibraryRuntime* flr_; std::unique_ptr<DeviceMgr> device_mgr_; std::unique_ptr<FunctionLibraryDefinition> lib_def_; std::unique_ptr<ProcessFunctionLibraryRuntime> pflr_; std::unique_ptr<OpKernel> kernel_; }; AttrValue BoolAttr(bool b) { AttrValue v; v.set_b(b); return v; } TEST_F(XlaKernelCreatorTest, OneFloatOneResourceArgument) { FunctionDef fdef = XTimesY(); (*fdef.mutable_attr())["_XlaMustCompile"] = BoolAttr(true); Init({fdef}); XlaKernelCreator xla_kernel_creator; auto callsite = ToNodeProperties(R"pb( name: 'XTimesY' op: 'XTimesY' input: 'a' input: 'b' )pb"); (*(callsite->node_def.mutable_attr()))["_XlaMustCompile"] = BoolAttr(true); Status status = xla_kernel_creator.CreateKernel(flr_, callsite, &kernel_); ASSERT_TRUE(status.ok()) << status.ToString(); EXPECT_EQ("XTimesY", kernel_->name()); EXPECT_EQ("XTimesY", kernel_->type_string()); EXPECT_EQ(2, kernel_->num_inputs()); EXPECT_EQ(DT_FLOAT, kernel_->input_type(0)); EXPECT_EQ(DT_RESOURCE, kernel_->input_type(1)); EXPECT_EQ(DEVICE_MEMORY, kernel_->input_memory_types()[0]); EXPECT_EQ(HOST_MEMORY, kernel_->input_memory_types()[1]); EXPECT_EQ(1, kernel_->num_outputs()); EXPECT_EQ(DT_FLOAT, kernel_->output_type(0)); EXPECT_EQ(DEVICE_MEMORY, kernel_->output_memory_types()[0]); } TEST_F(XlaKernelCreatorTest, FailsIfXlaCompileAttrNotSet) { FunctionDef fdef = XTimesY(); Init({fdef}); XlaKernelCreator xla_kernel_creator; Status status = xla_kernel_creator.CreateKernel(flr_, ToNodeProperties(R"proto( name: 'XTimesY' op: 'XTimesY' input: 'a' input: 'b' 
)proto"), &kernel_); EXPECT_TRUE(absl::IsInternal(status)) << status; } TEST_F(XlaKernelCreatorTest, FailsIfXlaCompileAttrIsSetToFalse) { FunctionDef fdef = XTimesY(); (*fdef.mutable_attr())["_XlaMustCompile"] = BoolAttr(false); Init({fdef}); XlaKernelCreator xla_kernel_creator; Status status = xla_kernel_creator.CreateKernel(flr_, ToNodeProperties(R"proto( name: 'XTimesY' op: 'XTimesY' input: 'a' input: 'b' )proto"), &kernel_); EXPECT_TRUE(absl::IsInternal(status)) << status; } }
1,086
cpp
tensorflow/tensorflow
node_matchers
tensorflow/compiler/jit/node_matchers.cc
tensorflow/compiler/jit/node_matchers_test.cc
#ifndef TENSORFLOW_COMPILER_JIT_NODE_MATCHERS_H_ #define TENSORFLOW_COMPILER_JIT_NODE_MATCHERS_H_ #include <array> #include <string> #include <vector> #include "absl/algorithm/container.h" #include "absl/strings/string_view.h" #include "absl/types/optional.h" #include "absl/types/span.h" #include "tensorflow/cc/framework/ops.h" #include "xla/test.h" #include "tensorflow/core/graph/graph.h" namespace tensorflow { namespace testing { namespace matchers { namespace impl { using OutEdge = std::pair<const Node*, int>; class NodeMatcherProperties { public: using NodeSeqMatcher = std::vector<::testing::Matcher<const Node*>>; using InputSeqMatcher = std::vector<::testing::Matcher<OutEdge>>; using AttrKeyValuePair = std::pair<string, std::optional<AttrValue>>; const std::optional<string>& name() const { return name_; } const std::optional<string>& op() const { return op_; } const std::optional<string>& assigned_device() const { return assigned_device_; } const std::optional<Tensor>& constant_value() const { return constant_value_; } const std::optional<InputSeqMatcher>& inputs() const { return input_matchers_; } const std::optional<NodeSeqMatcher>& control_deps() const { return control_deps_; } const std::optional<AttrKeyValuePair>& attr() const { return attr_; } void set_name(string name) { DCHECK(IsEmpty()); name_ = std::move(name); } void set_op(string op) { DCHECK(IsEmpty()); op_ = std::move(op); } void set_assigned_device(string assigned_device) { DCHECK(IsEmpty()); assigned_device_ = std::move(assigned_device); } void set_constant_value(Tensor constant_value) { DCHECK(IsEmpty()); constant_value_ = std::move(constant_value); op_ = "Const"; } void set_inputs(InputSeqMatcher inputs) { DCHECK(IsEmpty()); input_matchers_ = std::move(inputs); } void set_control_deps(NodeSeqMatcher control_deps) { DCHECK(IsEmpty()); control_deps_ = std::move(control_deps); } void set_attr(AttrKeyValuePair attr) { DCHECK(IsEmpty()); attr_ = std::move(attr); } bool IsEmpty() const { return !name().has_value() && !op().has_value() && !inputs().has_value() && !control_deps().has_value() && !attr().has_value(); } private: std::optional<string> name_; std::optional<string> op_; std::optional<string> assigned_device_; std::optional<Tensor> constant_value_; std::optional<InputSeqMatcher> input_matchers_; std::optional<NodeSeqMatcher> control_deps_; std::optional<AttrKeyValuePair> attr_; }; ::testing::Matcher<const Node*> NodeWith( absl::Span<const NodeMatcherProperties> props); impl::NodeMatcherProperties Inputs( absl::Span<const ::testing::Matcher<OutEdge>> inputs); impl::NodeMatcherProperties CtrlDeps( absl::Span<const ::testing::Matcher<const Node*>> control_deps); impl::NodeMatcherProperties Attr(std::pair<string, AttrValue> attrs); impl::NodeMatcherProperties Attr(string name); std::pair<string, AttrValue> AttrLiteralHelper( const std::pair<string, bool>& bool_attr); std::pair<string, AttrValue> AttrLiteralHelper( const std::pair<string, absl::Span<const int>>& int_list_attr); std::pair<string, AttrValue> AttrLiteralHelper( const std::pair<string, absl::Span<const string>>& string_list_attr); } impl::NodeMatcherProperties Name(string name); impl::NodeMatcherProperties Op(string op); impl::NodeMatcherProperties AssignedDevice(string assigned_device); template <typename ValueTy> impl::NodeMatcherProperties Attr(const string& name, ValueTy value) { return impl::Attr({impl::AttrLiteralHelper({name, value})}); } inline impl::NodeMatcherProperties Attr(const string& name) { return impl::Attr(name); } template <typename... 
Ts> impl::NodeMatcherProperties Inputs(Ts... inputs) { return impl::Inputs({inputs...}); } ::testing::Matcher<impl::OutEdge> Out(int oidx, ::testing::Matcher<const Node*> node); inline ::testing::Matcher<impl::OutEdge> Out( ::testing::Matcher<const Node*> node) { return Out(0, node); } template <typename... Ts> impl::NodeMatcherProperties CtrlDeps(Ts... control_deps) { return impl::CtrlDeps({control_deps...}); } impl::NodeMatcherProperties ConstantValue( const ::tensorflow::Input::Initializer& val); template <typename... Ts> ::testing::Matcher<const Node*> NodeWith(Ts... args) { std::array<impl::NodeMatcherProperties, sizeof...(Ts)> array = {args...}; return impl::NodeWith(array); } ::testing::Matcher<impl::OutEdge> Const( const ::tensorflow::Input::Initializer& val); } Node* FindNodeByName(Graph* g, absl::string_view name); } void PrintTo(const Node* n, ::std::ostream* os); void PrintTo(Node* n, ::std::ostream* os); } #endif #include "tensorflow/compiler/jit/node_matchers.h" #include <utility> #include "absl/algorithm/container.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_join.h" #include "absl/strings/str_replace.h" #include "absl/strings/str_split.h" #include "tensorflow/core/framework/attr_value_util.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/tensor.pb.h" #include "tensorflow/core/graph/graph_node_util.h" namespace tensorflow { namespace testing { namespace matchers { namespace { using impl::NodeMatcherProperties; using impl::OutEdge; string IndentAllButFirstLine(absl::string_view text) { std::vector<std::string> lines = absl::StrSplit(text, '\n'); for (int i = 1; i < lines.size(); i++) { lines[i].insert(0, " "); } return absl::StrJoin(lines, "\n"); } template <typename T> bool CompareTensor(const Tensor& actual, const Tensor& expected, ::testing::MatchResultListener* listener) { if (actual.NumElements() != expected.NumElements()) { if (listener->IsInterested()) { *listener << "\nwas looking for tensor with " << expected.NumElements() << " elements, found tensor with " << actual.NumElements() << " elements"; return false; } } for (int64_t i = 0, e = actual.NumElements(); i < e; i++) { if (actual.flat<T>()(i) != expected.flat<T>()(i)) { *listener << "\nmismatch in constant tensor at index " << i << " expected = " << expected.flat<T>()(i) << " actual = " << actual.flat<T>()(i); return false; } } return true; } bool MatchAndExplainTensor(const Tensor& tensor, const Tensor& expected_tensor, ::testing::MatchResultListener* listener) { if (tensor.dtype() != expected_tensor.dtype()) { if (listener->IsInterested()) { *listener << "\nexpected tensor of type " << DataType_Name(expected_tensor.dtype()) << " but found one of type " << DataType_Name(tensor.dtype()); return false; } } switch (tensor.dtype()) { case DT_HALF: return CompareTensor<Eigen::half>(tensor, expected_tensor, listener); case DT_FLOAT: return CompareTensor<float>(tensor, expected_tensor, listener); case DT_DOUBLE: return CompareTensor<double>(tensor, expected_tensor, listener); case DT_INT8: return CompareTensor<int8>(tensor, expected_tensor, listener); case DT_INT16: return CompareTensor<int16>(tensor, expected_tensor, listener); case DT_INT32: return CompareTensor<int32>(tensor, expected_tensor, listener); case DT_INT64: return CompareTensor<int64_t>(tensor, expected_tensor, listener); case DT_UINT8: return CompareTensor<uint8>(tensor, expected_tensor, listener); case DT_UINT16: return CompareTensor<uint16>(tensor, expected_tensor, listener); case DT_UINT32: 
return CompareTensor<uint32>(tensor, expected_tensor, listener); case DT_UINT64: return CompareTensor<uint64>(tensor, expected_tensor, listener); default: LOG(FATAL) << "Unsupported dtype " << DataType_Name(tensor.dtype()); } } struct NodeMatcher : public ::testing::MatcherInterface<const Node*> { bool MatchAndExplain( const Node* node, ::testing::MatchResultListener* listener) const override { if (op && node->type_string() != *op) { if (listener->IsInterested()) { *listener << "\nexpected op " << *op << " but found " << node->type_string(); } return false; } if (assigned_device && node->assigned_device_name() != *assigned_device) { if (listener->IsInterested()) { *listener << "\nexpected assigned_device " << *assigned_device << " but found \"" << node->assigned_device_name() << "\""; } return false; } if (name && node->name() != *name) { if (listener->IsInterested()) { *listener << "\nexpected name " << *name << " but found " << node->name(); } return false; } if (constant_value) { const TensorProto* proto = nullptr; if (!TryGetNodeAttr(node->def(), "value", &proto)) { if (listener->IsInterested()) { *listener << "\ncould not find \"value\" attribute in node"; } return false; } Tensor tensor(proto->dtype()); if (!tensor.FromProto(*proto)) { if (listener->IsInterested()) { *listener << "\ncould not convert TensorProto in \"value\" attribute " "to Tensor"; } return false; } if (!MatchAndExplainTensor(tensor, *constant_value, listener)) { return false; } } if (input_matchers) { if (input_matchers->size() != node->num_inputs()) { if (listener->IsInterested()) { *listener << "\nexpected " << input_matchers->size() << " inputs but node has " << node->num_inputs(); } return false; } for (int input_idx = 0, e = input_matchers->size(); input_idx < e; input_idx++) { if (!MatchAndExplainInput(node, input_idx, listener)) { return false; } } } std::vector<const Node*> control_deps; for (const Edge* e : node->in_edges()) { if (e->IsControlEdge()) { control_deps.push_back(e->src()); } } ::testing::StringMatchResultListener inner_listener; if (control_dep_set && !control_dep_set->MatchAndExplain(control_deps, &inner_listener)) { if (listener->IsInterested()) { string explanation = inner_listener.str(); if (!explanation.empty()) { explanation = absl::StrCat(", ", explanation, ","); } *listener << "ctrl_deps" << explanation << " does not match expected: "; control_dep_set->DescribeTo(listener->stream()); } return false; } const AttrValueMap attr_value_map = node->def().attr(); for (const auto& attr_kv_pair : attrs) { auto it = attr_value_map.find(attr_kv_pair.first); if (it == attr_value_map.end()) { if (listener->IsInterested()) { *listener << "did not find attribute named \"" << attr_kv_pair.first << "\" in node"; } return false; } if (attr_kv_pair.second && !AreAttrValuesEqual(it->second, *attr_kv_pair.second)) { if (listener->IsInterested()) { *listener << "attribute named " << attr_kv_pair.first << " does not match value; expected: \"" << SummarizeAttrValue(*attr_kv_pair.second) << "\", found: \"" << SummarizeAttrValue(it->second) << "\""; } return false; } } return true; } void DescribeTo(::std::ostream* os) const override { std::vector<string> predicates; if (name) { predicates.push_back(absl::StrCat("name: ", *name)); } if (op) { predicates.push_back(absl::StrCat("op: ", *op)); } if (assigned_device) { predicates.push_back(absl::StrCat("assigned device: ", *assigned_device)); } bool printed_something = !predicates.empty(); *os << absl::StrJoin(predicates, ", "); if (constant_value) { 
printed_something = true; *os << "constant value: " << constant_value->DebugString(); } if (input_matchers) { if (!input_matchers->empty()) { printed_something = true; *os << " with " << (input_matchers->size() == 1 ? "only " : "") << "input" << (input_matchers->size() == 1 ? "" : "s") << " "; } if (input_matchers->size() == 1) { ::std::stringstream ss; input_matchers->front().DescribeTo(&ss); printed_something = true; *os << "matching " << ss.str(); } else { int edge_idx = 0; for (const ::testing::Matcher<OutEdge>& matcher : (*input_matchers)) { *os << "\n [" << edge_idx << "] matching ("; ::std::stringstream ss; matcher.DescribeTo(&ss); printed_something = true; *os << IndentAllButFirstLine(ss.str()); *os << ")"; edge_idx++; } } } if (control_dep_set) { printed_something = true; *os << " and control deps "; control_dep_set->DescribeTo(os); } if (!attrs.empty()) { printed_something = true; std::vector<string> attrs_str; absl::c_transform( attrs, std::back_inserter(attrs_str), [](const std::pair<string, std::optional<AttrValue>>& attr_kv_pair) { return absl::StrCat(attr_kv_pair.first, "->", attr_kv_pair.second ? SummarizeAttrValue(*attr_kv_pair.second) : "*"); }); *os << " and attr values matching [" << absl::StrJoin(attrs_str, ", ") << "]"; } if (!printed_something) { *os << "is any node"; } } bool MatchAndExplainInput(const Node* node, int input_idx, ::testing::MatchResultListener* listener) const { const Edge* edge; if (!node->input_edge(input_idx, &edge).ok()) { if (listener->IsInterested()) { *listener << "\ncould not find incoming edge for input " << input_idx; } return false; } ::testing::StringMatchResultListener inner_listener; OutEdge input = {edge->src(), edge->src_output()}; if ((*input_matchers)[input_idx].MatchAndExplain(input, &inner_listener)) { return true; } if (listener->IsInterested()) { *listener << "\ninput " << input_idx << " does not match expected:\n"; (*input_matchers)[input_idx].DescribeTo(listener->stream()); string explanation = inner_listener.str(); if (!explanation.empty()) { *listener << ", " << explanation; } } return false; } std::optional<string> op; std::optional<string> name; std::optional<string> assigned_device; std::optional<Tensor> constant_value; std::optional<std::vector<::testing::Matcher<OutEdge>>> input_matchers; std::optional<::testing::Matcher<absl::Span<const Node* const>>> control_dep_set; std::map<string, std::optional<AttrValue>> attrs; }; class OutEdgeMatcher : public ::testing::MatcherInterface<OutEdge> { public: OutEdgeMatcher(::testing::Matcher<const Node*> src_matcher, int src_oidx) : src_matcher_(std::move(src_matcher)), src_oidx_(src_oidx) {} bool MatchAndExplain( OutEdge out_edge, ::testing::MatchResultListener* listener) const override { ::testing::StringMatchResultListener inner_listener; if (!src_matcher_.MatchAndExplain(out_edge.first, &inner_listener)) { if (listener->IsInterested()) { *listener << "\nsource does not match expected "; src_matcher_.DescribeTo(listener->stream()); string explanation = inner_listener.str(); if (!explanation.empty()) { *listener << "\n\t" << explanation; } } return false; } if (out_edge.second != src_oidx_) { if (listener->IsInterested()) { *listener << "\nexpected output slot to be " << src_oidx_ << " but found " << out_edge.second; } return false; } return true; } void DescribeTo(::std::ostream* os) const override { if (src_oidx_) { *os << "output slot: " << src_oidx_ << ", source: ("; } src_matcher_.DescribeTo(os); if (src_oidx_) { *os << ")"; } } private: ::testing::Matcher<const Node*> 
src_matcher_; int src_oidx_; }; } ::testing::Matcher<const Node*> impl::NodeWith( absl::Span<const NodeMatcherProperties> props) { NodeMatcher* matcher = new NodeMatcher(); for (const NodeMatcherProperties& prop : props) { if (prop.name()) { DCHECK(!matcher->name); matcher->name = prop.name(); } if (prop.op()) { DCHECK(!matcher->op); matcher->op = prop.op(); } if (prop.constant_value()) { DCHECK(!matcher->constant_value); matcher->constant_value = prop.constant_value(); } if (prop.assigned_device()) { DCHECK(!matcher->assigned_device); matcher->assigned_device = prop.assigned_device(); } if (prop.inputs()) { DCHECK(!matcher->input_matchers); matcher->input_matchers = *prop.inputs(); } if (prop.control_deps()) { DCHECK(!matcher->control_dep_set); matcher->control_dep_set = ::testing::UnorderedElementsAreArray(*prop.control_deps()); } if (prop.attr()) { auto insert_result = matcher->attrs.insert(*prop.attr()); DCHECK(insert_result.second); } } return ::testing::MakeMatcher(matcher); } impl::NodeMatcherProperties Name(string name) { impl::NodeMatcherProperties props; props.set_name(std::move(name)); return props; } impl::NodeMatcherProperties Op(string op) { impl::NodeMatcherProperties props; props.set_op(std::move(op)); return props; } impl::NodeMatcherProperties AssignedDevice(string assigned_device) { impl::NodeMatcherProperties props; props.set_assigned_device(std::move(assigned_device)); return props; } impl::NodeMatcherProperties impl::Inputs( absl::Span<const ::testing::Matcher<OutEdge>> inputs) { std::vector<::testing::Matcher<OutEdge>> inputs_vector; absl::c_copy(inputs, std::back_inserter(inputs_vector)); impl::NodeMatcherProperties props; props.set_inputs(std::move(inputs_vector)); return props; } impl::NodeMatcherProperties impl::CtrlDeps( absl::Span<const ::testing::Matcher<const Node*>> control_deps) { std::vector<::testing::Matcher<const Node*>> control_deps_vector; absl::c_copy(control_deps, std::back_inserter(control_deps_vector)); impl::NodeMatcherProperties props; props.set_control_deps(std::move(control_deps_vector)); return props; } std::pair<string, AttrValue> impl::AttrLiteralHelper( const std::pair<string, bool>& bool_attr) { AttrValue attr_value; attr_value.set_b(bool_attr.second); return {bool_attr.first, attr_value}; } std::pair<string, AttrValue> impl::AttrLiteralHelper( const std::pair<string, absl::Span<const int>>& int_list_attr) { AttrValue attr_value; AttrValue::ListValue* list = attr_value.mutable_list(); for (int i : int_list_attr.second) { list->add_i(i); } return {int_list_attr.first, attr_value}; } std::pair<string, AttrValue> impl::AttrLiteralHelper( const std::pair<string, absl::Span<const string>>& string_list_attr) { AttrValue attr_value; AttrValue::ListValue* list = attr_value.mutable_list(); for (const string& s : string_list_attr.second) { list->add_s(s); } return {string_list_attr.first, attr_value}; } impl::NodeMatcherProperties impl::Attr(std::pair<string, AttrValue> attr) { impl::NodeMatcherProperties props; props.set_attr(std::move(attr)); return props; } impl::NodeMatcherProperties impl::Attr(string name) { impl::NodeMatcherProperties props; props.set_attr({std::move(name), std::nullopt}); return props; } NodeMatcherProperties ConstantValue( const ::tensorflow::Input::Initializer& val) { TF_CHECK_OK(val.status); NodeMatcherProperties props; props.set_constant_value(val.tensor); return props; } ::testing::Matcher<impl::OutEdge> Const( const ::tensorflow::Input::Initializer& val) { return Out(NodeWith(ConstantValue(val))); } 
::testing::Matcher<impl::OutEdge> Out( int oidx, ::testing::Matcher<const Node*> node_matcher) { return ::testing::MakeMatcher(new OutEdgeMatcher(node_matcher, oidx)); } } Node* FindNodeByName(Graph* g, absl::string_view name) { for (Node* n : g->nodes()) { if (n->name() == name) { return n; } } return nullptr; } } void PrintTo(const Node* n, ::std::ostream* os) { *os << SummarizeNode(*n); } void PrintTo(Node* n, ::std::ostream* os) { *os << SummarizeNode(*n); } }
#include "tensorflow/compiler/jit/node_matchers.h" #include <string> #include "tensorflow/cc/framework/ops.h" #include "tensorflow/cc/ops/array_ops.h" #include "tensorflow/cc/ops/const_op.h" #include "tensorflow/cc/ops/control_flow_ops.h" #include "tensorflow/cc/ops/control_flow_ops_internal.h" #include "tensorflow/cc/ops/math_ops.h" namespace tensorflow { namespace testing { namespace { using ::testing::_; using testing::matchers::AssignedDevice; using testing::matchers::Attr; using testing::matchers::ConstantValue; using testing::matchers::CtrlDeps; using testing::matchers::Inputs; using testing::matchers::Name; using testing::matchers::NodeWith; using testing::matchers::Op; using testing::matchers::Out; template <typename M, typename T> string Explain(const T& t, const M& m) { ::testing::StringMatchResultListener listener; EXPECT_THAT(t, ::testing::Not(m)); EXPECT_FALSE(m.MatchAndExplain(t, &listener)); return listener.str(); } TEST(NodeMatchers, CheckAgainstConstant) { Scope root = Scope::NewRootScope().ExitOnError(); Output placeholder = ops::Placeholder(root.WithOpName("placeholder"), DT_FLOAT); EXPECT_THAT(placeholder.node(), NodeWith(Op("Placeholder"))); EXPECT_THAT(placeholder.node(), NodeWith(Name("placeholder"))); EXPECT_THAT(placeholder.node(), NodeWith(Op("Placeholder"), Name("placeholder"))); EXPECT_THAT(placeholder.node(), NodeWith(Name("placeholder"), Op("Placeholder"))); EXPECT_THAT(placeholder.node(), NodeWith(Inputs())); EXPECT_THAT(placeholder.node(), NodeWith(Op("Placeholder"), Name("placeholder"), Inputs())); EXPECT_EQ(Explain(placeholder.node(), NodeWith(Op("Add"))), "\nexpected op Add but found Placeholder"); EXPECT_EQ(Explain(placeholder.node(), NodeWith(Name("add"))), "\nexpected name add but found placeholder"); EXPECT_EQ(Explain(placeholder.node(), NodeWith(Inputs(Out(NodeWith())))), "\nexpected 1 inputs but node has 0"); } TEST(NodeMatchers, CheckAgainstBinary) { Scope root = Scope::NewRootScope().ExitOnError(); Output placeholder_a = ops::Placeholder(root.WithOpName("placeholder_a"), DT_FLOAT); Output placeholder_b = ops::Placeholder(root.WithOpName("placeholder_b"), DT_FLOAT); Output add = ops::Add(root.WithOpName("add"), placeholder_a, placeholder_b); EXPECT_THAT(add.node(), NodeWith(Op("Add"), Name("add"), Inputs(Out(NodeWith(Name("placeholder_a"))), Out(NodeWith(Name("placeholder_b")))))); EXPECT_EQ(Explain(add.node(), NodeWith(Inputs())), "\nexpected 0 inputs but node has 2"); EXPECT_EQ( Explain(add.node(), NodeWith(Inputs(Out(NodeWith(Name("blah"))), _))), "\ninput 0 does not match expected:\nname: blah, \nsource does not match " "expected name: blah\n\t\nexpected name blah but found placeholder_a"); EXPECT_EQ( Explain(add.node(), NodeWith(Inputs(_, Out(NodeWith(Name("blah")))))), "\ninput 1 does not match expected:\nname: blah, \nsource does not match " "expected name: blah\n\t\nexpected name blah but found placeholder_b"); } TEST(NodeMatchers, CheckControlDependence) { Scope root = Scope::NewRootScope().ExitOnError(); Output placeholder_a = ops::Placeholder(root.WithOpName("placeholder_a"), DT_FLOAT); Output placeholder_b = ops::Placeholder(root.WithOpName("placeholder_b"), DT_FLOAT); Output placeholder_c = ops::Placeholder(root.WithOpName("placeholder_c"), DT_FLOAT); Output placeholder_d = ops::Placeholder(root.WithOpName("placeholder_d"), DT_FLOAT); root.graph()->AddControlEdge(placeholder_a.node(), placeholder_c.node()); root.graph()->AddControlEdge(placeholder_b.node(), placeholder_c.node()); EXPECT_THAT(placeholder_c.node(), 
NodeWith(Name("placeholder_c"), CtrlDeps(NodeWith(Name("placeholder_a")), NodeWith(Name("placeholder_b"))))); EXPECT_THAT(placeholder_d.node(), NodeWith(Name("placeholder_d"), CtrlDeps())); { const std::string explanation = Explain(placeholder_c.node(), NodeWith(CtrlDeps())); EXPECT_NE(explanation.find("ctrl_deps, which has 2 elements"), std::string::npos); EXPECT_NE(explanation.find("does not match expected: is empty"), std::string::npos); } { const std::string explanation = Explain(placeholder_d.node(), NodeWith(CtrlDeps(NodeWith()))); EXPECT_NE(explanation.find("ctrl_deps"), std::string::npos); EXPECT_NE(explanation.find("does not match expected: has 1 element and " "that element is any node"), std::string::npos); } } TEST(NodeMatchers, ConstValue) { Scope root = Scope::NewRootScope().ExitOnError(); Output placeholder = ops::Placeholder(root.WithOpName("placeholder"), DT_FLOAT); Output const_0d = ops::Const(root.WithOpName("const_0d"), 42); Output const_2d = ops::Const(root.WithOpName("const_2d"), {{1, 2}, {4, 3}}); EXPECT_THAT(const_0d.node(), NodeWith(ConstantValue(42))); EXPECT_THAT(const_0d.node(), NodeWith(ConstantValue(42), Name("const_0d"))); EXPECT_THAT(const_2d.node(), NodeWith(ConstantValue({{1, 2}, {4, 3}}))); EXPECT_EQ(Explain(placeholder.node(), NodeWith(ConstantValue(42))), "\nexpected op Const but found Placeholder"); EXPECT_EQ( Explain(const_0d.node(), NodeWith(ConstantValue(43))), "\nmismatch in constant tensor at index 0 expected = 43 actual = 42"); EXPECT_EQ( Explain(const_0d.node(), NodeWith(ConstantValue({{1, 2}, {4, 3}}))), "\nwas looking for tensor with 4 elements, found tensor with 1 elements"); EXPECT_EQ( Explain(const_2d.node(), NodeWith(ConstantValue(42))), "\nwas looking for tensor with 1 elements, found tensor with 4 elements"); } TEST(NodeMatchers, AssignedDevice) { Scope root = Scope::NewRootScope().ExitOnError(); Output placeholder_a = ops::Placeholder(root.WithOpName("placeholder_a"), DT_FLOAT); Output placeholder_b = ops::Placeholder(root.WithOpName("placeholder_b"), DT_FLOAT); Output assigned_add = ops::Add(root.WithOpName("assigned_add"), placeholder_a, placeholder_b); assigned_add.node()->set_assigned_device_name( "/job:localhost/replica:0/task:0/device:CPU:0"); Output unassigned_add = ops::Add(root.WithOpName("unassigned_add"), placeholder_a, placeholder_b); EXPECT_THAT( assigned_add.node(), NodeWith(AssignedDevice("/job:localhost/replica:0/task:0/device:CPU:0"))); EXPECT_THAT(unassigned_add.node(), NodeWith(AssignedDevice(""))); EXPECT_EQ(Explain(unassigned_add.node(), NodeWith(AssignedDevice( "/job:localhost/replica:0/task:0/device:CPU:0"))), "\nexpected assigned_device " "/job:localhost/replica:0/task:0/device:CPU:0 but found \"\""); } TEST(NodeMatchers, OutputIndices) { Scope root = Scope::NewRootScope().ExitOnError(); Output pred = ops::Placeholder(root.WithOpName("pred"), DT_BOOL); Output data = ops::Placeholder(root.WithOpName("data"), DT_FLOAT); ops::Switch sw(root.WithOpName("switch"), data, pred); Output add = ops::Add(root.WithOpName("add"), sw.output_true, ops::Placeholder(root.WithOpName("addend"), DT_FLOAT)); EXPECT_THAT(add.node(), NodeWith(Inputs(Out(1, NodeWith(Op("Switch"))), _))); EXPECT_EQ( Explain(add.node(), NodeWith(Inputs(Out(0, NodeWith(Op("Switch"))), _))), "\ninput 0 does not match expected:\nop: Switch, \nexpected output slot " "to be 0 but found 1"); } TEST(NodeMatchers, Attrs) { Scope root = Scope::NewRootScope().ExitOnError(); Output enter = ops::internal::Enter( root.WithOpName("enter"), 
ops::Placeholder(root.WithOpName("data"), DT_FLOAT), "frame_name", ops::internal::Enter::Attrs{}.IsConstant(true)); EXPECT_THAT(enter.node(), NodeWith(Attr("is_constant", true))); EXPECT_EQ(Explain(enter.node(), NodeWith(Attr("is_constant", false))), "attribute named is_constant does not match value; expected: " "\"false\", found: \"true\""); EXPECT_EQ(Explain(enter.node(), NodeWith(Attr("missing_attr", false))), "did not find attribute named \"missing_attr\" in node"); } } } }
1,087
cpp
tensorflow/tensorflow
device_compilation_cluster_signature
tensorflow/compiler/jit/device_compilation_cluster_signature.cc
tensorflow/compiler/jit/device_compilation_cluster_signature_test.cc
#ifndef TENSORFLOW_COMPILER_JIT_DEVICE_COMPILATION_CLUSTER_SIGNATURE_H_ #define TENSORFLOW_COMPILER_JIT_DEVICE_COMPILATION_CLUSTER_SIGNATURE_H_ #include <utility> #include <variant> #include "tensorflow/compiler/tf2xla/xla_compiler.h" namespace tensorflow { struct DeviceCompilationClusterSignature { string name; using TensorTypeAndShape = std::pair<DataType, absl::InlinedVector<int64_t, 4>>; absl::InlinedVector<std::variant<Tensor, TensorTypeAndShape>, 8> args; bool operator==(const DeviceCompilationClusterSignature& other) const; struct Hash { uint64 operator()(const DeviceCompilationClusterSignature& signature) const; }; string HumanString() const; static absl::StatusOr<DeviceCompilationClusterSignature> Build( const NameAttrList& function, absl::Span<const XlaCompiler::Argument> args); }; } #endif #include "tensorflow/compiler/jit/device_compilation_cluster_signature.h" #include <string> #include <utility> #include <variant> namespace tensorflow { namespace { using Signature = DeviceCompilationClusterSignature; using TensorTypeAndShape = Signature::TensorTypeAndShape; struct SignatureHumanStringAppender { explicit SignatureHumanStringAppender(std::string* dest) : dest(dest) {} std::string* dest; void operator()(const Tensor& arg) { absl::StrAppend(dest, "; ", arg.DebugString()); } void operator()(const TensorTypeAndShape& arg) { absl::StrAppend(dest, ",", DataTypeString(arg.first)); absl::StrAppend(dest, " [", absl::StrJoin(arg.second, ","), "]"); } }; struct SignatureNotEqual { bool operator()(const Tensor& arg, const Tensor& other) { return arg.dtype() != other.dtype() || arg.shape() != other.shape() || arg.tensor_data() != other.tensor_data(); } bool operator()(const TensorTypeAndShape& arg, const TensorTypeAndShape& other) { return arg.first != other.first || arg.second != other.second; } bool operator()(const Tensor& arg, const TensorTypeAndShape& other) { return true; } bool operator()(const TensorTypeAndShape& arg, const Tensor& other) { return true; } }; struct SignatureHashCombiner { explicit SignatureHashCombiner(const uint64 h) : h(h) {} uint64 h; uint64 operator()(const Tensor& arg) { h = Hash64Combine(h, std::hash<int>()(static_cast<int>(arg.dtype()))); h = Hash64Combine( h, Hash64(arg.tensor_data().data(), arg.tensor_data().size())); for (int dim = 0; dim < arg.dims(); ++dim) { h = Hash64Combine(h, std::hash<int>()(arg.dim_size(dim))); } return h; } uint64 operator()(const TensorTypeAndShape& arg) { h = Hash64Combine(h, std::hash<int>()(static_cast<int>(arg.first))); h = Hash64Combine(h, std::hash<int>()(arg.second.size())); for (int dim : arg.second) { h = Hash64Combine(h, std::hash<int>()(dim)); } return h; } }; } std::string Signature::HumanString() const { std::string result = name; for (const auto& arg : args) { std::visit(SignatureHumanStringAppender(&result), arg); } return result; } bool Signature::operator==(const Signature& other) const { if (name != other.name) return false; if (args.size() != other.args.size()) return false; for (int i = 0, end = args.size(); i < end; ++i) { if (std::visit(SignatureNotEqual(), args[i], other.args[i])) { return false; } } return true; } uint64 Signature::Hash::operator()(const Signature& signature) const { uint64 h = std::hash<string>()(signature.name); for (const auto& arg : signature.args) { h = std::visit(SignatureHashCombiner(h), arg); } return h; } absl::StatusOr<Signature> Signature::Build( const NameAttrList& function, absl::Span<const XlaCompiler::Argument> args) { Signature signature; signature.name = 
Canonicalize(function.name(), AttrSlice(&function.attr())); for (const XlaCompiler::Argument& arg : args) { switch (arg.kind) { case XlaCompiler::Argument::kConstant: case XlaCompiler::Argument::kConstantResource: signature.args.push_back(arg.constant_value); break; case XlaCompiler::Argument::kParameter: case XlaCompiler::Argument::kResource: signature.args.push_back( TensorTypeAndShape(arg.type, arg.DimensionSizesAsInlinedVector())); break; default: return errors::InvalidArgument( "Unhandled argument kind in XlaCompilationCache: ", arg.HumanString()); } } return std::move(signature); } }
#include "tensorflow/compiler/jit/device_compilation_cluster_signature.h" #include <utility> #include <vector> #include "tensorflow/compiler/jit/flags.h" #include "tensorflow/compiler/tf2xla/shape_util.h" #include "xla/client/client_library.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/platform/test_benchmark.h" namespace tensorflow { namespace { using SignatureHash = DeviceCompilationClusterSignature::Hash; TEST(DeviceCompilationClusterSignatureTest, SignatureEquality) { NameAttrList fn; fn.set_name("afunction"); std::vector<XlaCompiler::Argument> args(1); args[0].kind = XlaCompiler::Argument::kConstant; args[0].type = DT_INT32; args[0].shape = TensorShape({4, 0}); args[0].constant_value = Tensor(DT_INT32, {4, 0}); TF_ASSERT_OK_AND_ASSIGN(DeviceCompilationClusterSignature s1, DeviceCompilationClusterSignature::Build(fn, args)); args[0].type = DT_FLOAT; args[0].constant_value = Tensor(DT_FLOAT, {4, 0}); TF_ASSERT_OK_AND_ASSIGN(DeviceCompilationClusterSignature s2, DeviceCompilationClusterSignature::Build(fn, args)); args[0].shape = TensorShape({0, 4}); args[0].constant_value = Tensor(DT_FLOAT, {0, 4}); TF_ASSERT_OK_AND_ASSIGN(DeviceCompilationClusterSignature s3, DeviceCompilationClusterSignature::Build(fn, args)); std::vector<DeviceCompilationClusterSignature> signatures = {s1, s2, s3}; for (int i = 0; i < signatures.size(); ++i) { for (int j = 0; j < signatures.size(); ++j) { EXPECT_EQ(i == j, signatures[i] == signatures[j]) << "s1: " << signatures[i].HumanString() << "\n" << "s2: " << signatures[j].HumanString(); EXPECT_EQ(i == j, signatures[i].HumanString() == signatures[j].HumanString()) << "s1: " << signatures[i].HumanString() << "\n" << "s2: " << signatures[j].HumanString(); EXPECT_EQ(i == j, SignatureHash()(signatures[i]) == SignatureHash()(signatures[j])) << "s1: " << signatures[i].HumanString() << "\n" << "s1_hash: " << SignatureHash()(signatures[i]) << "\n" << "s2: " << signatures[j].HumanString() << "\n" << "s2_hash: " << SignatureHash()(signatures[j]); } } } TEST(DeviceCompilationClusterSignatureTest, SignatureUniqueness) { NameAttrList fn; fn.set_name("afunction"); std::vector<XlaCompiler::Argument> args(2); args[0].kind = XlaCompiler::Argument::kConstant; args[0].type = DT_INT32; args[0].constant_value = Tensor(DT_INT32, {4, 0}); args[1].kind = XlaCompiler::Argument::kParameter; args[1].type = DT_INT32; args[1].shape = TensorShape({4, 0}); TF_ASSERT_OK_AND_ASSIGN(DeviceCompilationClusterSignature s1, DeviceCompilationClusterSignature::Build(fn, args)); using std::swap; swap(args[0], args[1]); TF_ASSERT_OK_AND_ASSIGN(DeviceCompilationClusterSignature s2, DeviceCompilationClusterSignature::Build(fn, args)); EXPECT_NE(s1.HumanString(), s2.HumanString()); EXPECT_NE(SignatureHash()(s1), SignatureHash()(s2)); EXPECT_FALSE(s1 == s2); } void BM_BuildSignature(::testing::benchmark::State& state) { const int n_args = state.range(0); NameAttrList fn; fn.set_name("afunction"); for (int i = 0; i < n_args; i++) { (*fn.mutable_attr())[absl::StrCat("T", i)].set_type(DT_FLOAT); } std::vector<XlaCompiler::Argument> args(n_args); for (int i = 0; i < n_args; i++) { args[i].kind = (((i % 3) == 0) ? 
XlaCompiler::Argument::kConstant : XlaCompiler::Argument::kParameter); args[i].type = DT_INT32; args[i].shape = TensorShape({4, 0}); args[i].constant_value = Tensor(DT_INT32, {4, 0}); } for (auto i : state) { auto s = DeviceCompilationClusterSignature::Build(fn, args); CHECK(s.ok()); DeviceCompilationClusterSignature sig = std::move(s.value()); } } BENCHMARK(BM_BuildSignature)->Arg(0)->Arg(1)->Arg(2)->Arg(5)->Arg(10); } }
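Note: SignatureHashCombiner above folds each std::variant argument into a running hash via std::visit. A self-contained sketch of that visit-and-combine pattern follows; it is not the TensorFlow implementation. The mixing function is a generic boost-style combiner rather than TF's Hash64Combine, and the Arg alternatives (an integer constant and a shape vector) are simplified stand-ins for Tensor and TensorTypeAndShape.

// Illustrative sketch of variant hash combining; not part of the record above.
#include <cstdint>
#include <functional>
#include <iostream>
#include <string>
#include <variant>
#include <vector>

using Shape = std::vector<int64_t>;
using Arg = std::variant<int64_t, Shape>;  // stand-ins for Tensor / TensorTypeAndShape

uint64_t Combine(uint64_t h, uint64_t v) {
  // Generic 64-bit mix; any reasonable combiner works for the sketch.
  return h ^ (v + 0x9e3779b97f4a7c15ULL + (h << 6) + (h >> 2));
}

struct HashCombiner {
  uint64_t h;
  // One overload per variant alternative, as in SignatureHashCombiner.
  uint64_t operator()(int64_t constant) const {
    return Combine(h, std::hash<int64_t>()(constant));
  }
  uint64_t operator()(const Shape& shape) const {
    uint64_t r = Combine(h, shape.size());
    for (int64_t dim : shape) r = Combine(r, std::hash<int64_t>()(dim));
    return r;
  }
};

uint64_t SignatureHash(const std::string& name, const std::vector<Arg>& args) {
  uint64_t h = std::hash<std::string>()(name);
  for (const Arg& arg : args) h = std::visit(HashCombiner{h}, arg);
  return h;
}

int main() {
  // Two signatures that differ only in argument order hash differently,
  // the property exercised by the SignatureUniqueness test above.
  std::cout << SignatureHash("afunction", {int64_t{42}, Shape{4, 0}}) << "\n";
  std::cout << SignatureHash("afunction", {Shape{4, 0}, int64_t{42}}) << "\n";
}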
1,088
cpp
tensorflow/tensorflow
compilability_check_util
tensorflow/compiler/jit/compilability_check_util.cc
tensorflow/compiler/jit/compilability_check_util_test.cc
#ifndef TENSORFLOW_COMPILER_JIT_COMPILABILITY_CHECK_UTIL_H_ #define TENSORFLOW_COMPILER_JIT_COMPILABILITY_CHECK_UTIL_H_ #include <string> #include "absl/algorithm/container.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "absl/types/optional.h" #include "tensorflow/compiler/jit/defs.h" #include "tensorflow/compiler/jit/device_util.h" #include "tensorflow/compiler/jit/flags.h" #include "tensorflow/compiler/jit/resource_operation_safety_analysis.h" #include "tensorflow/compiler/tf2xla/const_analysis.h" #include "tensorflow/compiler/tf2xla/resource_operation_table.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/service/graphcycles/graphcycles.h" #include "xla/union_find.h" #include "xla/util.h" #include "tensorflow/core/common_runtime/function.h" #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/framework/attr_value.pb.h" #include "tensorflow/core/framework/bounds_check.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/graph_def_util.h" #include "tensorflow/core/framework/memory_types.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/graph/algorithm.h" #include "tensorflow/core/graph/control_flow.h" #include "tensorflow/core/graph/graph.h" #include "tensorflow/core/lib/gtl/cleanup.h" #include "tensorflow/core/lib/strings/stringprintf.h" #include "tensorflow/core/public/version.h" #include "tensorflow/core/util/dump_graph.h" namespace tensorflow { class RecursiveCompilabilityChecker { public: struct StackFrame { std::string name; std::string function_name; std::shared_ptr<AbstractStackTrace> stack_trace; }; struct UncompilableNodeInfo { std::string name; std::vector<StackFrame> stack_trace; std::string uncompilable_reason; }; struct OperationFilter { bool allow_resource_ops_in_called_functions = false; bool allow_stack_ops = false; bool allow_tensor_array_ops = false; bool allow_stateful_rng_ops = false; bool allow_control_trigger = false; bool allow_eliding_assert_and_checknumerics_ops = false; bool allow_ops_producing_or_consuming_variant = false; bool allow_slow_ops = false; bool allow_inaccurate_ops = false; bool require_always_compilable = false; bool allow_string_consts = true; bool allow_collective_reduce_v2 = true; bool allow_where_op = true; bool allow_unique_op = true; bool allow_outside_compiled = false; }; RecursiveCompilabilityChecker(OperationFilter op_filter, DeviceType jit_device_type) : op_filter_(std::move(op_filter)), jit_device_type_(std::move(jit_device_type)) {} using UncompilableNodesMap = std::map<std::string, std::pair<NameAttrList, std::vector<UncompilableNodeInfo>>>; UncompilableNodesMap FindUncompilableNodes( const Node& node, FunctionLibraryRuntime* lib_runtime, const std::vector<StackFrame>* node_stack_trace = nullptr) const; bool IsCompilableNode(const Node& node, FunctionLibraryRuntime* lib_runtime) const { std::vector<StackFrameView> stack_trace; stack_trace.emplace_back(StackFrameView{node.name(), ""}); return IsCompilableNode(node, lib_runtime, &stack_trace); } bool OpIsInaccurate(const Node& node) const; bool OpIsSlow(const Node& node) const; private: struct StackFrameView { absl::string_view name; absl::string_view function_name; std::shared_ptr<AbstractStackTrace> stack_trace; }; bool IsCompilableNode( const Node& node, FunctionLibraryRuntime* 
lib_runtime, std::vector<StackFrameView>* stack_trace, NameAttrList* encapsulating_function = nullptr, UncompilableNodesMap* uncompilable_nodes = nullptr) const; bool IsCompilableCall( const NodeDef& call_def, FunctionLibraryRuntime* lib_runtime, std::vector<StackFrameView>* stack_trace, NameAttrList* encapsulating_function = nullptr, UncompilableNodesMap* uncompilable_nodes = nullptr) const; bool IsCompilableIf(const Node& if_node, FunctionLibraryRuntime* lib_runtime, std::vector<StackFrameView>* stack_trace, NameAttrList* encapsulating_function, UncompilableNodesMap* uncompilable_nodes) const; bool IsCompilableWhile(const Node& while_node, FunctionLibraryRuntime* lib_runtime, std::vector<StackFrameView>* stack_trace, NameAttrList* encapsulating_function, UncompilableNodesMap* uncompilable_nodes) const; bool IsCompilableCase(const Node& case_node, FunctionLibraryRuntime* lib_runtime, std::vector<StackFrameView>* stack_trace, NameAttrList* encapsulating_function, UncompilableNodesMap* uncompilable_nodes) const; bool ExtractNodeDefAndCheckCompilability( const Node& node, const std::string& attr_name, const std::string& call_name, NameAttrList* encapsulating_function, FunctionLibraryRuntime* lib_runtime, std::vector<StackFrameView>* stack_trace, UncompilableNodesMap* uncompilable_nodes) const; bool IsStackOp(const Node& node) const { const XlaResourceOpInfo* op_info = GetResourceOpInfoForOp(node.type_string()); return op_info && op_info->resource_kind() == XlaResourceKind::kStack; } bool IsTensorArrayOp(const Node& node) const { const XlaResourceOpInfo* op_info = GetResourceOpInfoForOp(node.type_string()); return op_info && op_info->resource_kind() == XlaResourceKind::kTensorArray; } bool IsAssertOrCheckNumerics(absl::string_view op_name) const { return op_name == "Assert" || op_name == "CheckNumerics"; } bool IsStatefulRandomOp(absl::string_view op_name) const { return op_name == "RandomUniform" || op_name == "RandomShuffle" || op_name == "RandomUniformInt" || op_name == "RandomStandardNormal" || op_name == "TruncatedNormal" || op_name == "Multinomial"; } bool OpProducesOrConsumesVariant(const Node& node) const { auto is_variant = [](DataType dtype) { return dtype == DT_VARIANT; }; return absl::c_any_of(node.input_types(), is_variant) || absl::c_any_of(node.output_types(), is_variant); } bool HasXLAKernel(const Node& node, string* uncompilable_reason = nullptr) const; static void MaybeMarkUncompilableNode( const absl::string_view reason, const std::vector<StackFrameView>& stack_trace, NameAttrList* encapsulating_function, UncompilableNodesMap* uncompilable_nodes_map); const size_t kMaxRecursionDepth = 50; const OperationFilter op_filter_; const DeviceType jit_device_type_; }; RecursiveCompilabilityChecker::OperationFilter CreateOperationFilter( const XlaOpRegistry::DeviceRegistration& registration); Status GetBodyAndConstantsAndResources(FunctionLibraryRuntime* flr, const NameAttrList& function, const FunctionBody** fbody, std::vector<int>* constant_arg_indices, std::vector<int>* resource_arg_indices); bool CanCreateXlaKernel(const NodeDef& node_def); tensorflow::MemoryTypeVector GetInputMemoryTypes( const tensorflow::FunctionBody* fbody, absl::Span<int const> constant_arg_indices, absl::Span<int const> resource_arg_indices); tensorflow::MemoryTypeVector GetOutputMemoryTypes( const tensorflow::FunctionBody* fbody); bool CanTriggerXlaCompilation(const GraphDef& graph); } #endif #include "tensorflow/compiler/jit/compilability_check_util.h" #include <algorithm> #include <atomic> #include 
<deque> #include <iterator> #include <limits> #include <string> #include <unordered_map> #include <unordered_set> #include <utility> #include "absl/algorithm/container.h" #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/status/statusor.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_join.h" #include "absl/strings/string_view.h" #include "tensorflow/compiler/jit/defs.h" #include "tensorflow/compiler/jit/device_util.h" #include "tensorflow/compiler/jit/flags.h" #include "tensorflow/compiler/jit/resource_operation_safety_analysis.h" #include "tensorflow/compiler/jit/xla_activity.pb.h" #include "tensorflow/compiler/jit/xla_activity_listener.h" #include "tensorflow/compiler/jit/xla_cluster_util.h" #include "tensorflow/compiler/tf2xla/const_analysis.h" #include "tensorflow/compiler/tf2xla/resource_operation_table.h" #include "tensorflow/compiler/tf2xla/tf2xla_util.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/service/graphcycles/graphcycles.h" #include "xla/union_find.h" #include "xla/util.h" #include "tensorflow/core/common_runtime/function.h" #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/framework/attr_value.pb.h" #include "tensorflow/core/framework/bounds_check.h" #include "tensorflow/core/framework/graph_def_util.h" #include "tensorflow/core/framework/memory_types.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/graph/algorithm.h" #include "tensorflow/core/graph/control_flow.h" #include "tensorflow/core/lib/gtl/cleanup.h" #include "tensorflow/core/lib/strings/stringprintf.h" #include "tensorflow/core/public/version.h" #include "tensorflow/core/util/dump_graph.h" namespace tensorflow { namespace { bool HasResourceInput(const Node& node) { return absl::c_count(node.input_types(), DT_RESOURCE) != 0; } void LogNotCompilable(const Node& node, absl::string_view reason = "") { VLOG(3) << "Found uncompilable node " << node.name() << " (op " << node.type_string() << ")" << (reason.empty() ? 
"" : ": ") << reason; } bool IsInOutsideCompilationCluster(const Node& n) { return n.attrs().Find(kXlaOutsideCompilationAttr) != nullptr; } Status MakeCallNodeFromAttribute(const Node& node, const std::string& attr_name, NodeDef* node_def) { const NameAttrList* name_attr; TF_RETURN_IF_ERROR(GetNodeAttr(node.attrs(), attr_name, &name_attr)); node_def->set_op(name_attr->name()); *(node_def->mutable_attr()) = name_attr->attr(); return absl::OkStatus(); } absl::StatusOr<std::vector<NodeDef>> MakeCallNodesFromAttribute( const Node& node, absl::string_view attr_name, absl::string_view call_name) { std::vector<NameAttrList> attr_lists; TF_RETURN_IF_ERROR(GetNodeAttr(node.attrs(), attr_name, &attr_lists)); std::vector<NodeDef> out; out.reserve(attr_lists.size()); for (int i = 0; i < attr_lists.size(); i++) { out.emplace_back(); NodeDef& inserted = out.back(); inserted.set_name(absl::StrCat(call_name, "_", i)); inserted.set_op(attr_lists[i].name()); *inserted.mutable_attr() = attr_lists[i].attr(); } return out; } class SinglePassSearch { public: explicit SinglePassSearch(absl::Span<int const> values) : current_index_(0), values_(values) {} bool ScanForValue(int value) { while (current_index_ < values_.size() && values_[current_index_] <= value) { if (values_[current_index_] == value) { current_index_++; return true; } current_index_++; } return false; } private: int current_index_; const absl::Span<int const> values_; }; } RecursiveCompilabilityChecker::UncompilableNodesMap RecursiveCompilabilityChecker::FindUncompilableNodes( const Node& node, FunctionLibraryRuntime* lib_runtime, const std::vector<RecursiveCompilabilityChecker::StackFrame>* node_stack_trace) const { std::vector<StackFrameView> stack_trace; if (node_stack_trace != nullptr) { for (const auto& frame : *node_stack_trace) { stack_trace.emplace_back( StackFrameView{frame.name, frame.function_name, frame.stack_trace}); } } stack_trace.emplace_back( StackFrameView{node.name(), "", node.GetStackTrace()}); RecursiveCompilabilityChecker::UncompilableNodesMap uncompilable_nodes; IsCompilableNode(node, lib_runtime, &stack_trace, nullptr, &uncompilable_nodes); return uncompilable_nodes; } bool RecursiveCompilabilityChecker::HasXLAKernel( const Node& node, string* uncompilable_reason) const { if (node.type_string() == "SymbolicGradient") { *uncompilable_reason = "SymbolicGradient should be handled by IsCompilableCall()."; return false; } if (node.type_string() == "Const") { const AttrValue* attr = node.attrs().Find("dtype"); if (!op_filter_.allow_string_consts && attr != nullptr && attr->type() == DT_STRING) { *uncompilable_reason = "Const op with type DT_STRING is not supported by XLA."; return false; } } if (HasForwardedRefInput(node)) { VLOG(2) << "Rejecting " << node.name() << ": Identity with unsafe cast."; *uncompilable_reason = "Identity with unsafe cast."; return false; } Status s = FindKernelDef(jit_device_type_, node.def(), nullptr, nullptr); if (!s.ok()) { *uncompilable_reason = s.message(); return false; } return true; } bool RecursiveCompilabilityChecker::IsCompilableIf( const Node& if_node, FunctionLibraryRuntime* lib_runtime, std::vector<StackFrameView>* stack_trace, NameAttrList* encapsulating_function, RecursiveCompilabilityChecker::UncompilableNodesMap* uncompilable_nodes) const { bool is_compilable = true; is_compilable &= ExtractNodeDefAndCheckCompilability( if_node, "then_branch", "if_then", encapsulating_function, lib_runtime, stack_trace, uncompilable_nodes); if (!uncompilable_nodes && !is_compilable) return is_compilable; 
is_compilable &= ExtractNodeDefAndCheckCompilability( if_node, "else_branch", "if_else", encapsulating_function, lib_runtime, stack_trace, uncompilable_nodes); return is_compilable; } bool RecursiveCompilabilityChecker::IsCompilableCase( const Node& case_node, FunctionLibraryRuntime* lib_runtime, std::vector<StackFrameView>* stack_trace, NameAttrList* encapsulating_function, RecursiveCompilabilityChecker::UncompilableNodesMap* uncompilable_nodes) const { absl::StatusOr<std::vector<NodeDef>> calls = MakeCallNodesFromAttribute(case_node, "branches", "branch"); if (!calls.ok()) { VLOG(2) << "Rejecting node " << case_node.name() << ": " << "missing attribute 'branches'"; return false; } bool is_compilable = true; for (const NodeDef& call : *calls) { is_compilable &= IsCompilableCall(call, lib_runtime, stack_trace, encapsulating_function, uncompilable_nodes); } return is_compilable; } bool RecursiveCompilabilityChecker::IsCompilableWhile( const Node& while_node, FunctionLibraryRuntime* lib_runtime, std::vector<StackFrameView>* stack_trace, NameAttrList* encapsulating_function, RecursiveCompilabilityChecker::UncompilableNodesMap* uncompilable_nodes) const { bool is_compilable = true; is_compilable &= ExtractNodeDefAndCheckCompilability( while_node, "cond", "while_cond", encapsulating_function, lib_runtime, stack_trace, uncompilable_nodes); if (!uncompilable_nodes && !is_compilable) return is_compilable; is_compilable &= ExtractNodeDefAndCheckCompilability( while_node, "body", "while_body", encapsulating_function, lib_runtime, stack_trace, uncompilable_nodes); return is_compilable; } bool RecursiveCompilabilityChecker::ExtractNodeDefAndCheckCompilability( const Node& node, const std::string& attr_name, const std::string& call_name, NameAttrList* encapsulating_function, FunctionLibraryRuntime* lib_runtime, std::vector<StackFrameView>* stack_trace, RecursiveCompilabilityChecker::UncompilableNodesMap* uncompilable_nodes) const { NodeDef call; call.set_name(call_name); if (!MakeCallNodeFromAttribute(node, attr_name, &call).ok()) { const auto uncompilable_reason = absl::StrCat( "missing '", attr_name, "' attribute from node", node.name()); MaybeMarkUncompilableNode(uncompilable_reason, *stack_trace, encapsulating_function, uncompilable_nodes); VLOG(2) << "Rejecting node " << node.name() << ": " << uncompilable_reason << "."; return false; } if (!IsCompilableCall(call, lib_runtime, stack_trace, encapsulating_function, uncompilable_nodes)) { VLOG(2) << "Rejecting node " << node.name() << ": can't compile : " << call.op(); return false; } return true; } bool RecursiveCompilabilityChecker::IsCompilableCall( const NodeDef& call_def, FunctionLibraryRuntime* lib_runtime, std::vector<StackFrameView>* stack_trace, NameAttrList* encapsulating_function, RecursiveCompilabilityChecker::UncompilableNodesMap* uncompilable_nodes) const { if (stack_trace->size() > kMaxRecursionDepth) { std::string uncompilable_reason = "function depth limit exceeded"; MaybeMarkUncompilableNode(uncompilable_reason, *stack_trace, encapsulating_function, uncompilable_nodes); VLOG(2) << "Rejecting " << call_def.op() << ": " << uncompilable_reason << "."; return false; } FunctionLibraryRuntime::Handle handle; Status s; NameAttrList function; s = NameAndAttrsFromFunctionCall(call_def, &function); if (s.ok()) { s = lib_runtime->Instantiate(function.name(), AttrSlice(&function.attr()), &handle); } if (!s.ok()) { std::string uncompilable_reason = absl::StrCat("could not instantiate call: '", function.name(), "'"); 
MaybeMarkUncompilableNode(uncompilable_reason, *stack_trace, encapsulating_function, uncompilable_nodes); VLOG(2) << "Rejecting " << call_def.DebugString() << ": " << uncompilable_reason << " : " << s; return false; } auto release_handle_on_return = gtl::MakeCleanup( [&] { TF_CHECK_OK(lib_runtime->ReleaseHandle(handle)); }); const FunctionBody* fbody = lib_runtime->GetFunctionBody(handle); bool is_compilable = true; for (const Node* node : fbody->graph->op_nodes()) { stack_trace->emplace_back( StackFrameView{node->name(), function.name(), node->GetStackTrace()}); is_compilable &= IsCompilableNode(*node, lib_runtime, stack_trace, &function, uncompilable_nodes); stack_trace->pop_back(); if (!uncompilable_nodes && !is_compilable) return is_compilable; } return is_compilable; } bool RecursiveCompilabilityChecker::OpIsInaccurate(const Node& node) const { return node.type_string() == "SelfAdjointEigV2" || node.type_string() == "Svd"; } bool RecursiveCompilabilityChecker::OpIsSlow(const Node& node) const { return node.type_string() == "SelfAdjointEigV2" || node.type_string() == "Svd" || node.type_string() == "Qr" || node.type_string() == "MatrixInverse" || node.type_string() == "MatrixSolve" || node.type_string() == "ResizeBilinearGrad"; } bool RecursiveCompilabilityChecker::IsCompilableNode( const Node& node, FunctionLibraryRuntime* lib_runtime, std::vector<StackFrameView>* stack_trace, NameAttrList* encapsulating_function, RecursiveCompilabilityChecker::UncompilableNodesMap* uncompilable_nodes) const { auto stack_depth = stack_trace->size(); if (op_filter_.allow_outside_compiled && IsInOutsideCompilationCluster(node)) return true; if (node.IsSource() || node.IsSink()) { absl::string_view uncompilable_reason = "source or sink node"; MaybeMarkUncompilableNode(uncompilable_reason, *stack_trace, encapsulating_function, uncompilable_nodes); LogNotCompilable(node, uncompilable_reason); return false; } if (stack_depth == 1 && (node.type_string() == "_Arg" || node.type_string() == "_Retval")) { absl::string_view uncompilable_reason = "top level _Arg or _Retval"; MaybeMarkUncompilableNode(uncompilable_reason, *stack_trace, encapsulating_function, uncompilable_nodes); LogNotCompilable(node, uncompilable_reason); return false; } if (node.attrs().Find("_scoped_allocator") || node.attrs().Find("_forward_from")) { absl::string_view uncompilable_reason = "_scoped_allocator or _forward_from attribute"; MaybeMarkUncompilableNode(uncompilable_reason, *stack_trace, encapsulating_function, uncompilable_nodes); LogNotCompilable(node, uncompilable_reason); return false; } string uncompilable_reason; if (IsFunctionCall(*lib_runtime->GetFunctionLibraryDefinition(), node)) { if (!IsCompilableCall(node.def(), lib_runtime, stack_trace, encapsulating_function, uncompilable_nodes)) { LogNotCompilable(node, "unsupported function"); return false; } } else if (!HasXLAKernel(node, &uncompilable_reason)) { MaybeMarkUncompilableNode( absl::StrCat("unsupported op: ", uncompilable_reason), *stack_trace, encapsulating_function, uncompilable_nodes); LogNotCompilable(node, uncompilable_reason); return false; } if (node.IsWhileNode() && !IsCompilableWhile(node, lib_runtime, stack_trace, encapsulating_function, uncompilable_nodes)) { LogNotCompilable(node, "unsupported while"); return false; } if (node.IsIfNode() && !IsCompilableIf(node, lib_runtime, stack_trace, encapsulating_function, uncompilable_nodes)) { LogNotCompilable(node, "unsupported if"); return f
#include "tensorflow/compiler/jit/compilability_check_util.h" #include "absl/memory/memory.h" #include "tensorflow/cc/framework/scope.h" #include "tensorflow/cc/ops/function_ops.h" #include "tensorflow/cc/ops/functional_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/compiler/tf2xla/xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "tensorflow/core/common_runtime/graph_def_builder_util.h" #include "tensorflow/core/framework/attr_value.pb.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/graph_to_functiondef.h" #include "tensorflow/core/framework/node_def_util.h" #include "tensorflow/core/graph/graph_def_builder.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace { AttrValue FuncListAttr(const absl::Span<const char* const> names) { AttrValue attr; for (const char* name : names) { attr.mutable_list()->add_func()->set_name(name); } return attr; } constexpr char kFunctionalIfNodeName[] = "If"; constexpr char kFunctionalCaseNodeName[] = "Case"; constexpr char kFunctionalWhileNodeName[] = "While"; constexpr char kCompilableFunctionName[] = "CompilableFn"; constexpr char kCompilableFunctionNodeName[] = "n_c"; constexpr char kUncompilableFunctionName[] = "UncompilableFn"; constexpr char kUncompilableFunctionNodeName[] = "n_c_uncompilable"; constexpr char kUncompilableFunctionTwoName[] = "UncompilableFnTwo"; constexpr char kUncompilableFunctionNodeTwoName[] = "n_d_uncompilable"; class DummyCompilableOp : public XlaOpKernel { public: explicit DummyCompilableOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {} void Compile(XlaOpKernelContext* ctx) override { ctx->SetOutput(0, ctx->Input(0)); } }; REGISTER_OP("InputFloatOp").Output("o: float"); REGISTER_OP("CompilableOp").Input("i: float").Output("o: float"); REGISTER_XLA_OP(Name("CompilableOp").Device(DEVICE_CPU_XLA_JIT), DummyCompilableOp); REGISTER_OP("MissingKernel").Input("i: float").Output("o: float"); class CompilabilityCheckUtilTest : public ::testing::Test { protected: void SetUp() override { XlaOpRegistry::RegisterCompilationKernels(); op_filter_.allow_resource_ops_in_called_functions = false; op_filter_.allow_stack_ops = false; op_filter_.allow_tensor_array_ops = false; op_filter_.allow_stateful_rng_ops = false; op_filter_.allow_control_trigger = false; op_filter_.allow_eliding_assert_and_checknumerics_ops = false; op_filter_.allow_ops_producing_or_consuming_variant = false; op_filter_.allow_inaccurate_ops = false; op_filter_.allow_slow_ops = false; op_filter_.allow_outside_compiled = false; checker_ = CreateCompilabilityChecker(); } std::unique_ptr<RecursiveCompilabilityChecker> CreateCompilabilityChecker() { return std::make_unique<RecursiveCompilabilityChecker>(op_filter_, device_type_); } FunctionLibraryRuntime* GetFunctionLibraryRuntime() { OptimizerOptions opts; pflr_ = std::make_unique<ProcessFunctionLibraryRuntime>( nullptr, Env::Default(), nullptr, TF_GRAPH_DEF_VERSION, flib_def_.get(), opts); return pflr_->GetFLR(ProcessFunctionLibraryRuntime::kDefaultFLRDevice); } RecursiveCompilabilityChecker::OperationFilter op_filter_; DeviceType device_type_ = DeviceType(DEVICE_CPU_XLA_JIT); std::unique_ptr<FunctionDefLibrary> func_library_ = std::make_unique<FunctionDefLibrary>(); std::unique_ptr<FunctionLibraryDefinition> flib_def_ = std::make_unique<FunctionLibraryDefinition>(OpRegistry::Global(), *func_library_); std::unique_ptr<RecursiveCompilabilityChecker> 
checker_; std::unique_ptr<ProcessFunctionLibraryRuntime> pflr_; }; TEST_F(CompilabilityCheckUtilTest, CheckNonFunctionalNodes) { GraphDefBuilder builder(GraphDefBuilder::kFailImmediately); auto opts = builder.opts(); Node* const0 = ops::SourceOp("InputFloatOp", opts); Node* compilable_op = ops::UnaryOp("CompilableOp", const0, opts); Node* uncompilable_op = ops::UnaryOp("MissingKernel", compilable_op, opts); GraphDef graph_def; TF_EXPECT_OK(builder.ToGraphDef(&graph_def)); auto* flib_runtime = GetFunctionLibraryRuntime(); EXPECT_FALSE(checker_->IsCompilableNode(*const0, flib_runtime)); EXPECT_TRUE(checker_->IsCompilableNode(*compilable_op, flib_runtime)); EXPECT_FALSE(checker_->IsCompilableNode(*uncompilable_op, flib_runtime)); const auto uncompilable_nodes = checker_->FindUncompilableNodes(*uncompilable_op, flib_runtime); ASSERT_EQ(1, uncompilable_nodes.size()); auto node_info_it = uncompilable_nodes.find(NameAttrList().ShortDebugString()); ASSERT_NE(uncompilable_nodes.end(), node_info_it); const auto& uncompilable_nodes_inside_function = node_info_it->second.second; ASSERT_EQ(1, uncompilable_nodes_inside_function.size()); const auto& uncompilable_node_info = uncompilable_nodes_inside_function.at(0); EXPECT_TRUE(absl::StrContains(uncompilable_node_info.uncompilable_reason, "unsupported op")); ASSERT_EQ(1, uncompilable_node_info.stack_trace.size()); ASSERT_EQ("", uncompilable_node_info.stack_trace.at(0).function_name); } TEST_F(CompilabilityCheckUtilTest, CheckOutsideCompiledNode) { GraphDefBuilder builder(GraphDefBuilder::kFailImmediately); auto opts = builder.opts(); Node* const0 = ops::SourceOp("InputFloatOp", opts); Node* uncompilable_op = ops::UnaryOp("MissingKernel", const0, opts); uncompilable_op->AddAttr("_xla_outside_compilation", "0"); GraphDef graph_def; TF_EXPECT_OK(builder.ToGraphDef(&graph_def)); auto* flib_runtime = GetFunctionLibraryRuntime(); EXPECT_FALSE(checker_->IsCompilableNode(*uncompilable_op, flib_runtime)); const auto uncompilable_nodes = checker_->FindUncompilableNodes(*uncompilable_op, flib_runtime); ASSERT_EQ(1, uncompilable_nodes.size()); op_filter_.allow_outside_compiled = true; checker_ = CreateCompilabilityChecker(); EXPECT_TRUE(checker_->IsCompilableNode(*uncompilable_op, flib_runtime)); const auto uncompilable_nodes2 = checker_->FindUncompilableNodes(*uncompilable_op, flib_runtime); ASSERT_EQ(0, uncompilable_nodes2.size()); } TEST_F(CompilabilityCheckUtilTest, CheckSimpleFunctionNode) { FunctionDefLibrary flib; *flib.add_function() = FunctionDefHelper::Define( kUncompilableFunctionName, {"n_a:float"}, {"n_c_uncompilable:float"}, {}, {{{kUncompilableFunctionNodeName}, "MissingKernel", {"n_a"}}}); flib_def_.reset(new FunctionLibraryDefinition(OpRegistry::Global(), flib)); GraphDefBuilder builder(GraphDefBuilder::kFailImmediately, flib_def_.get()); std::unique_ptr<Graph> graph(new Graph(flib_def_.get())); Node* const0 = ops::SourceOp("InputFloatOp", builder.opts()); Node* functional_node = ops::UnaryOp(kUncompilableFunctionName, const0, builder.opts().WithName("D")); TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get())); auto* flib_runtime = GetFunctionLibraryRuntime(); EXPECT_FALSE(checker_->IsCompilableNode(*functional_node, flib_runtime)); const auto uncompilable_nodes = checker_->FindUncompilableNodes(*functional_node, flib_runtime); EXPECT_EQ(1, uncompilable_nodes.size()); NameAttrList function; function.set_name(kUncompilableFunctionName); const auto node_info_it = uncompilable_nodes.find(function.ShortDebugString()); 
ASSERT_NE(uncompilable_nodes.end(), node_info_it); const auto& uncompilable_node_list = node_info_it->second.second; ASSERT_EQ(1, uncompilable_node_list.size()); const auto& node_info = uncompilable_node_list.at(0); const auto& node_stack = node_info.stack_trace; ASSERT_EQ(2, node_stack.size()); EXPECT_EQ("D", node_stack.at(0).name); EXPECT_EQ(kUncompilableFunctionNodeName, node_stack.at(1).name); EXPECT_EQ(kUncompilableFunctionNodeName, node_info.name); EXPECT_TRUE( absl::StrContains(node_info.uncompilable_reason, "unsupported op")); } TEST_F(CompilabilityCheckUtilTest, CheckFunctionalWhileNode) { FunctionDefLibrary flib; *flib.add_function() = FunctionDefHelper::Define( kCompilableFunctionName, {"n_a:float", "n_b:float"}, {"n_c:float"}, {}, {{{kCompilableFunctionNodeName}, "Add", {"n_a", "n_b"}, {{"T", DT_FLOAT}}}}); *flib.add_function() = FunctionDefHelper::Define( kUncompilableFunctionName, {"n_a:float"}, {"n_c_uncompilable:float"}, {}, {{{kUncompilableFunctionNodeName}, "MissingKernel", {"n_a"}}}); flib_def_.reset(new FunctionLibraryDefinition(OpRegistry::Global(), flib)); GraphDefBuilder builder(GraphDefBuilder::kFailImmediately, flib_def_.get()); Node* const0 = ops::SourceOp("InputFloatOp", builder.opts()); Node* input_node = ops::UnaryOp("CompilableOp", const0, builder.opts()); NameAttrList compilable; compilable.set_name(kCompilableFunctionName); NameAttrList uncompilable; uncompilable.set_name(kUncompilableFunctionName); NodeBuilder while_builder(kFunctionalWhileNodeName, "While", builder.opts().op_registry()); while_builder.Input({input_node, input_node}) .Attr("cond", compilable) .Attr("body", uncompilable); builder.opts().FinalizeBuilder(&while_builder); GraphDef graph_def; TF_EXPECT_OK(builder.ToGraphDef(&graph_def)); std::unique_ptr<Graph> graph(new Graph(flib_def_.get())); TF_CHECK_OK(GraphDefBuilderToGraph(builder, graph.get())); auto while_node_it = std::find_if( graph->nodes().begin(), graph->nodes().end(), [&](const Node* n) { return n->name() == kFunctionalWhileNodeName; }); EXPECT_NE(while_node_it, graph->nodes().end()); auto* flib_runtime = GetFunctionLibraryRuntime(); EXPECT_FALSE(checker_->IsCompilableNode(**while_node_it, flib_runtime)); const auto uncompilable_nodes = checker_->FindUncompilableNodes(**while_node_it, flib_runtime); ASSERT_EQ(1, uncompilable_nodes.size()); NameAttrList function; function.set_name(kUncompilableFunctionName); const auto node_info_it = uncompilable_nodes.find(function.ShortDebugString()); ASSERT_NE(uncompilable_nodes.end(), node_info_it); const auto& uncompilable_node_list = node_info_it->second.second; ASSERT_EQ(1, uncompilable_node_list.size()); const auto& node_info = uncompilable_node_list.at(0); const auto& node_stack = node_info.stack_trace; ASSERT_EQ(2, node_stack.size()); const auto& stacktrace_first_node_info = node_stack.at(0); EXPECT_EQ(kFunctionalWhileNodeName, stacktrace_first_node_info.name); EXPECT_EQ("", stacktrace_first_node_info.function_name); const auto& stacktrace_second_node_info = node_stack.at(1); EXPECT_EQ(kUncompilableFunctionNodeName, stacktrace_second_node_info.name); EXPECT_EQ(kUncompilableFunctionName, stacktrace_second_node_info.function_name); EXPECT_EQ(kUncompilableFunctionNodeName, node_info.name); EXPECT_TRUE( absl::StrContains(node_info.uncompilable_reason, "unsupported op")); } TEST_F(CompilabilityCheckUtilTest, CheckFunctionalIfNode) { FunctionDefLibrary flib; *flib.add_function() = FunctionDefHelper::Define( kUncompilableFunctionName, {"n_a:float"}, {"n_c_uncompilable:float"}, {}, 
{{{kUncompilableFunctionNodeName}, "MissingKernel", {"n_a"}}}); *flib.add_function() = FunctionDefHelper::Define( kUncompilableFunctionTwoName, {"n_a:float"}, {"n_d_uncompilable:float"}, {}, {{{kUncompilableFunctionNodeTwoName}, "MissingKernel", {"n_a"}}}); NameAttrList uncompilable_fn1_attr; uncompilable_fn1_attr.set_name(kUncompilableFunctionName); NameAttrList uncompilable_fn2_attr; uncompilable_fn2_attr.set_name(kUncompilableFunctionTwoName); Scope root = Scope::NewRootScope().ExitOnError(); TF_ASSERT_OK(root.graph()->AddFunctionLibrary(flib)); auto predicate = ops::Placeholder(root.WithOpName("pred"), DT_BOOL); auto placeholder = ops::Placeholder(root.WithOpName("A"), DT_INT32); std::vector<NodeBuilder::NodeOut> if_inputs( {NodeBuilder::NodeOut(placeholder.node())}); Node* if_node; TF_ASSERT_OK( NodeBuilder(kFunctionalIfNodeName, "If", &root.graph()->flib_def()) .Input(predicate.node()) .Input(if_inputs) .Attr("then_branch", uncompilable_fn1_attr) .Attr("else_branch", uncompilable_fn2_attr) .Attr("Tout", {DT_INT32}) .Finalize(root.graph(), &if_node)); std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); TF_ASSERT_OK(root.ToGraph(graph.get())); flib_def_.reset(new FunctionLibraryDefinition(OpRegistry::Global(), flib)); auto if_node_it = std::find_if( graph->nodes().begin(), graph->nodes().end(), [&](const Node* n) { return n->name() == kFunctionalIfNodeName; }); EXPECT_NE(if_node_it, graph->nodes().end()); auto* flib_runtime = GetFunctionLibraryRuntime(); EXPECT_FALSE(checker_->IsCompilableNode(**if_node_it, flib_runtime)); const auto uncompilable_nodes = checker_->FindUncompilableNodes(**if_node_it, flib_runtime); ASSERT_EQ(2, uncompilable_nodes.size()); NameAttrList function_one; function_one.set_name(kUncompilableFunctionName); auto it = uncompilable_nodes.find(function_one.ShortDebugString()); ASSERT_NE(uncompilable_nodes.end(), it); const auto& uncompilable_node_list = it->second.second; ASSERT_EQ(1, uncompilable_node_list.size()); const auto& uncompilable_node_one = uncompilable_node_list.at(0); const auto& node_one_stack = uncompilable_node_one.stack_trace; ASSERT_EQ(2, node_one_stack.size()); const auto& node_one_stacktrace_first_node = node_one_stack.at(0); EXPECT_EQ(kFunctionalIfNodeName, node_one_stacktrace_first_node.name); EXPECT_EQ("", node_one_stacktrace_first_node.function_name); const auto& stacktrace_second_node_info = node_one_stack.at(1); EXPECT_EQ(kUncompilableFunctionNodeName, stacktrace_second_node_info.name); EXPECT_EQ(kUncompilableFunctionName, stacktrace_second_node_info.function_name); EXPECT_EQ(kUncompilableFunctionNodeName, uncompilable_node_one.name); EXPECT_TRUE(absl::StrContains(uncompilable_node_one.uncompilable_reason, "unsupported op")); NameAttrList function_two; function_two.set_name(kUncompilableFunctionTwoName); it = uncompilable_nodes.find(function_two.ShortDebugString()); ASSERT_NE(uncompilable_nodes.end(), it); const auto& uncompilable_node_two_list = it->second.second; ASSERT_EQ(1, uncompilable_node_two_list.size()); const auto& uncompilable_node_two = uncompilable_node_two_list.at(0); const auto& node_two_stack = uncompilable_node_two.stack_trace; ASSERT_EQ(2, node_two_stack.size()); const auto& node_two_stacktrace_first_node = node_two_stack.at(0); EXPECT_EQ(kFunctionalIfNodeName, node_two_stacktrace_first_node.name); EXPECT_EQ("", node_two_stacktrace_first_node.function_name); const auto& node_two_stacktrace_second_node = node_two_stack.at(1); EXPECT_EQ(kUncompilableFunctionNodeTwoName, node_two_stacktrace_second_node.name); 
EXPECT_EQ(kUncompilableFunctionTwoName, node_two_stacktrace_second_node.function_name); EXPECT_EQ(kUncompilableFunctionNodeTwoName, uncompilable_node_two.name); EXPECT_TRUE(absl::StrContains(uncompilable_node_two.uncompilable_reason, "unsupported op")); } TEST_F(CompilabilityCheckUtilTest, CheckFunctionalCaseNode) { FunctionDefLibrary flib; *flib.add_function() = FunctionDefHelper::Define( kUncompilableFunctionName, {"n_a:float"}, {"n_c_uncompilable:float"}, {}, {{{kUncompilableFunctionNodeName}, "MissingKernel", {"n_a"}}}); *flib.add_function() = FunctionDefHelper::Define( kUncompilableFunctionTwoName, {"n_a:float"}, {"n_d_uncompilable:float"}, {}, {{{kUncompilableFunctionNodeTwoName}, "MissingKernel", {"n_a"}}}); Scope root = Scope::NewRootScope().ExitOnError(); TF_ASSERT_OK(root.graph()->AddFunctionLibrary(flib)); auto branch_index = ops::Placeholder(root.WithOpName("pred"), DT_INT32); auto placeholder = ops::Placeholder(root.WithOpName("A"), DT_INT32); std::vector<NodeBuilder::NodeOut> inputs( {NodeBuilder::NodeOut(placeholder.node())}); Node* case_node; TF_ASSERT_OK( NodeBuilder(kFunctionalCaseNodeName, "Case", &root.graph()->flib_def()) .Input(branch_index.node()) .Input(inputs) .Attr("branches", FuncListAttr({kUncompilableFunctionName, kUncompilableFunctionTwoName})) .Attr("Tout", {DT_INT32}) .Finalize(root.graph(), &case_node)); std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); TF_ASSERT_OK(root.ToGraph(graph.get())); flib_def_.reset(new FunctionLibraryDefinition(OpRegistry::Global(), flib)); auto case_node_it = std::find_if( graph->nodes().begin(), graph->nodes().end(), [&](const Node* n) { return n->name() == kFunctionalCaseNodeName; }); EXPECT_NE(case_node_it, graph->nodes().end()); auto* flib_runtime = GetFunctionLibraryRuntime(); op_filter_.require_always_compilable = false; checker_ = CreateCompilabilityChecker(); EXPECT_TRUE(checker_->IsCompilableNode(**case_node_it, flib_runtime)); op_filter_.require_always_compilable = true; checker_ = CreateCompilabilityChecker(); EXPECT_FALSE(checker_->IsCompilableNode(**case_node_it, flib_runtime)); } TEST_F(CompilabilityCheckUtilTest, TestCanNotTriggerXlaCompilation) { GraphDefBuilder b(GraphDefBuilder::kFailImmediately); Scope root = Scope::NewRootScope().ExitOnError(); FunctionDefLibrary library; FunctionDef identity_func = FunctionDefHelper::Create( "IdentityFunc", {"x:float"}, {"res:float"}, {}, {{{"t0"}, "Identity", {"x"}, {{"T", DT_FLOAT}}}}, {{"res", "t0:output"}}); *library.add_function() = identity_func; Output in = ops::Placeholder(root, DT_FLOAT); NameAttrList b_name_attr; b_name_attr.set_name("IdentityFunc"); ops::PartitionedCall call(root.WithOpName("call"), {in}, {DT_FLOAT}, b_name_attr); GraphDef graph_def; TF_ASSERT_OK(root.graph()->AddFunctionLibrary(library)); TF_ASSERT_OK(root.ToGraphDef(&graph_def)); EXPECT_FALSE(CanTriggerXlaCompilation(graph_def)); } TEST_F(CompilabilityCheckUtilTest, TestXlaOpsCanTriggerXlaCompilation) { GraphDefBuilder b(GraphDefBuilder::kFailImmediately); Scope root = Scope::NewRootScope().ExitOnError(); FunctionDefLibrary library; FunctionDef sort_func = FunctionDefHelper::Create( "SortFunc", {"x:float"}, {"res:float"}, {}, {{{"t0"}, "XlaSort", {"x"}, {{"T", DT_FLOAT}}}}, {{"res", "t0:output"}}); *library.add_function() = sort_func; Output in = ops::Placeholder(root, DT_FLOAT); NameAttrList b_name_attr; b_name_attr.set_name("SortFunc"); ops::PartitionedCall call(root.WithOpName("call"), {in}, {DT_FLOAT}, b_name_attr); GraphDef graph_def; 
TF_ASSERT_OK(root.graph()->AddFunctionLibrary(library)); TF_ASSERT_OK(root.ToGraphDef(&graph_def)); EXPECT_TRUE(CanTriggerXlaCompilation(graph_def)); } TEST_F(CompilabilityCheckUtilTest, TestCanTriggerXlaCompilation) { GraphDefBuilder b(GraphDefBuilder::kFailImmediately); Scope root = Scope::NewRootScope().ExitOnError(); FunctionDefLibrary library; AttrValue true_attribute; true_attribute.set_b(true); FunctionDef identity_func = FunctionDefHelper::Create( "IdentityFunc", {"x:float"}, {"res:float"}, {}, {{{"t0"}, "Identity", {"x"}, {{"T", DT_FLOAT}}}}, {{"res", "t0:output"}}); (*identity_func.mutable_attr())[kXlaMustCompileAttr] = true_attribute; FunctionDef call_identity = FunctionDefHelper::Create( "CallIdentity", {"x:float"}, {"z:float"}, {}, {{{"func_call"}, "PartitionedCall", {"x"}, {{"Tin", DataTypeSlice({DT_FLOAT})}, {"Tout", DataTypeSlice({DT_FLOAT})}, {"f", FunctionDefHelper::FunctionRef("IdentityRef", {{"T", DT_FLOAT}})}, {kXlaMustCompileAttr, true}}}}, {{"z", "func_call:output:0"}}); *library.add_function() = identity_func; *library.add_function() = call_identity; Output in = ops::Placeholder(root, DT_FLOAT); NameAttrList b_name_attr; b_name_attr.set_name("CallIdentity"); ops::PartitionedCall call(root.WithOpName("call"), {in}, {DT_FLOAT}, b_name_attr); GraphDef graph_def; TF_ASSERT_OK(root.graph()->AddFunctionLibrary(library)); TF_ASSERT_OK(root.ToGraphDef(&graph_def)); EXPECT_TRUE(CanTriggerXlaCompilation(graph_def)); } } }
1,089
cpp
tensorflow/tensorflow
encapsulate_util
tensorflow/compiler/jit/encapsulate_util.cc
tensorflow/compiler/jit/encapsulate_util_test.cc
#ifndef TENSORFLOW_COMPILER_JIT_ENCAPSULATE_UTIL_H_ #define TENSORFLOW_COMPILER_JIT_ENCAPSULATE_UTIL_H_ #include "absl/container/flat_hash_map.h" #include "tensorflow/core/graph/graph.h" namespace tensorflow { extern const char kXlaInferredShapesAttrName[]; Status PerformStaticShapeInferenceBeforeEncapsulation(Graph* g); extern const char kXlaConnectedToXlaComputationAttrName[]; extern const char kXlaConnectedFromXlaComputationAttrName[]; extern const char kOutsideCompilationOriginalNodeAttrName[]; extern const char kOutsideCompilationSrcOutputAttrName[]; extern const char kXlaControlDependenciesWithinXlaClusterAttrName[]; extern const char kXlaIsLiftedArgAttrName[]; extern const char kXlaLiftedArgOutsideCompilationAttrName[]; extern const char kXlaOutsideCompilationInputsAttrName[]; extern const char kXlaIsPlaceholderForArg[]; struct XlaClusterInfo { XlaClusterInfo() {} XlaClusterInfo(const string& cluster_name, const NameAttrList& func_name_attrs, Node* node, const std::map<string, int>& host_compute_core) : cluster_name(cluster_name), func_name_attrs(func_name_attrs), node(node), host_compute_core(host_compute_core) {} const string cluster_name; const NameAttrList func_name_attrs; Node* node; const std::map<string, int> host_compute_core; }; absl::StatusOr< std::unique_ptr<absl::flat_hash_map<string, std::vector<string>>>> OutsideCompilationClusterDependencies( const Graph* g, const string& outside_compilation_attr_name); Status PreprocessEdgesBetweenOutsideCompilations( Graph* g, const string& outside_compilation_attr_name); Status PostprocessEdgesBetweenOutsideCompilations( Graph* g, const string& outside_compilation_attr_name); } #endif #include "tensorflow/compiler/jit/encapsulate_util.h" #include <algorithm> #include <iterator> #include "absl/container/flat_hash_map.h" #include "absl/container/flat_hash_set.h" #include "absl/strings/str_cat.h" #include "absl/types/optional.h" #include "tensorflow/compiler/jit/shape_inference.h" #include "tensorflow/compiler/tf2xla/tf2xla_util.h" #include "tensorflow/core/framework/node_def_util.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/protobuf/error_codes.pb.h" using tsl::StatusOr; namespace tensorflow { namespace { std::optional<string> GetStringAttr(const Node& n, const string& attr_name) { auto attr = n.attrs().Find(attr_name); if (!attr) { return std::nullopt; } else { return attr->s(); } } template <typename T> Status AppendToListAttr(Node* n, const string& attr_name, const string& value) { std::vector<T> attr_value; Status s = GetNodeAttr(n->attrs(), attr_name, &attr_value); if (!s.ok() && s.code() != error::NOT_FOUND) { return s; } n->ClearAttr(attr_name); attr_value.push_back(value); n->AddAttr(attr_name, attr_value); return absl::OkStatus(); } template <typename T> void ReplaceAttr(Node* n, const string& attr_name, const T& value) { n->ClearAttr(attr_name); n->AddAttr(attr_name, value); } Status PreprocessControlEdgesBetweenOutsideCompilations( Graph* g, const string& outside_compilation_attr_name) { std::vector<const Edge*> edges_to_remove; for (const Edge* e : g->edges()) { if (!e->IsControlEdge()) { continue; } auto src_outside_compilation = GetStringAttr(*e->src(), outside_compilation_attr_name); auto dst_outside_compilation = GetStringAttr(*e->dst(), outside_compilation_attr_name); if (src_outside_compilation && dst_outside_compilation) { if (*src_outside_compilation != *dst_outside_compilation) { edges_to_remove.push_back(e); TF_RETURN_IF_ERROR(AppendToListAttr<string>( e->dst(), 
kXlaControlDependenciesWithinXlaClusterAttrName, e->src()->name())); } } else if (src_outside_compilation && !dst_outside_compilation) { ReplaceAttr(e->src(), kXlaConnectedToXlaComputationAttrName, true); } else if (!src_outside_compilation && dst_outside_compilation) { ReplaceAttr(e->dst(), kXlaConnectedFromXlaComputationAttrName, true); } } for (auto e : edges_to_remove) { g->RemoveEdge(e); } return absl::OkStatus(); } Status PreprocessDataEdgesBetweenOutsideCompilations( Graph* g, const string& outside_compilation_attr_name) { struct EdgeInfo { int dst_input, dst_node_id; }; std::vector<EdgeInfo> edges; for (const Edge* e : g->edges()) { if (e->IsControlEdge()) { continue; } auto src_outside_compilation = GetStringAttr(*e->src(), outside_compilation_attr_name); auto dst_outside_compilation = GetStringAttr(*e->dst(), outside_compilation_attr_name); if (src_outside_compilation && dst_outside_compilation && *src_outside_compilation != *dst_outside_compilation) { edges.push_back(EdgeInfo{e->dst_input(), e->dst()->id()}); VLOG(4) << "Oc -> oc edge: " << e->DebugString(); } } std::map<std::pair<string, int>, Node*> placeholders; for (int i = 0, end = edges.size(); i < end; i++) { Node* dst = g->FindNodeId(edges[i].dst_node_id); const Edge* e; TF_RETURN_IF_ERROR(dst->input_edge(edges[i].dst_input, &e)); Node* src = e->src(); int src_output = e->src_output(), dst_input = e->dst_input(); g->RemoveEdge(e); string new_name = absl::StrCat(src->name(), "_oc_to_oc_placeholder_", src_output); auto placeholder_index = std::make_pair(src->name(), src_output); auto iter = placeholders.find(placeholder_index); Node* placeholder_node; if (iter == placeholders.end()) { NodeDefBuilder placeholder_builder(new_name, "Placeholder"); placeholder_builder.Attr("dtype", src->output_type(src_output)); string outside_compilation_attr; TF_RETURN_IF_ERROR(GetNodeAttr(dst->attrs(), outside_compilation_attr_name, &outside_compilation_attr)); placeholder_builder.Attr(outside_compilation_attr_name, outside_compilation_attr); placeholder_builder.Attr(kOutsideCompilationOriginalNodeAttrName, src->name()); placeholder_builder.Attr(kOutsideCompilationSrcOutputAttrName, src_output); NodeDef placeholder_def; TF_RETURN_IF_ERROR(placeholder_builder.Finalize(&placeholder_def)); TF_ASSIGN_OR_RETURN(placeholder_node, g->AddNode(placeholder_def)); placeholders[placeholder_index] = placeholder_node; } else { placeholder_node = iter->second; } g->AddEdge(placeholder_node, 0, dst, dst_input); NodeDef new_def = dst->def(); *new_def.mutable_input(dst_input) = placeholder_node->name(); TF_ASSIGN_OR_RETURN(Node * dst_replace_node, ReplaceNode(g, dst, new_def)); for (int j = i + 1, end = edges.size(); j < end; j++) { if (edges[j].dst_node_id == edges[i].dst_node_id) { edges[j].dst_node_id = dst_replace_node->id(); } } } return absl::OkStatus(); } Status PostprocessDataEdgesBetweenOutsideCompilations( Graph* g, const string& outside_compilation_attr_name) { std::vector<Node*> placeholder_nodes; for (Node* n : g->nodes()) { if (n->type_string() == "Placeholder" && HasNodeAttr(n->def(), kOutsideCompilationOriginalNodeAttrName)) { placeholder_nodes.push_back(n); } } auto node_name_index = g->BuildNodeNameIndex(); for (auto n : placeholder_nodes) { string node_name; int node_src_output; TF_RETURN_IF_ERROR(GetNodeAttr( n->attrs(), kOutsideCompilationOriginalNodeAttrName, &node_name)); TF_RETURN_IF_ERROR(GetNodeAttr( n->attrs(), kOutsideCompilationSrcOutputAttrName, &node_src_output)); auto iter = node_name_index.find(node_name); if (iter == 
node_name_index.end()) { return errors::Internal( "Cannot find original node for oc -> host placeholder node ", node_name); } Node* original_node = iter->second; std::vector<const Edge*> control_edges; std::vector<OutEdgeInfo> data_edges; for (auto e : n->out_edges()) { if (e->IsControlEdge()) { control_edges.push_back(e); } else { data_edges.push_back({e->dst(), e->src_output(), e->dst_input()}); } } for (const Edge* e : control_edges) { g->AddControlEdge(original_node, e->dst()); g->RemoveEdge(e); } for (int i = 0, end = data_edges.size(); i < end; i++) { Node* dst = data_edges[i].dst; NodeDef new_def = dst->def(); int dst_input = data_edges[i].dst_input; *new_def.mutable_input(dst_input) = absl::StrCat(original_node->name(), ":", node_src_output); TF_ASSIGN_OR_RETURN(Node * replace_node, ReplaceNode(g, dst, new_def)); const Edge* edge_to_replace = nullptr; TF_RETURN_IF_ERROR(replace_node->input_edge(dst_input, &edge_to_replace)); g->RemoveEdge(edge_to_replace); g->AddEdge(original_node, node_src_output, replace_node, dst_input); for (int j = i + 1, end = data_edges.size(); j < end; j++) { if (data_edges[j].dst == dst) { data_edges[j].dst = replace_node; } } node_name_index[replace_node->name()] = replace_node; } g->RemoveNode(n); } return absl::OkStatus(); } Status PostprocessControlEdgesBetweenOutsideCompilations( Graph* g, const string& outside_compilation_attr_name) { auto node_name_index = g->BuildNodeNameIndex(); for (Node* n : g->nodes()) { std::vector<string> control_deps; Status s = GetNodeAttr(n->attrs(), kXlaControlDependenciesWithinXlaClusterAttrName, &control_deps); if (!s.ok()) { if (s.code() != error::NOT_FOUND) { return s; } else { continue; } } else { n->ClearAttr(kXlaControlDependenciesWithinXlaClusterAttrName); for (const string& control_input : control_deps) { auto iter = node_name_index.find(control_input); if (iter == node_name_index.end()) { return errors::Internal("Cannot find original node for ", control_input); } g->AddControlEdge(iter->second, n); } } } return absl::OkStatus(); } } const char kXlaInferredShapesAttrName[] = "_xla_inferred_shapes"; const char kXlaConnectedToXlaComputationAttrName[] = "_xla_connected_to_xla_computation"; const char kXlaConnectedFromXlaComputationAttrName[] = "_xla_connected_from_xla_computation"; const char kOutsideCompilationOriginalNodeAttrName[] = "_xla_oc_to_oc_node_name"; const char kOutsideCompilationSrcOutputAttrName[] = "_xla_oc_to_oc_src_output"; const char kXlaControlDependenciesWithinXlaClusterAttrName[] = "_xla_control_dependencies_within_xla_cluster"; const char kXlaIsLiftedArgAttrName[] = "_xla_is_lifted_arg"; const char kXlaLiftedArgOutsideCompilationAttrName[] = "_xla_lifted_arg_oc"; const char kXlaOutsideCompilationInputsAttrName[] = "_xla_oc_inputs"; const char kXlaIsPlaceholderForArg[] = "_xla_is_placeholder_for_arg"; Status PerformStaticShapeInferenceBeforeEncapsulation(Graph* g) { std::map<int, InferredShape> arg_shapes; GraphShapeInfo shape_info; TF_RETURN_IF_ERROR( InferShapes(g, arg_shapes, nullptr, &shape_info)); auto node_name_index = g->BuildNodeNameIndex(); for (auto iter : shape_info) { std::vector<PartialTensorShape> output_shapes; std::transform(iter.second.begin(), iter.second.end(), std::back_inserter(output_shapes), [](const InferredShape& inferred_shape) { return inferred_shape.shape; }); Node* n = node_name_index[iter.first]; n->AddAttr(kXlaInferredShapesAttrName, output_shapes); } return absl::OkStatus(); } absl::StatusOr< std::unique_ptr<absl::flat_hash_map<string, std::vector<string>>>> 
OutsideCompilationClusterDependencies( const Graph* g, const string& outside_compilation_attr_name) { auto cluster_deps = std::make_unique< absl::flat_hash_map<string, absl::flat_hash_set<string>>>(); for (const Edge* e : g->edges()) { auto src_outside_compilation = GetStringAttr(*e->src(), outside_compilation_attr_name); auto dst_outside_compilation = GetStringAttr(*e->dst(), outside_compilation_attr_name); if (src_outside_compilation && dst_outside_compilation && *src_outside_compilation != *dst_outside_compilation) { auto dst_deps_it = cluster_deps->find(*dst_outside_compilation); if (dst_deps_it == cluster_deps->end()) { cluster_deps->insert(std::make_pair( *dst_outside_compilation, absl::flat_hash_set<string>({*src_outside_compilation}))); } else { dst_deps_it->second.insert(*src_outside_compilation); } } } auto cluster_deps_ordered = std::make_unique<absl::flat_hash_map<string, std::vector<string>>>(); for (auto it = cluster_deps->begin(); it != cluster_deps->end(); it++) { std::vector<string> ordered_deps(it->second.begin(), it->second.end()); std::sort(ordered_deps.begin(), ordered_deps.end()); cluster_deps_ordered->insert(std::make_pair(it->first, ordered_deps)); } return std::move(cluster_deps_ordered); } Status PreprocessEdgesBetweenOutsideCompilations( Graph* g, const string& outside_compilation_attr_name) { std::vector<const Edge*> edges_to_remove; for (const Edge* e : g->source_node()->out_edges()) { if (HasNodeAttr(e->dst()->def(), outside_compilation_attr_name)) { edges_to_remove.push_back(e); } } for (const Edge* e : g->sink_node()->in_edges()) { if (HasNodeAttr(e->src()->def(), outside_compilation_attr_name)) { edges_to_remove.push_back(e); } } for (auto e : edges_to_remove) { g->RemoveEdge(e); } TF_RETURN_IF_ERROR(PreprocessControlEdgesBetweenOutsideCompilations( g, outside_compilation_attr_name)); TF_RETURN_IF_ERROR(PreprocessDataEdgesBetweenOutsideCompilations( g, outside_compilation_attr_name)); return absl::OkStatus(); } Status PostprocessEdgesBetweenOutsideCompilations( Graph* g, const string& outside_compilation_attr_name) { TF_RETURN_IF_ERROR(PostprocessDataEdgesBetweenOutsideCompilations( g, outside_compilation_attr_name)); TF_RETURN_IF_ERROR(PostprocessControlEdgesBetweenOutsideCompilations( g, outside_compilation_attr_name)); return absl::OkStatus(); } }
#include "tensorflow/compiler/jit/encapsulate_util.h" #include "tensorflow/cc/framework/scope.h" #include "tensorflow/cc/ops/array_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/core/framework/node_def_util.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_shape.pb.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { TEST(PerformStaticShapeInferenceBeforeEncapsulationTest, Basic) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output const_0 = ops::Const(s.WithOpName("const_0"), 1, {2}); Output const_1 = ops::Const(s.WithOpName("const_1"), 2, {2}); Output add = ops::Add(s.WithOpName("add"), const_0, const_1); Output identity = ops::Identity(s.WithOpName("identity"), add); Graph g(OpRegistry::Global()); TF_CHECK_OK(s.ToGraph(&g)); TF_CHECK_OK(PerformStaticShapeInferenceBeforeEncapsulation(&g)); auto node_index = g.BuildNodeNameIndex(); Node *add_node = node_index["add"]; std::vector<PartialTensorShape> output_shapes; TF_CHECK_OK(GetNodeAttr(add_node->attrs(), kXlaInferredShapesAttrName, &output_shapes)); EXPECT_EQ(output_shapes.size(), 1); TensorShapeProto shape_proto; output_shapes[0].AsProto(&shape_proto); EXPECT_EQ(shape_proto.dim_size(), 1); EXPECT_EQ(shape_proto.dim(0).size(), 2); } }
1,090
cpp
tensorflow/tensorflow
build_xla_ops_pass
tensorflow/compiler/jit/build_xla_ops_pass.cc
tensorflow/compiler/jit/build_xla_ops_pass_test.cc
#ifndef TENSORFLOW_COMPILER_JIT_BUILD_XLA_OPS_PASS_H_ #define TENSORFLOW_COMPILER_JIT_BUILD_XLA_OPS_PASS_H_ #include "absl/types/optional.h" #include "tensorflow/core/common_runtime/optimization_registry.h" #include "tensorflow/core/lib/core/status.h" namespace tensorflow { class BuildXlaOpsPass : public GraphOptimizationPass { public: explicit BuildXlaOpsPass( std::optional<bool> enable_lazy_compilation = std::nullopt) : enable_lazy_compilation_(enable_lazy_compilation) {} Status Run(const GraphOptimizationPassOptions& options) override; private: std::optional<bool> enable_lazy_compilation_; }; } #endif #include "tensorflow/compiler/jit/build_xla_ops_pass.h" #include "absl/algorithm/container.h" #include "absl/strings/str_cat.h" #include "absl/strings/str_join.h" #include "tensorflow/cc/framework/ops.h" #include "tensorflow/cc/framework/scope_internal.h" #include "tensorflow/cc/ops/array_ops.h" #include "tensorflow/cc/ops/const_op.h" #include "tensorflow/cc/ops/control_flow_ops.h" #include "tensorflow/cc/ops/functional_ops.h" #include "tensorflow/cc/ops/logging_ops.h" #include "tensorflow/compiler/jit/defs.h" #include "tensorflow/compiler/jit/device_util.h" #include "tensorflow/compiler/jit/encapsulate_subgraphs_pass.h" #include "tensorflow/compiler/jit/flags.h" #include "tensorflow/compiler/jit/xla_cluster_util.h" #include "tensorflow/compiler/tf2xla/cc/ops/xla_jit_ops.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/status_macros.h" #include "tensorflow/core/common_runtime/function.h" #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/common_runtime/optimization_registry.h" #include "tensorflow/core/framework/graph_def_util.h" #include "tensorflow/core/framework/memory_types.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/node_def_util.h" #include "tensorflow/core/graph/algorithm.h" #include "tensorflow/core/graph/graph.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/lib/hash/hash.h" #include "tensorflow/core/public/version.h" #include "tensorflow/core/util/dump_graph.h" namespace tensorflow { namespace { struct DebuggingOpts { bool print_outputs; bool check_input_numerics; bool check_output_numerics; }; void MoveOutgoingEdges(Graph* g, Node* old_node, Node* new_node) { std::vector<const Edge*> out_edges(old_node->out_edges().begin(), old_node->out_edges().end()); for (const Edge* edge : out_edges) { g->AddEdge(new_node, edge->src_output(), edge->dst(), edge->dst_input()); g->RemoveEdge(edge); } } Output ControlToData(const Scope& scope, Node* control) { Output data = ops::Const(scope.WithOpName("ctrl_as_data"), Tensor(DT_INT32, TensorShape({0}))); scope.graph()->AddControlEdge(control, data.node()); return Output(data.node()); } Operation DataToControl(const Scope& scope, Output data) { return Operation( ops::Identity(scope.WithOpName("data_as_ctrl"), data).node()); } void MergeOutgoingDataEdges(const Scope& s, Node* old_node, Node* new_node, absl::string_view cluster_name, const DebuggingOpts& debugging_opts) { if (!s.status().ok()) { return; } std::vector<Output> merged_outputs(old_node->num_outputs(), Output(nullptr)); std::vector<const Edge*> data_edges; absl::c_copy_if(old_node->out_edges(), std::back_inserter(data_edges), [](const Edge* e) { return !e->IsControlEdge(); }); for (const Edge* e : data_edges) { int oidx = e->src_output(); Output merged_output = merged_outputs[oidx]; if (merged_output.node() == nullptr) { Output new_output(new_node, 
oidx); if (debugging_opts.print_outputs) { string cpu_device = "/job:localhost/replica:0/task:0/device:CPU:0"; ops::Print print_op(s.WithOpName("print_", oidx) .WithDevice(cpu_device) .WithAssignedDevice(cpu_device), new_output, {new_output}, ops::Print::Attrs{} .Message(absl::StrCat("output ", oidx, " from ", old_node->name(), " is ")) .FirstN(1000) .Summarize(-1)); new_output = print_op; } if (debugging_opts.check_output_numerics && DataTypeIsFloating(new_output.type())) { ops::CheckNumerics check_numerics_op( s.WithOpName("check_output_", oidx) .WithDevice(new_node->requested_device()) .WithAssignedDevice(new_node->assigned_device_name()), new_output, absl::StrCat("CheckNumerics failed for output ", oidx, "(", new_output.name(), ") from cluster ", cluster_name)); new_output = check_numerics_op; } ops::_XlaMerge xla_merge_op(s.WithOpName("merge_oidx_", oidx), Output(old_node, oidx), new_output); merged_output = merged_outputs[oidx] = xla_merge_op.output; } Node* dst = e->dst(); int dst_idx = e->dst_input(); s.graph()->RemoveEdge(e); s.graph()->AddEdge(merged_output.node(), merged_output.index(), dst, dst_idx); } } void MergeOutgoingControlEdges(const Scope& s, Node* old_node, Node* new_node) { if (!s.status().ok()) { return; } std::vector<const Edge*> ctrl_edges; absl::c_copy_if(old_node->out_edges(), std::back_inserter(ctrl_edges), [](const Edge* e) { return e->IsControlEdge(); }); if (ctrl_edges.empty()) { return; } if (ctrl_edges.size() == 1 && ctrl_edges.front()->dst()->IsSink()) { s.graph()->AddControlEdge(new_node, s.graph()->sink_node()); return; } Output old_ctrl_as_data = ControlToData(s, old_node); Output new_ctrl_as_data = ControlToData(s, new_node); ops::Merge ctrl_merge_as_data(s.WithOpName("ctrl_merge"), {old_ctrl_as_data, new_ctrl_as_data}); Operation ctrl_merge = DataToControl(s, ctrl_merge_as_data.output); for (const Edge* e : ctrl_edges) { s.graph()->AddControlEdge(ctrl_merge.node(), e->dst()); s.graph()->RemoveControlEdge(e); } } struct XlaClusterInfo { std::vector<Output> constant_inputs; std::vector<Output> non_constant_inputs; std::vector<Output> resource_inputs; NameAttrList function; }; Output IncomingEdgeAsOutput(const Edge* e) { return Output(e->src(), e->src_output()); } Status GetXlaClusterInfo(Node* n, XlaClusterInfo* result) { int num_constant_inputs, num_resource_inputs; TF_RETURN_IF_ERROR( GetNodeAttr(n->attrs(), kXlaNumConstantArgsAttr, &num_constant_inputs)); TF_RETURN_IF_ERROR( GetNodeAttr(n->attrs(), kXlaNumResourceArgsAttr, &num_resource_inputs)); if (num_constant_inputs < 0 || num_resource_inputs < 0 || num_constant_inputs + num_resource_inputs > n->num_inputs()) { return errors::InvalidArgument( "Invalid number of constant/resource arguments to XLA kernel."); } int num_non_constant_inputs = n->num_inputs() - num_constant_inputs - num_resource_inputs; std::vector<const Edge*> input_edges_vector; TF_RETURN_IF_ERROR(n->input_edges(&input_edges_vector)); absl::Span<const Edge*> input_edges(input_edges_vector); absl::c_transform(input_edges.subspan(0, num_constant_inputs), std::back_inserter(result->constant_inputs), IncomingEdgeAsOutput); absl::c_transform( input_edges.subspan(num_constant_inputs, num_non_constant_inputs), std::back_inserter(result->non_constant_inputs), IncomingEdgeAsOutput); absl::c_transform( input_edges.subspan(num_constant_inputs + num_non_constant_inputs, num_resource_inputs), std::back_inserter(result->resource_inputs), IncomingEdgeAsOutput); result->function.set_name(n->type_string()); *result->function.mutable_attr() = 
n->def().attr(); return absl::OkStatus(); } Status CopyIncomingControlEdges(Graph* g, Node* from, Node* to) { for (const Edge* e : from->in_edges()) { if (e->IsControlEdge()) { g->AddControlEdge(e->src(), to); } } return absl::OkStatus(); } void RemoveAllIncomingControlEdges(Graph* g, Node* n) { std::vector<const Edge*> incoming_ctrl_edges; absl::c_copy_if(n->in_edges(), std::back_inserter(incoming_ctrl_edges), [](const Edge* e) { return e->IsControlEdge(); }); for (const Edge* e : incoming_ctrl_edges) { g->RemoveControlEdge(e); } } Status DeviceRequiresCompilation(const jit::DeviceInfoCache& device_info_cache, jit::DeviceId device, bool* result) { const XlaOpRegistry::DeviceRegistration* registration = device_info_cache.GetCompilationDevice(device); *result = registration->autoclustering_policy == XlaOpRegistry::AutoclusteringPolicy::kAlways; return absl::OkStatus(); } absl::StatusOr<Node*> ReplaceFunctionCallWithPartitionedCall( const GraphOptimizationPassOptions& options, const FunctionLibraryDefinition& flib_def, Node* n, Graph* g, const NameAttrList& func, const Scope& root) { string config_string = options.session_options->config.SerializeAsString(); int input_count = absl::c_count_if( n->in_edges(), [](const Edge* e) { return !e->IsControlEdge(); }); std::vector<Output> args(input_count); for (const Edge* e : n->in_edges()) { if (!e->IsControlEdge()) { args[e->dst_input()] = Output(e->src(), e->src_output()); } } ops::StatefulPartitionedCall call( root.WithOpName("stateful_partitioned_call"), args, n->output_types(), func, ops::StatefulPartitionedCall::Attrs{}.ConfigProto(config_string)); for (const Edge* e : n->in_edges()) { if (e->IsControlEdge()) { g->AddControlEdge(e->src(), call.operation.node()); } } std::vector<const Edge*> edges_to_delete; for (const Edge* e : n->out_edges()) { edges_to_delete.push_back(e); if (e->IsControlEdge()) { g->AddControlEdge(call.operation.node(), e->dst()); } else { g->AddEdge(call.operation.node(), e->src_output(), e->dst(), e->dst_input()); } } for (const Edge* e : edges_to_delete) { g->RemoveEdge(e); } g->RemoveNode(n); return call.operation.node(); } absl::StatusOr<jit::DeviceId> InferDeviceForCluster( jit::DeviceInfoCache* device_info_cache, Node* n, const string& function_name, const FunctionLibraryDefinition& flib_def) { const FunctionDef* func_def = flib_def.Find(function_name); TF_RET_CHECK(func_def) << "Could not find " << function_name; jit::DeviceSet device_set; for (const NodeDef& ndef : func_def->node_def()) { VLOG(3) << ndef.DebugString(); if (!ndef.device().empty()) { TF_ASSIGN_OR_RETURN(jit::DeviceId device_id, device_info_cache->GetIdFor(ndef.device())); device_set.Insert(device_id); } } if (!n->assigned_device_name().empty()) { TF_ASSIGN_OR_RETURN(jit::DeviceId device_id, device_info_cache->GetIdFor(n->assigned_device_name())); device_set.Insert(device_id); } TF_ASSIGN_OR_RETURN(jit::DeviceId result, PickDeviceForXla(*device_info_cache, device_set, true)); VLOG(2) << "For " << function_name << " PickDeviceForXla(" << device_info_cache->DebugString(device_set) << ") -> " << device_info_cache->GetNameFor(result); return result; } std::vector<Output> GetXlaRunArgs(const Scope& s, const XlaClusterInfo& cluster_info, const DebuggingOpts& debugging_opts) { std::vector<Output> xla_run_args; xla_run_args.reserve(cluster_info.non_constant_inputs.size() + cluster_info.resource_inputs.size()); int input_idx = 0; for (const Output& o : cluster_info.non_constant_inputs) { if (debugging_opts.check_input_numerics && 
DataTypeIsFloating(o.type())) { ops::CheckNumerics check_numerics_op( s.WithOpName("check_input_", input_idx), o, absl::StrCat("CheckNumerics failed for input ", input_idx, "(", o.name(), ") into ", cluster_info.function.name())); xla_run_args.push_back(check_numerics_op); } else { xla_run_args.push_back(o); } input_idx++; } absl::c_copy(cluster_info.resource_inputs, std::back_inserter(xla_run_args)); return xla_run_args; } absl::StatusOr<MemoryTypeVector> GetOutputMemoryTypes(const Scope& root, Node* n) { MemoryTypeVector input_mtypes, output_mtypes; DeviceType device_type(""); TF_RETURN_IF_ERROR( DeviceNameToDeviceType(n->assigned_device_name(), &device_type)); TF_RETURN_IF_ERROR(MemoryTypesForNode(root.graph()->op_registry(), device_type, n->def(), &input_mtypes, &output_mtypes)); return output_mtypes; } Status PredicateInt32Inputs(const Scope& root, Node* n, Operation predicate_as_control) { std::vector<Output> int32_inputs; std::vector<int> int32_inputs_input_idxs; for (const Edge* e : n->in_edges()) { if (e->IsControlEdge()) { continue; } if (e->src()->output_type(e->src_output()) == DT_INT32) { TF_ASSIGN_OR_RETURN(MemoryTypeVector source_output_mem_types, GetOutputMemoryTypes(root, e->src())); if (source_output_mem_types[e->src_output()] == DEVICE_MEMORY) { int32_inputs.push_back(Output(e->src(), e->src_output())); int32_inputs_input_idxs.push_back(e->dst_input()); } } } if (int32_inputs.empty()) { return absl::OkStatus(); } ops::IdentityN identity_n(root.WithOpName("int32_id_n"), int32_inputs); root.graph()->AddControlEdge(predicate_as_control.node(), identity_n.operation.node()); for (int i = 0, end = int32_inputs.size(); i < end; i++) { TF_RETURN_IF_ERROR(root.graph()->UpdateEdge(identity_n[i].node(), i, n, int32_inputs_input_idxs[i])); } return absl::OkStatus(); } Status ReplaceNodeWithXlaCompileAndXlaRun( jit::DeviceInfoCache* device_info_cache, const GraphOptimizationPassOptions& options, const FunctionLibraryDefinition& flib_def, bool lazy_compilation_enabled, const DebuggingOpts& debugging_opts, Graph* g, Node* n) { XlaClusterInfo cluster_info; TF_RETURN_IF_ERROR(GetXlaClusterInfo(n, &cluster_info)); TF_ASSIGN_OR_RETURN( jit::DeviceId device, InferDeviceForCluster(device_info_cache, n, cluster_info.function.name(), flib_def)); bool requires_compilation; TF_RETURN_IF_ERROR(DeviceRequiresCompilation(*device_info_cache, device, &requires_compilation)); if (!lazy_compilation_enabled) { requires_compilation = true; } string device_name_str = string(device_info_cache->GetNameFor(device)); Status status; Scope root = NewInternalScope(g, &status, nullptr) .NewSubScope(n->name()) .WithDevice(n->requested_device()) .WithAssignedDevice(device_name_str); ops::_XlaCompile xla_compile(root.WithOpName("xla_compile"), cluster_info.constant_inputs, cluster_info.non_constant_inputs, cluster_info.resource_inputs, requires_compilation, cluster_info.function); bool has_ref_attr; TF_RETURN_IF_ERROR( GetNodeAttr(n->attrs(), kXlaHasReferenceVarsAttr, &has_ref_attr)); xla_compile.operation.node()->AddAttr(kXlaHasReferenceVarsAttr, has_ref_attr); TF_RETURN_IF_ERROR( CopyIncomingControlEdges(g, n, xla_compile.key.node())); std::vector<Output> xla_run_args = GetXlaRunArgs(root, cluster_info, debugging_opts); if (requires_compilation) { ops::_XlaRun xla_run(root.WithOpName("xla_run"), xla_run_args, xla_compile.key, n->output_types()); MoveOutgoingEdges(g, n, xla_run.operation.node()); g->RemoveNode(n); } else { ops::Switch s(root.WithOpName("predicated_compilation_key"), xla_compile.key, 
xla_compile.compilation_successful); Output predicated_compilation_key = s.output_true; Output inverse_predicated_compilation_key = s.output_false; ops::_XlaRun xla_run(root.WithOpName("xla_run"), xla_run_args, predicated_compilation_key, n->output_types()); MergeOutgoingControlEdges(root, n, xla_run.operation.node()); MergeOutgoingDataEdges(root, n, xla_run.operation.node(), cluster_info.function.name(), debugging_opts); TF_RETURN_IF_ERROR(root.status()); RemoveAllIncomingControlEdges(g, n); Operation inverse_predicate_as_control = DataToControl(root, inverse_predicated_compilation_key); g->AddControlEdge(inverse_predicate_as_control.node(), n); n->ClearAttr(kXlaCompiledKernelAttr); TF_ASSIGN_OR_RETURN(Node* const pco, ReplaceFunctionCallWithPartitionedCall( options, flib_def, n, g, cluster_info.function, root)); TF_RETURN_IF_ERROR( PredicateInt32Inputs(root, pco, inverse_predicate_as_control)); } return absl::OkStatus(); } } Status BuildXlaOpsPass::Run(const GraphOptimizationPassOptions& options) { Graph* graph = options.graph->get(); std::vector<Node*> xla_compiled_kernels; absl::c_copy_if(graph->op_nodes(), std::back_inserter(xla_compiled_kernels), [](const Node* n) { if (n->IsSend() || n->IsRecv() || n->IsControlFlow()) { return false; } return IsXlaCompiledKernel(*n); }); bool lazy_compilation_enabled = enable_lazy_compilation_ ? *enable_lazy_compilation_ : GetBuildXlaOpsPassFlags()->tf_xla_enable_lazy_compilation; jit::DeviceInfoCache device_info_cache; const BuildXlaOpsPassFlags& flags = *GetBuildXlaOpsPassFlags(); DebuggingOpts debugging_opts; debugging_opts.print_outputs = flags.tf_xla_print_cluster_outputs; debugging_opts.check_input_numerics = flags.tf_xla_check_cluster_input_numerics; debugging_opts.check_output_numerics = flags.tf_xla_check_cluster_output_numerics; VLOG(1) << "print_outputs = " << debugging_opts.print_outputs; VLOG(1) << "check_input_numerics = " << debugging_opts.check_input_numerics; VLOG(1) << "check_output_numerics = " << debugging_opts.check_output_numerics; for (Node* n : xla_compiled_kernels) { TF_RETURN_IF_ERROR(ReplaceNodeWithXlaCompileAndXlaRun( &device_info_cache, options, *options.flib_def, lazy_compilation_enabled, debugging_opts, graph, n)); } if (VLOG_IS_ON(1)) { DumpGraphToFile("build_xla_ops", *graph, options.flib_def); } return absl::OkStatus(); } }
#include "tensorflow/compiler/jit/build_xla_ops_pass.h" #include "tensorflow/cc/framework/ops.h" #include "tensorflow/cc/ops/array_ops.h" #include "tensorflow/cc/ops/resource_variable_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/compiler/jit/defs.h" #include "tensorflow/compiler/jit/encapsulate_subgraphs_pass.h" #include "tensorflow/compiler/jit/node_matchers.h" #include "tensorflow/compiler/jit/test_util.h" #include "tensorflow/core/common_runtime/device_factory.h" #include "tensorflow/core/graph/algorithm.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/public/session_options.h" namespace tensorflow { namespace { class BuildXlaOpsTest : public ::testing::Test { protected: void SetUp() override { CHECK(DeviceFactory::AddDevices( SessionOptions(), "/job:localhost/replica:0/task:0", &devices_) .ok()); } private: std::vector<std::unique_ptr<Device>> devices_; }; using ::tensorflow::testing::FindNodeByName; using ::tensorflow::testing::matchers::Attr; using ::tensorflow::testing::matchers::CtrlDeps; using ::tensorflow::testing::matchers::Inputs; using ::tensorflow::testing::matchers::NodeWith; using ::tensorflow::testing::matchers::Op; using ::tensorflow::testing::matchers::Out; using ::testing::_; Status BuildXlaOps(const Scope& s, const FunctionDefLibrary& fdef_lib, std::unique_ptr<Graph>* result) { auto graph = std::make_unique<Graph>(OpRegistry::Global()); TF_RETURN_IF_ERROR(s.ToGraph(graph.get())); FunctionLibraryDefinition flib_def(graph->op_registry(), fdef_lib); static const char* kCpuDevice = "/job:localhost/replica:0/task:0/cpu:0"; for (Node* n : graph->nodes()) { if (n->requested_device().empty()) { n->set_assigned_device_name(kCpuDevice); } else { n->set_assigned_device_name(n->requested_device()); } } FixupSourceAndSinkEdges(graph.get()); GraphOptimizationPassWrapper wrapper; GraphOptimizationPassOptions opt_options = wrapper.CreateGraphOptimizationPassOptions(&graph); opt_options.flib_def = &flib_def; BuildXlaOpsPass pass(true); TF_RETURN_IF_ERROR(pass.Run(opt_options)); VLOG(3) << graph->ToGraphDefDebug().DebugString(); *result = std::move(graph); return absl::OkStatus(); } Status MakeXlaCompiledKernel(Graph* graph, const string& callee_name, const string& node_name, int num_constant_args, int num_resource_args, Node** result) { NodeDef call_node; call_node.set_name(node_name); call_node.set_op(callee_name); AddNodeAttr(kXlaCompiledKernelAttr, true, &call_node); AddNodeAttr(kXlaNumConstantArgsAttr, num_constant_args, &call_node); AddNodeAttr(kXlaNumResourceArgsAttr, num_resource_args, &call_node); TF_ASSIGN_OR_RETURN(*result, graph->AddNode(call_node)); return absl::OkStatus(); } Status MakeXlaCompiledKernel(Graph* graph, const string& callee_name, const string& node_name, Node** result) { return MakeXlaCompiledKernel(graph, callee_name, node_name, 0, 0, result); } Node* MakeWrite(const Scope& scope, Output value_to_write, const string& id) { Output var_handle = ops::VarHandleOp(scope.WithOpName("Var_" + id), DT_FLOAT, TensorShape({})); ops::AssignVariableOp assign_op(scope.WithOpName("Assignee_" + id), var_handle, value_to_write); return assign_op.operation.node(); } Node* MakeWrite(const Scope& scope, const string& id) { return MakeWrite( scope, ops::Const(scope.WithOpName("ValueToAssign" + id), 1.0f), id); } FunctionDefLibrary CreateFunctionDefLibWithConstFunction(const string& name) { FunctionDefLibrary fdef_lib; FunctionDef 
func = FunctionDefHelper::Create( name, {}, {"out: float"}, {}, {FunctionDefHelper::Const("one", 1.0f)}, {{"out", "out:output:0"}}); *fdef_lib.add_function() = std::move(func); return fdef_lib; } TEST_F(BuildXlaOpsTest, ControlDepsPreserved) { const char* kXlaDeviceName = "/job:worker/replica:0/task:0/device:XLA_CPU:0"; Scope root = Scope::NewRootScope().WithDevice(kXlaDeviceName).ExitOnError(); FunctionDefLibrary fdef_lib = CreateFunctionDefLibWithConstFunction("cluster_0"); TF_ASSERT_OK(root.graph()->AddFunctionLibrary(fdef_lib)); Node* call; TF_ASSERT_OK(MakeXlaCompiledKernel(root.graph(), "cluster_0", "C", &call)); call->AddAttr(kXlaHasReferenceVarsAttr, false); call->set_requested_device(kXlaDeviceName); Node* write_op = MakeWrite(root, "write"); write_op->AddAttr(kXlaHasReferenceVarsAttr, false); root.graph()->AddControlEdge(call, write_op); std::unique_ptr<Graph> graph; TF_ASSERT_OK(BuildXlaOps(root, fdef_lib, &graph)); Node* write_op_new = FindNodeByName(graph.get(), write_op->name()); ASSERT_NE(write_op_new, nullptr); EXPECT_THAT(write_op_new, NodeWith(CtrlDeps(NodeWith(Op("_XlaRun"))))); } TEST_F(BuildXlaOpsTest, CleanFailureOnBogusAttr) { Scope root = Scope::NewRootScope().ExitOnError(); FunctionDefLibrary fdef_lib = CreateFunctionDefLibWithConstFunction("cluster_0"); TF_ASSERT_OK(root.graph()->AddFunctionLibrary(fdef_lib)); Node* call; TF_ASSERT_OK( MakeXlaCompiledKernel(root.graph(), "cluster_0", "C", 100, 100, &call)); Node* write_op = MakeWrite(root, "write"); root.graph()->AddControlEdge(call, write_op); std::unique_ptr<Graph> graph; Status failure_status = BuildXlaOps(root, fdef_lib, &graph); ASSERT_FALSE(failure_status.ok()); EXPECT_EQ(failure_status.code(), error::INVALID_ARGUMENT); } TEST_F(BuildXlaOpsTest, OnNonXlaDevice) { Scope root = Scope::NewRootScope().ExitOnError(); FunctionDefLibrary fdef_lib = CreateFunctionDefLibWithConstFunction("cluster_0"); TF_ASSERT_OK(root.graph()->AddFunctionLibrary(fdef_lib)); Node* call; TF_ASSERT_OK(MakeXlaCompiledKernel(root.graph(), "cluster_0", "C", &call)); TF_ASSERT_OK(root.DoShapeInference(call)); call->AddAttr(kXlaHasReferenceVarsAttr, false); Node* write_op = MakeWrite(root, Output(call), "write_result"); write_op->AddAttr(kXlaHasReferenceVarsAttr, false); auto xla_compile = NodeWith(Op("_XlaCompile"), Attr("must_compile", false)); auto predicated_compilation_key = NodeWith(Op("Switch"), Inputs(Out(0, xla_compile), Out(1, xla_compile))); auto xla_run = NodeWith(Op("_XlaRun"), Inputs(Out(1, predicated_compilation_key))); auto tf_call = NodeWith(Op("StatefulPartitionedCall"), CtrlDeps(NodeWith(Op("Identity"), Inputs(Out(0, predicated_compilation_key))))); auto merge = NodeWith(Op("_XlaMerge"), Inputs(Out(tf_call), Out(xla_run))); auto assign_var = NodeWith(Op("AssignVariableOp"), Inputs(_, Out(merge))); std::unique_ptr<Graph> graph; TF_ASSERT_OK(BuildXlaOps(root, fdef_lib, &graph)); Node* write_op_new = FindNodeByName(graph.get(), write_op->name()); ASSERT_NE(write_op_new, nullptr); EXPECT_THAT(write_op_new, assign_var); } TEST_F(BuildXlaOpsTest, OnXlaDevice) { const char* kXlaDeviceName = "/job:worker/replica:0/task:0/device:XLA_CPU:0"; Scope root = Scope::NewRootScope().WithDevice(kXlaDeviceName).ExitOnError(); FunctionDefLibrary fdef_lib = CreateFunctionDefLibWithConstFunction("cluster_0"); TF_ASSERT_OK(root.graph()->AddFunctionLibrary(fdef_lib)); Node* call; TF_ASSERT_OK(MakeXlaCompiledKernel(root.graph(), "cluster_0", "C", &call)); call->set_requested_device(kXlaDeviceName); TF_ASSERT_OK(root.DoShapeInference(call)); 
call->AddAttr(kXlaHasReferenceVarsAttr, false); Node* write_op = MakeWrite(root, Output(call), "write_result"); write_op->AddAttr(kXlaHasReferenceVarsAttr, false); std::unique_ptr<Graph> graph; TF_ASSERT_OK(BuildXlaOps(root, fdef_lib, &graph)); auto xla_op = NodeWith(Op("_XlaRun"), Inputs(Out(NodeWith(Op("_XlaCompile"))))); auto assign_var = NodeWith(Op("AssignVariableOp"), Inputs(Out(NodeWith()), Out(xla_op))); Node* write_op_new = FindNodeByName(graph.get(), write_op->name()); ASSERT_NE(write_op_new, nullptr); EXPECT_THAT(write_op_new, assign_var); } TEST_F(BuildXlaOpsTest, NoExtraMergeForEdgeToSink) { Scope root = Scope::NewRootScope().ExitOnError(); FunctionDefLibrary fdef_lib = CreateFunctionDefLibWithConstFunction("cluster_0"); TF_ASSERT_OK(root.graph()->AddFunctionLibrary(fdef_lib)); Node* call; TF_ASSERT_OK(MakeXlaCompiledKernel(root.graph(), "cluster_0", "C", &call)); call->AddAttr(kXlaHasReferenceVarsAttr, false); std::unique_ptr<Graph> graph; TF_ASSERT_OK(BuildXlaOps(root, fdef_lib, &graph)); Node* sink_node = graph->sink_node(); EXPECT_THAT(sink_node, NodeWith(CtrlDeps(NodeWith(Op("_XlaRun")), NodeWith(Op("StatefulPartitionedCall")), NodeWith(Op("NoOp"))))); } #ifdef GOOGLE_CUDA FunctionDefLibrary CreateFunctionDefLibWithInt32Input(const string& name) { FunctionDefLibrary fdef_lib; FunctionDef func = FunctionDefHelper::Create( name, {"in: int32"}, {"out: int32"}, {}, {{{"out"}, "Identity", {"in"}}}, {{"out", "out:output:0"}}); *fdef_lib.add_function() = std::move(func); return fdef_lib; } TEST_F(BuildXlaOpsTest, NoDeviceToHostCopiesForClustersWithInt32Inputs) { const char* kXlaDeviceName = "/job:worker/replica:0/task:0/device:GPU:0"; Scope root = Scope::NewRootScope() .WithDevice(kXlaDeviceName) .WithAssignedDevice(kXlaDeviceName) .ExitOnError(); FunctionDefLibrary fdef_lib = CreateFunctionDefLibWithInt32Input("cluster_int32"); TF_ASSERT_OK(root.graph()->AddFunctionLibrary(fdef_lib)); Node* call; TF_ASSERT_OK( MakeXlaCompiledKernel(root.graph(), "cluster_int32", "C", &call)); call->set_requested_device(kXlaDeviceName); call->AddAttr(kXlaHasReferenceVarsAttr, false); auto var = ops::VarHandleOp(root.WithOpName("var"), DT_INT32, TensorShape({})); auto int32_on_device = ops::ReadVariableOp(root.WithOpName("int32_on_device"), var, DT_INT32); root.graph()->AddEdge(int32_on_device.node(), 0, call, 0); std::unique_ptr<Graph> graph; TF_ASSERT_OK(BuildXlaOps(root, fdef_lib, &graph)); Node* stateful_partitioned_call_op = nullptr; for (Node* n : graph->op_nodes()) { if (n->type_string() == "StatefulPartitionedCall") { ASSERT_EQ(stateful_partitioned_call_op, nullptr); stateful_partitioned_call_op = n; } } ASSERT_NE(stateful_partitioned_call_op, nullptr); auto xla_compile = NodeWith(Op("_XlaCompile")); auto switch_on_compilation_pred = NodeWith(Op("Switch"), Inputs(Out(0, xla_compile), Out(1, xla_compile))); auto ctrl_dep = NodeWith(Op("Identity"), Inputs(Out(0, switch_on_compilation_pred))); EXPECT_THAT( stateful_partitioned_call_op, NodeWith(Inputs(Out(NodeWith(Op("IdentityN"), CtrlDeps(ctrl_dep)))))); } #endif } }
1091
cpp
tensorflow/tensorflow
partially_decluster_pass
tensorflow/compiler/jit/partially_decluster_pass.cc
tensorflow/compiler/jit/partially_decluster_pass_test.cc
#ifndef TENSORFLOW_COMPILER_JIT_PARTIALLY_DECLUSTER_PASS_H_ #define TENSORFLOW_COMPILER_JIT_PARTIALLY_DECLUSTER_PASS_H_ #include "tensorflow/core/common_runtime/optimization_registry.h" namespace tensorflow { class PartiallyDeclusterPass : public GraphOptimizationPass { public: Status Run(const GraphOptimizationPassOptions& options) override; }; } #endif #include "tensorflow/compiler/jit/partially_decluster_pass.h" #include "absl/algorithm/container.h" #include "absl/container/flat_hash_set.h" #include "absl/strings/str_cat.h" #include "tensorflow/compiler/jit/device_util.h" #include "tensorflow/compiler/jit/xla_cluster_util.h" #include "tensorflow/compiler/tf2xla/const_analysis.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "tensorflow/core/common_runtime/function.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/memory_types.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/graph/graph_node_util.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/public/version.h" namespace tensorflow { namespace { bool NotBackedge(const Edge& edge) { return !edge.src()->IsNextIteration(); } namespace reduce_device_to_host_copies { Status FindNodesToDecluster(const Graph& graph, absl::flat_hash_set<Node*>* result, absl::Span<Node* const> post_order) { MemoryTypeVector input_mtypes, output_mtypes; for (Node* n : post_order) { std::optional<absl::string_view> from_cluster = GetXlaClusterForNode(*n); if (!from_cluster) { continue; } if (IsShapeConsumerOp(*n)) { continue; } if (HasResourceInputOrOutput(*n)) { continue; } DeviceType device_type(""); TF_RETURN_IF_ERROR( DeviceNameToDeviceType(n->assigned_device_name(), &device_type)); TF_RETURN_IF_ERROR(MemoryTypesForNode(graph.op_registry(), device_type, n->def(), &input_mtypes, &output_mtypes)); for (const Edge* e : n->out_edges()) { Node* dst = e->dst(); if (e->IsControlEdge()) { continue; } bool edge_incurs_extra_device_to_host_copy; if (output_mtypes[e->src_output()] == DEVICE_MEMORY) { edge_incurs_extra_device_to_host_copy = false; } else { MemoryTypeVector dst_input_mtypes, dst_output_mtypes; DeviceType dst_device_type(""); TF_RETURN_IF_ERROR(DeviceNameToDeviceType(dst->assigned_device_name(), &dst_device_type)); TF_RETURN_IF_ERROR(MemoryTypesForNode(graph.op_registry(), device_type, dst->def(), &dst_input_mtypes, &dst_output_mtypes)); edge_incurs_extra_device_to_host_copy = dst_input_mtypes[e->dst_input()] == HOST_MEMORY; } if (!edge_incurs_extra_device_to_host_copy) { continue; } std::optional<absl::string_view> dst_cluster = result->count(dst) ? 
std::nullopt : GetXlaClusterForNode(*dst); if (from_cluster != dst_cluster) { CHECK(result->insert(n).second); break; } } } return absl::OkStatus(); } Status PartiallyDeclusterNode(Graph* graph, Node* n) { absl::string_view cluster_name = *GetXlaClusterForNode(*n); absl::InlinedVector<const Edge*, 6> out_edges_to_clone; for (const Edge* out_edge : n->out_edges()) { if (out_edge->IsControlEdge()) { continue; } Node* dst = out_edge->dst(); std::optional<absl::string_view> dst_cluster_name = GetXlaClusterForNode(*dst); if (dst_cluster_name != cluster_name) { out_edges_to_clone.push_back(out_edge); } } CHECK(!out_edges_to_clone.empty()) << n->DebugString(); NodeDef ndef = n->def(); ndef.set_name(absl::StrCat(n->name(), "/declustered")); MergeDebugInfo(NodeDebugInfo(n->def()), &ndef); RemoveFromXlaCluster(&ndef); TF_ASSIGN_OR_RETURN(Node * cloned_node, graph->AddNode(ndef)); cloned_node->set_assigned_device_name(n->assigned_device_name()); for (const Edge* in_edge : n->in_edges()) { graph->AddEdge(in_edge->src(), in_edge->src_output(), cloned_node, in_edge->dst_input()); } for (const Edge* out_edge_to_clone : out_edges_to_clone) { graph->AddEdge(cloned_node, out_edge_to_clone->src_output(), out_edge_to_clone->dst(), out_edge_to_clone->dst_input()); graph->RemoveEdge(out_edge_to_clone); } if (n->out_edges().empty()) { graph->RemoveNode(n); } return absl::OkStatus(); } Status PartiallyDeclusterGraph(Graph* graph) { std::vector<Node*> post_order; GetPostOrder(*graph, &post_order, NodeComparatorName(), NotBackedge); absl::flat_hash_set<Node*> nodes_to_partially_decluster; TF_RETURN_IF_ERROR( FindNodesToDecluster(*graph, &nodes_to_partially_decluster, post_order)); if (VLOG_IS_ON(3)) { for (Node* n : post_order) { if (nodes_to_partially_decluster.count(n)) { VLOG(3) << n->DebugString(); } } } for (Node* n : post_order) { if (nodes_to_partially_decluster.count(n)) { TF_RETURN_IF_ERROR(PartiallyDeclusterNode(graph, n)); } } post_order.clear(); GetPostOrder(*graph, &post_order, NodeComparatorName(), NotBackedge); nodes_to_partially_decluster.clear(); TF_RETURN_IF_ERROR( FindNodesToDecluster(*graph, &nodes_to_partially_decluster, post_order)); CHECK(nodes_to_partially_decluster.empty()); return absl::OkStatus(); } } namespace reduce_recompilation { bool IsIntraClusterEdge(const Edge& edge) { std::optional<absl::string_view> src_cluster_name = GetXlaClusterForNode(*edge.src()); std::optional<absl::string_view> dst_cluster_name = GetXlaClusterForNode(*edge.dst()); return src_cluster_name.has_value() && src_cluster_name == dst_cluster_name; } bool IsMustCompileDevice(const DeviceType& device_type) { const XlaOpRegistry::DeviceRegistration* registration; if (XlaOpRegistry::GetCompilationDevice(device_type.type(), &registration)) { return registration->autoclustering_policy == XlaOpRegistry::AutoclusteringPolicy::kAlways; } return false; } Status MustCompileNode(const Node* n, bool* must_compile) { DeviceType device_type(""); TF_RETURN_IF_ERROR( DeviceNameToDeviceType(n->assigned_device_name(), &device_type)); if (IsMustCompileDevice(device_type)) { *must_compile = true; return absl::OkStatus(); } *must_compile = !FindKernelDef(device_type, n->def(), nullptr, nullptr).ok(); return absl::OkStatus(); } Status PartiallyDeclusterGraph(Graph* graph, const FunctionLibraryDefinition* flib_def, Env* env) { std::vector<bool> compile_time_const_nodes(graph->num_node_ids()); OptimizerOptions opts; auto pflr = std::make_unique<ProcessFunctionLibraryRuntime>( nullptr, env, nullptr, TF_GRAPH_DEF_VERSION, flib_def, opts); 
FunctionLibraryRuntime* lib_runtime = pflr->GetFLR(ProcessFunctionLibraryRuntime::kDefaultFLRDevice); TF_RETURN_IF_ERROR(BackwardsConstAnalysis(*graph, nullptr, &compile_time_const_nodes, lib_runtime, IsIntraClusterEdge)); std::vector<Node*> rpo; GetReversePostOrder(*graph, &rpo, NodeComparatorName(), NotBackedge); for (Node* n : rpo) { if (!compile_time_const_nodes[n->id()]) { continue; } absl::string_view cluster_name = *GetXlaClusterForNode(*n); bool node_on_cluster_edge = absl::c_all_of(n->in_edges(), [&](const Edge* e) { std::optional<absl::string_view> incoming_cluster = GetXlaClusterForNode(*e->src()); return !incoming_cluster || *incoming_cluster != cluster_name; }); if (node_on_cluster_edge) { bool must_compile_node; TF_RETURN_IF_ERROR(MustCompileNode(n, &must_compile_node)); if (!must_compile_node) { if (n->IsConstant()) { for (auto it : n->in_edges()) { if (!it->src()->assigned_device_name().empty() && it->src()->assigned_device_name() != n->assigned_device_name()) { VLOG(3) << "Declustering Const with cross-device control input " << n->name(); RemoveFromXlaCluster(n); break; } } } else { VLOG(3) << "Declustering must-be-constant node " << n->name(); RemoveFromXlaCluster(n); } } } } return absl::OkStatus(); } } namespace decluster_root_shape_consumers { Status PartiallyDeclusterGraph(Graph* graph) { std::vector<Node*> reverse_post_order; GetReversePostOrder(*graph, &reverse_post_order, NodeComparatorName(), NotBackedge); for (Node* n : reverse_post_order) { if (!IsShapeConsumerOp(*n)) { continue; } std::optional<absl::string_view> cluster = GetXlaClusterForNode(*n); if (!cluster.has_value()) { continue; } auto input_belongs_to_same_cluster = [&](const Edge* e) { return cluster == GetXlaClusterForNode(*e->src()); }; if (absl::c_any_of(n->in_edges(), input_belongs_to_same_cluster)) { continue; } VLOG(2) << "Declustering " << n->name() << " because it is a root shape consumer"; RemoveFromXlaCluster(n); } return absl::OkStatus(); } } } Status PartiallyDeclusterPass::Run( const GraphOptimizationPassOptions& options) { Graph* graph = options.graph->get(); TF_RETURN_IF_ERROR( reduce_device_to_host_copies::PartiallyDeclusterGraph(graph)); if (options.flib_def == nullptr) { return errors::InvalidArgument( "GraphOptimizationPassOptions::flib_def must be set for " "PartiallyDeclusterPass."); } if (options.session_options == nullptr || options.session_options->env == nullptr) { return errors::InvalidArgument( "GraphOptimizationPassOptions::session_options::env must be set for " "PartiallyDeclusterPass."); } TF_RETURN_IF_ERROR(reduce_recompilation::PartiallyDeclusterGraph( graph, options.flib_def, options.session_options->env)); TF_RETURN_IF_ERROR( decluster_root_shape_consumers::PartiallyDeclusterGraph(graph)); return absl::OkStatus(); } }
#include "tensorflow/compiler/jit/partially_decluster_pass.h" #include "absl/memory/memory.h" #include "tensorflow/cc/framework/ops.h" #include "tensorflow/cc/ops/array_ops.h" #include "tensorflow/cc/ops/const_op.h" #include "tensorflow/cc/ops/control_flow_ops_internal.h" #include "tensorflow/cc/ops/function_ops.h" #include "tensorflow/cc/ops/sendrecv_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/compiler/jit/defs.h" #include "tensorflow/compiler/jit/test_util.h" #include "tensorflow/compiler/jit/xla_cluster_util.h" #include "tensorflow/compiler/tf2xla/cc/ops/xla_ops.h" #include "tensorflow/compiler/tf2xla/xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/common_runtime/graph_def_builder_util.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/function.pb.h" #include "tensorflow/core/framework/node_def_util.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/graph/algorithm.h" #include "tensorflow/core/graph/graph_def_builder.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace { REGISTER_OP("FakeNullary").Output("out: int32"); REGISTER_OP("FakeBinary") .Input("host_in: int32") .Input("device_in: int32") .Output("host_out: int32") .Output("device_out: int32"); REGISTER_OP("FakeResourceVar").Output("out: resource"); REGISTER_OP("FakeResourceUpdate") .Input("in: resource") .Output("out: resource") .Output("something_else: int32"); class FakeBinaryOp : public OpKernel { public: explicit FakeBinaryOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* ctx) override { CHECK(false); } }; class FakeResourceUpdateOp : public OpKernel { public: explicit FakeResourceUpdateOp(OpKernelConstruction* context) : OpKernel(context) {} void Compute(OpKernelContext* ctx) override { CHECK(false); } }; REGISTER_KERNEL_BUILDER(Name("FakeBinary") .Device(DEVICE_CPU) .HostMemory("host_in") .HostMemory("host_out"), FakeBinaryOp); REGISTER_KERNEL_BUILDER( Name("FakeResourceUpdate").Device(DEVICE_CPU).HostMemory("something_else"), FakeResourceUpdateOp); Status PartiallyDecluster(std::unique_ptr<Graph>* graph) { FixupSourceAndSinkEdges(graph->get()); static const char* kCpuDevice = "/job:localhost/replica:0/task:0/cpu:0"; for (Node* n : (*graph)->nodes()) { if (n->assigned_device_name().empty()) { n->set_assigned_device_name(kCpuDevice); } } GraphOptimizationPassWrapper wrapper; GraphOptimizationPassOptions opt_options = wrapper.CreateGraphOptimizationPassOptions(graph); PartiallyDeclusterPass pass; return pass.Run(opt_options); } Node* FindNodeByName(const Graph& graph, const string& name) { for (Node* node : graph.nodes()) { if (node->name() == name) { return node; } } return nullptr; } bool GetInputsForNode(const Graph& graph, const string& node_name, std::vector<Node*>* inputs) { const Node* node = FindNodeByName(graph, node_name); if (node == nullptr) { return false; } for (const Edge* e : node->in_edges()) { inputs->push_back(e->src()); } std::sort(inputs->begin(), inputs->end(), NodeComparatorName()); return true; } TEST(PartiallyDeclusterPassTest, ClusteredAndUnclustered) { std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); { GraphDefBuilder builder(GraphDefBuilder::kFailImmediately); Node* input = ops::SourceOp("FakeNullary", builder.opts().WithName("Input")); Node* clustered_producer = 
ops::BinaryOp("FakeBinary", input, input, builder.opts().WithName("ClusteredProducer")); ops::BinaryOp("FakeBinary", clustered_producer, input, builder.opts().WithName("UnclusteredConsumer")); Node* clustered_consumer = ops::BinaryOp("FakeBinary", {clustered_producer, 1}, input, builder.opts().WithName("ClusteredConsumer")); clustered_producer->AddAttr(kXlaClusterAttr, "cluster_0"); clustered_consumer->AddAttr(kXlaClusterAttr, "cluster_0"); TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get())); } TF_ASSERT_OK(PartiallyDecluster(&graph)); std::vector<Node*> unclustered_consumer_inputs; ASSERT_TRUE(GetInputsForNode(*graph, "UnclusteredConsumer", &unclustered_consumer_inputs)); ASSERT_EQ(unclustered_consumer_inputs.size(), 2); EXPECT_EQ(unclustered_consumer_inputs[0]->name(), "ClusteredProducer/declustered"); EXPECT_EQ(unclustered_consumer_inputs[1]->name(), "Input"); std::vector<Node*> clustered_consumer_inputs; ASSERT_TRUE(GetInputsForNode(*graph, "ClusteredConsumer", &clustered_consumer_inputs)); ASSERT_EQ(clustered_consumer_inputs.size(), 2); EXPECT_EQ(clustered_consumer_inputs[0]->name(), "ClusteredProducer"); EXPECT_EQ(clustered_consumer_inputs[1]->name(), "Input"); } TEST(PartiallyDeclusterPassTest, DifferentClusters) { std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); { GraphDefBuilder builder(GraphDefBuilder::kFailImmediately); Node* input = ops::SourceOp("FakeNullary", builder.opts().WithName("Input")); Node* clustered_producer = ops::BinaryOp("FakeBinary", input, input, builder.opts().WithName("ClusteredProducer")); Node* consumer_in_different_cluster = ops::BinaryOp("FakeBinary", clustered_producer, input, builder.opts().WithName("ConsumerInDifferentCluster")); Node* clustered_consumer = ops::BinaryOp("FakeBinary", input, {clustered_producer, 1}, builder.opts().WithName("ClusteredConsumer")); clustered_producer->AddAttr(kXlaClusterAttr, "cluster_0"); clustered_consumer->AddAttr(kXlaClusterAttr, "cluster_0"); consumer_in_different_cluster->AddAttr(kXlaClusterAttr, "cluster_1"); TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get())); } TF_ASSERT_OK(PartiallyDecluster(&graph)); std::vector<Node*> inputs; ASSERT_TRUE(GetInputsForNode(*graph, "ConsumerInDifferentCluster", &inputs)); ASSERT_EQ(inputs.size(), 2); EXPECT_EQ(inputs[0]->name(), "ClusteredProducer/declustered"); EXPECT_EQ(inputs[1]->name(), "Input"); } TEST(PartiallyDeclusterPassTest, DontDeclusterIfUserIsDeviceMem) { std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); { GraphDefBuilder builder(GraphDefBuilder::kFailImmediately); Node* input = ops::SourceOp("FakeNullary", builder.opts().WithName("Input")); Node* clustered_producer = ops::BinaryOp("FakeBinary", input, input, builder.opts().WithName("ClusteredProducer")); Node* consumer_in_different_cluster = ops::BinaryOp("FakeBinary", input, clustered_producer, builder.opts().WithName("ConsumerInDifferentCluster")); Node* clustered_consumer = ops::BinaryOp("FakeBinary", input, {clustered_producer, 1}, builder.opts().WithName("ClusteredConsumer")); clustered_producer->AddAttr(kXlaClusterAttr, "cluster_0"); clustered_consumer->AddAttr(kXlaClusterAttr, "cluster_0"); consumer_in_different_cluster->AddAttr(kXlaClusterAttr, "cluster_1"); TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get())); } TF_ASSERT_OK(PartiallyDecluster(&graph)); std::vector<Node*> inputs; ASSERT_TRUE(GetInputsForNode(*graph, "ConsumerInDifferentCluster", &inputs)); ASSERT_EQ(inputs.size(), 2); EXPECT_EQ(inputs[0]->name(), "ClusteredProducer"); 
EXPECT_EQ(inputs[1]->name(), "Input"); } TEST(PartiallyDeclusterPassTest, DontDuplicateResourceVarOps) { std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); { GraphDefBuilder builder(GraphDefBuilder::kFailImmediately); Node* input = ops::SourceOp("FakeNullary", builder.opts().WithName("Input")); Node* resource_var = ops::SourceOp("FakeResourceVar", builder.opts().WithName("ResourceVar")); Node* clustered_producer = ops::UnaryOp("FakeResourceUpdate", resource_var, builder.opts().WithName("ClusteredProducer")); Node* consumer_in_different_cluster = ops::BinaryOp("FakeBinary", {clustered_producer, 1}, input, builder.opts().WithName("ConsumerInDifferentCluster")); Node* clustered_consumer = ops::BinaryOp("FakeBinary", input, {clustered_producer, 1}, builder.opts().WithName("ClusteredConsumer")); clustered_producer->AddAttr(kXlaClusterAttr, "cluster_0"); clustered_consumer->AddAttr(kXlaClusterAttr, "cluster_0"); consumer_in_different_cluster->AddAttr(kXlaClusterAttr, "cluster_1"); TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get())); } TF_ASSERT_OK(PartiallyDecluster(&graph)); std::vector<Node*> inputs; ASSERT_TRUE(GetInputsForNode(*graph, "ConsumerInDifferentCluster", &inputs)); ASSERT_EQ(inputs.size(), 2); EXPECT_EQ(inputs[0]->name(), "ClusteredProducer"); EXPECT_EQ(inputs[1]->name(), "Input"); } TEST(PartiallyDeclusterPassTest, DeclusterDependentNodes) { std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); { GraphDefBuilder builder(GraphDefBuilder::kFailImmediately); Node* input = ops::SourceOp("FakeNullary", builder.opts().WithName("Input")); Node* clustered_producer_0 = ops::BinaryOp("FakeBinary", input, input, builder.opts().WithName("ClusteredProducer0")); Node* clustered_producer_1 = ops::BinaryOp("FakeBinary", clustered_producer_0, input, builder.opts().WithName("ClusteredProducer1")); ops::BinaryOp("FakeBinary", clustered_producer_1, input, builder.opts().WithName("UnclusteredConsumer")); Node* clustered_consumer = ops::BinaryOp("FakeBinary", {clustered_producer_1, 1}, input, builder.opts().WithName("ClusteredConsumer")); clustered_producer_0->AddAttr(kXlaClusterAttr, "cluster_0"); clustered_producer_1->AddAttr(kXlaClusterAttr, "cluster_0"); clustered_consumer->AddAttr(kXlaClusterAttr, "cluster_0"); TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get())); } TF_ASSERT_OK(PartiallyDecluster(&graph)); std::vector<Node*> unclustered_consumer_inputs, declustered_producer_1_inputs; ASSERT_TRUE(GetInputsForNode(*graph, "UnclusteredConsumer", &unclustered_consumer_inputs)); ASSERT_EQ(unclustered_consumer_inputs.size(), 2); EXPECT_EQ(unclustered_consumer_inputs[0]->name(), "ClusteredProducer1/declustered"); EXPECT_EQ(unclustered_consumer_inputs[1]->name(), "Input"); ASSERT_TRUE(GetInputsForNode(*graph, "ClusteredProducer1/declustered", &declustered_producer_1_inputs)); ASSERT_EQ(declustered_producer_1_inputs.size(), 2); EXPECT_EQ(declustered_producer_1_inputs[0]->name(), "ClusteredProducer0/declustered"); EXPECT_EQ(declustered_producer_1_inputs[1]->name(), "Input"); } void AddToCluster(absl::Span<Node* const> nodes, absl::string_view cluster_name) { for (Node* n : nodes) { n->AddAttr(kXlaClusterAttr, string(cluster_name)); } } TEST(PartiallyDeclusterPassTest, DeclusterMustBeConstantNodes) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output shape_a = ops::Placeholder(s.WithOpName("shape_a"), DT_INT32, ops::Placeholder::Attrs{}); Output shape_b = ops::Placeholder(s.WithOpName("shape_b"), DT_INT32, ops::Placeholder::Attrs{}); Output shape = 
ops::Add(s.WithOpName("shape"), shape_a, shape_b); Output reshape_input = ops::Placeholder(s.WithOpName("reshape_input"), DT_FLOAT, ops::Placeholder::Attrs{}); Output reshape = ops::Reshape(s.WithOpName("reshape"), reshape_input, shape); AddToCluster({shape.node(), reshape.node()}, "cluster_0"); auto graph = std::make_unique<Graph>(OpRegistry::Global()); TF_ASSERT_OK(s.ToGraph(graph.get())); TF_ASSERT_OK(PartiallyDecluster(&graph)); const Node* n = FindNodeByName(*graph, "shape"); ASSERT_NE(n, nullptr); EXPECT_EQ(GetXlaClusterForNode(*n), std::nullopt); } TEST(PartiallyDeclusterPassTest, DeclusteringStopsAtMetadataOps) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output input_a = ops::Placeholder(s.WithOpName("input_a"), DT_INT32, ops::Placeholder::Attrs{}); Output input_b = ops::Placeholder(s.WithOpName("shape_b"), DT_FLOAT, ops::Placeholder::Attrs{}); Output mul = ops::Mul(s.WithOpName("mul"), input_b, input_b); Output shape_of_mul = ops::Shape(s.WithOpName("shape_of_mul"), mul); Output shape = ops::Add(s.WithOpName("shape"), shape_of_mul, input_a); Output reshape_input = ops::Placeholder(s.WithOpName("reshape_input"), DT_FLOAT, ops::Placeholder::Attrs{}); Output reshape = ops::Reshape(s.WithOpName("reshape"), reshape_input, shape); AddToCluster({mul.node(), shape_of_mul.node(), shape.node(), reshape.node()}, "cluster_0"); auto graph = std::make_unique<Graph>(OpRegistry::Global()); TF_ASSERT_OK(s.ToGraph(graph.get())); TF_ASSERT_OK(PartiallyDecluster(&graph)); const Node* n = FindNodeByName(*graph, "shape"); ASSERT_NE(n, nullptr); EXPECT_EQ(GetXlaClusterForNode(*n), "cluster_0"); } TEST(PartiallyDeclusterPassTest, EdgeAcrossDifferentClusters) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output shape_a = ops::Placeholder(s.WithOpName("shape_a"), DT_INT32, ops::Placeholder::Attrs{}); Output shape_b = ops::Placeholder(s.WithOpName("shape_b"), DT_INT32, ops::Placeholder::Attrs{}); Output shape = ops::Add(s.WithOpName("shape"), shape_a, shape_b); Output reshape_input = ops::Placeholder(s.WithOpName("reshape_input"), DT_FLOAT, ops::Placeholder::Attrs{}); Output reshape = ops::Reshape(s.WithOpName("reshape"), reshape_input, shape); AddToCluster({reshape.node()}, "cluster_0"); AddToCluster({shape.node()}, "cluster_1"); auto graph = std::make_unique<Graph>(OpRegistry::Global()); TF_ASSERT_OK(s.ToGraph(graph.get())); TF_ASSERT_OK(PartiallyDecluster(&graph)); const Node* n = FindNodeByName(*graph, "shape"); ASSERT_NE(n, nullptr); EXPECT_EQ(GetXlaClusterForNode(*n), "cluster_1"); } TEST(PartiallyDeclusterPassTest, DontDeclusterXlaDeviceOps) { tensorflow::Scope s = tensorflow::Scope::NewRootScope(); Output shape_a = ops::Placeholder(s.WithOpName("shape_a"), DT_INT32, ops::Placeholder::Attrs{}); Output shape_b = ops::Placeholder(s.WithOpName("shape_b"), DT_INT32, ops::Placeholder::Attrs{}); Output shape = ops::Add(s.WithOpName("shape"), shape_a, shape_b); Output reshape_input = ops::Placeholder(s.WithOpName("reshape_input"), DT_FLOAT, ops::Placeholder::Attrs{}); Output reshape = ops::Reshape(s.WithOpName("reshape"), reshape_input, shape); AddToCluster({shape.node(), reshape.node()}, "cluster_0"); auto graph = std::make_unique<Graph>(OpRegistry::Global()); TF_ASSERT_OK(s.ToGraph(graph.get())); std::vector<std::unique_ptr<Device>> devices; TF_ASSERT_OK(DeviceFactory::AddDevices( SessionOptions(), "/job:localhost/replica:0/task:0", &devices)); Node* n = FindNodeByName(*graph, "shape"); ASSERT_NE(n, nullptr); n->set_assigned_device_name( 
"/job:localhost/replica:0/task:0/device:XLA_GPU:0"); TF_ASSERT_OK(PartiallyDecluster(&graph)); EXPECT_EQ(GetXlaClusterForNode(*n), "cluster_0"); } TEST(PartiallyDeclusterPassTest, EliminatedUnusedNodes) { const char* const kClusteredProducer0Name = "ClusteredProducer0"; const char* const kClusteredProducer1Name = "ClusteredProducer1"; std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); { GraphDefBuilder builder(GraphDefBuilder::kFailImmediately); Node* input = ops::SourceOp("FakeNullary", builder.opts().WithName("Input")); Node* clustered_producer_0 = ops::BinaryOp("FakeBinary", input, input, builder.opts().WithName(kClusteredProducer0Name)); Node* clustered_producer_1 = ops::BinaryOp("FakeBinary", clustered_producer_0, input, builder.opts().WithName(kClusteredProducer1Name)); ops::BinaryOp("FakeBinary", clustered_producer_1, input, builder.opts().WithName("UnclusteredConsumer")); clustered_producer_0->AddAttr(kXlaClusterAttr, "cluster_0"); clustered_producer_1->AddAttr(kXlaClusterAttr, "cluster_0"); TF_EXPECT_OK(GraphDefBuilderToGraph(builder, graph.get())); } TF_ASSERT_OK(PartiallyDecluster(&graph)); EXPECT_EQ(FindNodeByName(*graph, kClusteredProducer0Name), nullptr); EXPECT_EQ(FindNodeByName(*graph, kClusteredProducer1Name), nullptr); } TEST(PartiallyDeclusterPassTest, MetadataOpsDontStartClusters) { tensorflow::Scope root = tensorflow::Scope::NewRootScope(); tensorflow::Scope in_cluster_and = root.WithXlaCluster("cluster_0"); Output a = ops::Placeholder(root.WithOpName("a"), DT_FLOAT); Output b = ops::Shape(in_cluster_and.WithOpName("b"), a); Output c = ops::Rank(in_cluster_and.WithOpName("c"), b); Output d = ops::Size(in_cluster_and.WithOpName("d"), c); (void)ops::Shape(in_cluster_and.WithOpName("e"), d); auto graph = std::make_unique<Graph>(OpRegistry::Global()); TF_ASSERT_OK(root.ToGraph(graph.get())); TF_ASSERT_OK(PartiallyDecluster(&graph)); Node* n_b = FindNodeByName(*graph, "b"); ASSERT_NE(n_b, nullptr); EXPECT_EQ(GetXlaClusterForNode(*n_b), std::nullopt); Node* n_c = FindNodeByName(*graph, "c"); ASSERT_NE(n_c, nullptr); EXPECT_EQ(GetXlaClusterForNode(*n_c), std::nullopt); Node* n_d = FindNodeByName(*graph, "d"); ASSERT_NE(n_d, nullptr); EXPECT_EQ(GetXlaClusterForNode(*n_d), std::nullopt); Node* n_e = FindNodeByName(*graph, "e"); ASSERT_NE(n_e, nullptr); EXPECT_EQ(GetXlaClusterForNode(*n_e), std::nullopt); } TEST(PartiallyDeclusterPassTest, MetaConsumersArentDeclustered) { tensorflow::Scope root = tensorflow::Scope::NewRootScope(); tensorflow::Scope in_cluster_and = root.WithXlaCluster("cluster_0"); auto graph = std::make_unique<Graph>(OpRegistry::Global()); Output a = ops::Placeholder(root.WithOpName("a"), DT_FLOAT); Output b = ops::Add(in_cluster_and.WithOpName("b"), a, a); Output c = ops::Rank(in_cluster_and.WithOpName("c"), b); Output e; TF_ASSERT_OK( CreateOutputWithScope("FakeBinary", {c, c}, root.WithOpName("e"), &e)); TF_ASSERT_OK(root.ToGraph(graph.get())); TF_ASSERT_OK(PartiallyDecluster(&graph)); Node* n_b = FindNodeByName(*graph, "b"); ASSERT_NE(n_b, nullptr); EXPECT_EQ(GetXlaClusterForNode(*n_b), "cluster_0"); Node* n_c = FindNodeByName(*graph, "c"); ASSERT_NE(n_c, nullptr); EXPECT_EQ(GetXlaClusterForNode(*n_c), "cluster_0"); } TEST(PartiallyDeclusterPassTest, ConstInputsToSliceArentDeclustered) { tensorflow::Scope root = tensorflow::Scope::NewRootScope(); tensorflow::Scope in_cluster_and = root.WithXlaCluster("cluster_0"); auto graph = std::make_unique<Graph>(OpRegistry::Global()); Output a = ops::Placeholder(root.WithOpName("a"), DT_FLOAT, 
ops::Placeholder::Attrs{{4}}); Output b = ops::Const(in_cluster_and.WithOpName("b"), {1}); Output c = ops::Const(in_cluster_and.WithOpName("c"), {2}); Output d = ops::Slice(in_cluster_and.WithOpName("d"), a, b, c); TF_ASSERT_OK(root.ToGraph(graph.get())); TF_ASSERT_OK(PartiallyDecluster(&graph)); Node* n_b = FindNodeByName(*graph, "b"); ASSERT_NE(n_b, nullptr); EXPECT_EQ(GetXlaClusterForNode(*n_b), "cluster_0"); Node* n_c = FindNodeByName(*graph, "c"); ASSERT_NE(n_c, nullptr); EXPECT_EQ(GetXlaClusterForNode(*n_c), "cluster_0"); } TEST(PartiallyDeclusterPassTest, ConstInLoopWithCrossDeviceControlInputsAreDeclustered) { tensorflow::Scope root = tensorflow::Scope::NewRootScope(); tensorflow::Scope in_cluster_and = root.WithXlaCluster("cluster_0"); auto graph = std::make_unique<Graph>(OpRegistry::Global()); Output a = ops::Placeholder(root.WithOpName("a"), DT_FLOAT, ops::Placeholder::Attrs{{4}}); Output b = ops::Const(in_cluster_and.WithOpName("b"), {1}); Output c = ops::Const(in_cluster_and.WithOpName("c"), {2}); Output slice = ops::Slice(in_cluster_and.WithOpName("slice"), a, b, c); Output cond = ops::Placeholder(root.WithOpName("cond"), DT_BOOL); Output value = ops::Placeholder(root.WithOpName("value"), DT_FLOAT); Output loop_cond = ops::LoopCond(root.WithOpName("loop_cond"), cond); ops::Switch switch_node(root.WithOpName("switch"), value, loop_cond); Output identity = ops::Identity(root.WithOpName("identity"), switch_node.output_true); root.graph()->AddControlEdge(identity.node(), b.node()); TF_ASSERT_OK(root.ToGraph(graph.get())); std::vector<std::unique_ptr<Device>> devices; TF_ASSERT_OK(DeviceFactory::AddDevices( SessionOptions(), "/job:localhost/replica:0/task:0", &devices)); Node* identity_node = FindNodeByName(*graph, "identity"); ASSERT_NE(identity_node, nullptr); identity_node->set_assigned_device_name( "/job:localhost/replica:0/task:0/device:XLA_GPU:0"); TF_ASSERT_OK(PartiallyDecluster(&graph)); Node* n_b = FindNodeByName(*graph, "b"); ASSERT_NE(n_b, nullptr); EXPECT_EQ(GetXlaClusterForNode(*n_b), std::nullopt); Node* n_c = FindNodeByName(*graph, "c"); ASSERT_NE(n_c, nullptr); EXPECT_EQ(GetXlaClusterForNode(*n_c), "cluster_0"); } } }
1092
cpp
tensorflow/tensorflow
xla_platform_info
tensorflow/compiler/jit/xla_platform_info.cc
tensorflow/compiler/jit/xla_platform_info_test.cc
#ifndef TENSORFLOW_COMPILER_JIT_XLA_PLATFORM_INFO_H_ #define TENSORFLOW_COMPILER_JIT_XLA_PLATFORM_INFO_H_ #include <memory> #include <optional> #include <string> #include "tensorflow/compiler/jit/device_compiler.h" #include "tensorflow/compiler/jit/pjrt_base_device.h" #include "tensorflow/compiler/jit/xla_device.h" #include "xla/stream_executor/integrations/tf_allocator_adapter.h" #include "tensorflow/core/framework/op_kernel.h" namespace tensorflow { class XlaPlatformInfo { public: XlaPlatformInfo() : device_type_("") {} XlaPlatformInfo(XlaPlatformInfo&&) = default; explicit XlaPlatformInfo( const DeviceType device_type, se::Platform::Id platform_id, const XlaDevice::Metadata* xla_device_metadata, const PjRtBaseDevice::Metadata* pjrt_device_metadata, std::shared_ptr<se::DeviceMemoryAllocator> device_allocator) : device_type_(device_type), platform_id_(platform_id), xla_device_metadata_(xla_device_metadata), pjrt_device_metadata_(pjrt_device_metadata), device_allocator_(device_allocator) {} XlaPlatformInfo& operator=(XlaPlatformInfo&& other) = default; bool UseMultipleStreams() const { return xla_device_metadata_ && xla_device_metadata_->UseMultipleStreams(); } std::shared_ptr<se::DeviceMemoryAllocator> custom_allocator() const { return device_allocator_; } DeviceType device_type() const { return device_type_; } se::Platform::Id platform_id() const { return platform_id_; } const XlaDevice::Metadata* xla_device_metadata() const { return xla_device_metadata_; } bool is_on_xla_device() const { return xla_device_metadata() != nullptr; } const PjRtBaseDevice::Metadata* pjrt_device_metadata() const { return pjrt_device_metadata_; } private: DeviceType device_type_; se::Platform::Id platform_id_; const XlaDevice::Metadata* xla_device_metadata_; const PjRtBaseDevice::Metadata* pjrt_device_metadata_; std::shared_ptr<se::DeviceMemoryAllocator> device_allocator_; XlaPlatformInfo(const XlaPlatformInfo&) = delete; void operator=(const XlaPlatformInfo&) = delete; }; absl::StatusOr<std::optional<std::set<int>>> ParseVisibleDeviceList( absl::string_view visible_device_list); absl::StatusOr<DeviceType> GetCompilationDeviceType( const DeviceType& platform_device_type); Status BuildXlaDeviceCompiler( DeviceBase* dev, FunctionLibraryRuntime* flr, const XlaPlatformInfo& platform_info, DeviceType compilation_device_type, DeviceCompiler<xla::LocalExecutable, xla::LocalClient>** xla_device_compiler); Status GetOrCreatePjRtDeviceCompilerAndProfiler( const OpKernelContext& ctx, const XlaPlatformInfo& platform_info, FunctionLibraryRuntime* flr, DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>** pjrt_device_compiler, DeviceCompilationProfiler** profiler); Status GetOrCreatePjRtDeviceCompilerAndProfiler( const XlaPlatformInfo& platform_info, ResourceMgr* rm, FunctionLibraryRuntime* flr, DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>** pjrt_device_compiler, DeviceCompilationProfiler** profiler); XlaPlatformInfo XlaPlatformInfoFromDevice(DeviceBase* device); std::string GetPersistentCacheDirectory( const DeviceType& compilation_device_type); std::shared_ptr<se::DeviceMemoryAllocator> GetAllocator( DeviceBase* device, se::Stream* stream, const XlaPlatformInfo& platform_info); } #endif #include "tensorflow/compiler/jit/xla_platform_info.h" #include <memory> #include <optional> #include <set> #include <string> #include <utility> #include <vector> #include "absl/algorithm/container.h" #include "absl/status/status.h" #include "absl/strings/str_split.h" #include "absl/strings/string_view.h" #include 
"tensorflow/compiler/jit/device_executable_persistor.h" #include "tensorflow/compiler/jit/flags.h" #include "tensorflow/compiler/jit/pjrt_device_compiler_client.h" #include "tensorflow/compiler/jit/xla_compile_util.h" #include "tensorflow/compiler/jit/xla_device_compiler_client.h" #include "xla/client/client_library.h" #include "xla/client/local_client.h" #include "xla/pjrt/pjrt_client.h" #include "xla/service/compiler.h" #include "xla/stream_executor/platform_manager.h" #include "xla/tsl/framework/device_type.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/framework/resource_mgr.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/status.h" #include "tensorflow/core/tfrt/common/create_pjrt_client_util.h" #include "tensorflow/core/tfrt/common/global_state.h" #include "tensorflow/core/tfrt/common/pjrt_util.h" #include "tensorflow/core/tpu/tpu_defs.h" namespace tensorflow { namespace { using XlaDeviceCompiler = DeviceCompiler<xla::LocalExecutable, xla::LocalClient>; using PjRtDeviceCompiler = DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>; using XlaDeviceExecutablePersistor = DeviceExecutablePersistor<xla::LocalExecutable, xla::LocalClient>; using PjRtDeviceExecutablePersistor = DeviceExecutablePersistor<xla::PjRtLoadedExecutable, xla::PjRtClient>; XlaDeviceCompiler* CreateXlaDeviceCompiler( const XlaDeviceExecutablePersistor::Config& persistor_config, DeviceType compilation_device_type, xla::LocalClient* local_client) { return new XlaDeviceCompiler( std::make_unique<XlaDeviceExecutablePersistor>( std::move(persistor_config), compilation_device_type), std::make_unique<XlaDeviceCompilerClient>(local_client)); } PjRtDeviceCompiler* CreatePjRtDeviceCompiler(DeviceType compilation_device_type, xla::PjRtClient* pjrt_client) { std::string persistent_cache_directory = GetPersistentCacheDirectory(compilation_device_type); PjRtDeviceExecutablePersistor::Config persistor_config( persistent_cache_directory, GetMarkForCompilationPassFlags()->tf_xla_disable_strict_signature_checks, GetMarkForCompilationPassFlags()->tf_xla_persistent_cache_prefix, GetMarkForCompilationPassFlags()->tf_xla_persistent_cache_read_only); return new PjRtDeviceCompiler( std::make_unique<PjRtDeviceExecutablePersistor>( std::move(persistor_config), compilation_device_type), std::make_unique<PjRtDeviceCompilerClient>(pjrt_client)); } absl::StatusOr<std::optional<std::set<int>>> GetAllowedGpus( FunctionLibraryRuntime* flr) { std::optional<std::set<int>> gpu_ids = std::nullopt; if (flr->config_proto()) { string allowed_gpus = flr->config_proto()->gpu_options().visible_device_list(); TF_ASSIGN_OR_RETURN(gpu_ids, ParseVisibleDeviceList(allowed_gpus)); } return gpu_ids; } Status GetCompilationDeviceTypeAndPjRtClient( const XlaPlatformInfo& platform_info, FunctionLibraryRuntime* flr, DeviceType* compilation_device_type, xla::PjRtClient** pjrt_client) { DeviceType device_type = platform_info.device_type(); if (platform_info.xla_device_metadata()) { VLOG(2) << "Building PjRtDeviceCompiler using " "platform_info.xla_device_metadata()."; *compilation_device_type = platform_info.xla_device_metadata()->jit_device_type(); TF_ASSIGN_OR_RETURN(*pjrt_client, GetOrCreatePjRtClient(device_type)); return absl::OkStatus(); } if (platform_info.pjrt_device_metadata()) { VLOG(2) << "Building PjRtDeviceCompiler using " "platform_info.pjrt_device_metadata()."; *compilation_device_type = 
platform_info.pjrt_device_metadata()->jit_device_type(); TF_ASSIGN_OR_RETURN(*pjrt_client, GetOrCreatePjRtClient(device_type)); return absl::OkStatus(); } if (device_type == DEVICE_TPU) { *compilation_device_type = DeviceType(DEVICE_TPU_XLA_JIT); TF_ASSIGN_OR_RETURN(*pjrt_client, GetOrCreatePjRtClient(device_type)); return absl::OkStatus(); } VLOG(2) << "platform_info.xla_device_metadata not found and " "platform_info.device_type() != DEVICE_TPU. Building " "PjRtDeviceCompiler for non-XLA device."; const XlaOpRegistry::DeviceRegistration* registration; if (!XlaOpRegistry::GetCompilationDevice(device_type.type(), &registration)) { return errors::InvalidArgument("No JIT device registered for ", device_type.type()); } *compilation_device_type = DeviceType(registration->compilation_device_name); TF_ASSIGN_OR_RETURN(auto allowed_gpus, GetAllowedGpus(flr)); TF_ASSIGN_OR_RETURN(*pjrt_client, GetOrCreatePjRtClient(device_type, allowed_gpus)); return absl::OkStatus(); } } std::string GetPersistentCacheDirectory( const DeviceType& compilation_device_type) { if (!GetMarkForCompilationPassFlags() ->tf_xla_persistent_cache_device_types.empty() && !absl::c_any_of(absl::StrSplit(GetMarkForCompilationPassFlags() ->tf_xla_persistent_cache_device_types, ','), [&](absl::string_view device) { return compilation_device_type == DeviceType(device); })) { return ""; } return GetMarkForCompilationPassFlags()->tf_xla_persistent_cache_directory; } absl::StatusOr<std::optional<std::set<int>>> ParseVisibleDeviceList( absl::string_view visible_device_list) { std::set<int> gpu_ids; if (visible_device_list.empty()) { return {{std::nullopt}}; } const std::vector<string> visible_devices = absl::StrSplit(visible_device_list, ','); for (const string& platform_device_id_str : visible_devices) { int32_t platform_device_id; if (!absl::SimpleAtoi(platform_device_id_str, &platform_device_id)) { return errors::InvalidArgument( "Could not parse entry in 'visible_device_list': '", platform_device_id_str, "'. 
visible_device_list = ", visible_device_list); } gpu_ids.insert(platform_device_id); } return {{gpu_ids}}; } absl::StatusOr<DeviceType> GetCompilationDeviceType( const DeviceType& platform_device_type) { DeviceType compilation_device_type = platform_device_type; const XlaOpRegistry::DeviceRegistration* registration = nullptr; if (!XlaOpRegistry::GetCompilationDevice(platform_device_type.type(), &registration)) { return errors::InvalidArgument("No JIT device registered for ", platform_device_type.type()); } compilation_device_type = DeviceType(registration->compilation_device_name); return compilation_device_type; } Status BuildXlaDeviceCompiler(DeviceBase* device, FunctionLibraryRuntime* flr, const XlaPlatformInfo& platform_info, DeviceType compilation_device_type, XlaDeviceCompiler** xla_device_compiler) { if (platform_info.platform_id() == nullptr && platform_info.device_type() == DEVICE_GPU) { *xla_device_compiler = new XlaDeviceCompiler(nullptr, nullptr); return absl::OkStatus(); } std::string persistent_cache_directory = GetPersistentCacheDirectory(platform_info.device_type()); XlaDeviceExecutablePersistor::Config persistor_config( persistent_cache_directory, GetMarkForCompilationPassFlags()->tf_xla_disable_strict_signature_checks, GetMarkForCompilationPassFlags()->tf_xla_persistent_cache_prefix, GetMarkForCompilationPassFlags()->tf_xla_persistent_cache_read_only); if (platform_info.xla_device_metadata()) { *xla_device_compiler = CreateXlaDeviceCompiler( persistor_config, platform_info.xla_device_metadata()->jit_device_type(), platform_info.xla_device_metadata()->client()); return absl::OkStatus(); } if (platform_info.device_type() == DEVICE_TPU) { *xla_device_compiler = CreateXlaDeviceCompiler( persistor_config, DeviceType(DEVICE_TPU_XLA_JIT), nullptr); return absl::OkStatus(); } if (platform_info.platform_id() == nullptr) { return errors::InvalidArgument("platform_id is null."); } auto platform = se::PlatformManager::PlatformWithId(platform_info.platform_id()); if (!platform.ok()) { return platform.status(); } absl::StatusOr<xla::Compiler*> compiler_for_platform = xla::Compiler::GetForPlatform(platform.value()); if (!compiler_for_platform.ok()) { const Status& status = compiler_for_platform.status(); if (status.code() == error::NOT_FOUND) { return errors::Unimplemented("Could not find compiler for platform ", platform.value()->Name(), ": ", status.ToString()); } } xla::LocalClientOptions client_options; client_options.set_platform(platform.value()); if (device != nullptr) { client_options.set_intra_op_parallelism_threads( device->tensorflow_cpu_worker_threads()->num_threads); } if (flr != nullptr) { TF_ASSIGN_OR_RETURN(auto allowed_gpus, GetAllowedGpus(flr)); client_options.set_allowed_devices(allowed_gpus); } TF_ASSIGN_OR_RETURN( auto client, xla::ClientLibrary::GetOrCreateLocalClient(client_options)); *xla_device_compiler = CreateXlaDeviceCompiler( persistor_config, compilation_device_type, client); return absl::OkStatus(); } Status GetOrCreatePjRtDeviceCompilerAndProfiler( const XlaPlatformInfo& platform_info, ResourceMgr* rm, FunctionLibraryRuntime* flr, PjRtDeviceCompiler** pjrt_device_compiler, DeviceCompilationProfiler** profiler) { const auto& device_type = platform_info.device_type(); const std::string& compiler_name = GetPjRtDeviceCompilerResourceName(device_type); const std::string& profiler_name = GetPjRtDeviceCompilationProfilerResourceName(device_type); bool deleted_old_device_compiler = false; Status s = rm->Lookup<PjRtDeviceCompiler>( rm->default_container(), 
compiler_name, pjrt_device_compiler); if (s.ok() && device_type == DEVICE_TPU) { auto* existing_pjrt_client = (*pjrt_device_compiler)->client(); TF_ASSIGN_OR_RETURN(auto* latest_pjrt_client, GetPjRtClient(device_type)); if (existing_pjrt_client != latest_pjrt_client) { TF_RETURN_IF_ERROR(rm->Delete<PjRtDeviceCompiler>(rm->default_container(), compiler_name)); TF_RETURN_IF_ERROR(rm->Delete<DeviceCompilationProfiler>( rm->default_container(), profiler_name)); deleted_old_device_compiler = true; } } if (!s.ok() || deleted_old_device_compiler) { DeviceType compilation_device_type(""); xla::PjRtClient* pjrt_client = nullptr; TF_RETURN_IF_ERROR(GetCompilationDeviceTypeAndPjRtClient( platform_info, flr, &compilation_device_type, &pjrt_client)); TF_RETURN_IF_ERROR(rm->LookupOrCreate<PjRtDeviceCompiler>( rm->default_container(), compiler_name, pjrt_device_compiler, [&](PjRtDeviceCompiler** pjrt_device_compiler) { *pjrt_device_compiler = CreatePjRtDeviceCompiler(compilation_device_type, pjrt_client); return absl::OkStatus(); })); } TF_RETURN_IF_ERROR(rm->LookupOrCreate<DeviceCompilationProfiler>( rm->default_container(), profiler_name, profiler, [](DeviceCompilationProfiler** profiler) { *profiler = new DeviceCompilationProfiler(); return absl::OkStatus(); })); return absl::OkStatus(); } Status GetOrCreatePjRtDeviceCompilerAndProfiler( const OpKernelContext& ctx, const XlaPlatformInfo& platform_info, FunctionLibraryRuntime* flr, DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>** pjrt_device_compiler, DeviceCompilationProfiler** profiler) { TF_ASSIGN_OR_RETURN(ResourceMgr * rm, GetResourceMgrForDeviceCompiler( ctx, platform_info.device_type())); return GetOrCreatePjRtDeviceCompilerAndProfiler( platform_info, rm, flr, pjrt_device_compiler, profiler); } XlaPlatformInfo XlaPlatformInfoFromDevice(DeviceBase* device_base) { se::Platform::Id platform_id = nullptr; const XlaDevice::Metadata* xla_device_metadata = nullptr; const PjRtBaseDevice::Metadata* pjrt_device_metadata = nullptr; std::shared_ptr<se::DeviceMemoryAllocator> custom_allocator; const std::string& device_type = device_base->device_type(); if (device_type == DEVICE_CPU) { platform_id = se::host::kHostPlatformId; } else if (device_type == DEVICE_GPU) { auto device = static_cast<Device*>(device_base); platform_id = device->tensorflow_accelerator_device_info() ->stream->parent() ->GetPlatform() ->id(); } else if (XlaDevice::GetMetadataFromDevice(device_base, &xla_device_metadata) .ok()) { platform_id = xla_device_metadata->platform()->id(); custom_allocator = xla_device_metadata->client()->backend().shared_memory_allocator(); } else if (auto metadata = PjRtBaseDevice::GetMetadataFromDevice(device_base); metadata.ok()) { pjrt_device_metadata = *metadata; } return XlaPlatformInfo(DeviceType(device_type), platform_id, xla_device_metadata, pjrt_device_metadata, custom_allocator); } std::shared_ptr<se::DeviceMemoryAllocator> GetAllocator( DeviceBase* device, se::Stream* stream, const XlaPlatformInfo& platform_info) { if (platform_info.custom_allocator()) { return platform_info.custom_allocator(); } auto* alloc = device->GetAllocator({}); if (!stream) { se::Platform* platform = se::PlatformManager::PlatformWithId(platform_info.platform_id()) .value(); return std::make_shared<se::TfAllocatorAdapter>(alloc, platform); } return std::make_shared<se::TfAllocatorAdapter>(alloc, stream); } }
#include "tensorflow/compiler/jit/xla_platform_info.h" #include <memory> #include <vector> #include <gtest/gtest.h> #include "tensorflow/compiler/jit/flags.h" #include "tensorflow/compiler/jit/test_util.h" #include "xla/pjrt/tfrt_cpu_pjrt_client.h" #include "tensorflow/core/framework/device_base.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/refcount.h" #include "tensorflow/core/platform/status_matchers.h" #include "tensorflow/core/platform/statusor.h" #include "tensorflow/core/protobuf/error_codes.pb.h" #include "tensorflow/core/tfrt/common/create_pjrt_client_util.h" #include "tensorflow/core/tfrt/common/pjrt_util.h" #include "tensorflow/core/tpu/tpu_defs.h" namespace tensorflow { namespace { using XlaDeviceCompiler = DeviceCompiler<xla::LocalExecutable, xla::LocalClient>; using PjRtDeviceCompiler = DeviceCompiler<xla::PjRtLoadedExecutable, xla::PjRtClient>; class XlaPlatformInfoTest : public ::testing::Test { protected: void SetUp() override { tensorflow::GetXlaDeviceFlags()->tf_xla_enable_xla_devices = true; tensorflow::GetMarkForCompilationPassFlags() ->tf_xla_persistent_cache_directory = ""; tensorflow::GetMarkForCompilationPassFlags() ->tf_xla_persistent_cache_device_types = ""; } DeviceSetup device_setup_; }; class StubDevice : public DeviceBase { public: StubDevice() : DeviceBase(nullptr) {} }; #if GOOGLE_CUDA || TENSORFLOW_USE_ROCM TEST_F(XlaPlatformInfoTest, BuildXlaDeviceCompilerXlaDeviceMetadata) { device_setup_.AddDevicesAndSetUp({DEVICE_XLA_GPU}); Device* device = device_setup_.GetDevice(DEVICE_XLA_GPU); const XlaDevice::Metadata* metadata = nullptr; TF_CHECK_OK(XlaDevice::GetMetadataFromDevice(device, &metadata)); XlaPlatformInfo platform_info = XlaPlatformInfoFromDevice(device); TF_ASSERT_OK_AND_ASSIGN( DeviceType compilation_device_type, GetCompilationDeviceType(platform_info.device_type())); XlaDeviceCompiler* xla_device_compiler = nullptr; TF_EXPECT_OK(BuildXlaDeviceCompiler(device, device_setup_.flr(), platform_info, compilation_device_type, &xla_device_compiler)); core::ScopedUnref xla_device_compiler_ref(xla_device_compiler); EXPECT_EQ(xla_device_compiler->device_type(), metadata->jit_device_type()); EXPECT_EQ(xla_device_compiler->client(), metadata->client()); } TEST_F(XlaPlatformInfoTest, BuildXlaDeviceCompilerXlaDeviceCacheEnabled) { tensorflow::GetMarkForCompilationPassFlags() ->tf_xla_persistent_cache_directory = "/tmp/xla_cache"; tensorflow::GetMarkForCompilationPassFlags() ->tf_xla_persistent_cache_device_types = DEVICE_XLA_GPU; device_setup_.AddDevicesAndSetUp({DEVICE_XLA_GPU}); Device* device = device_setup_.GetDevice(DEVICE_XLA_GPU); const XlaDevice::Metadata* metadata = nullptr; TF_CHECK_OK(XlaDevice::GetMetadataFromDevice(device, &metadata)); XlaPlatformInfo platform_info = XlaPlatformInfoFromDevice(device); TF_ASSERT_OK_AND_ASSIGN( DeviceType compilation_device_type, GetCompilationDeviceType(platform_info.device_type())); XlaDeviceCompiler* xla_device_compiler = nullptr; TF_EXPECT_OK(BuildXlaDeviceCompiler(device, device_setup_.flr(), platform_info, compilation_device_type, &xla_device_compiler)); core::ScopedUnref xla_device_compiler_ref(xla_device_compiler); EXPECT_EQ(xla_device_compiler->device_type(), metadata->jit_device_type()); EXPECT_EQ(xla_device_compiler->client(), metadata->client()); EXPECT_EQ(xla_device_compiler->persistor()->persistent_cache_directory(), "/tmp/xla_cache"); } TEST_F(XlaPlatformInfoTest, 
BuildXlaDeviceCompilerNonXlaDevice) { device_setup_.AddDevicesAndSetUp({DEVICE_GPU}); Device* device = device_setup_.GetDevice(DEVICE_GPU); XlaPlatformInfo platform_info = XlaPlatformInfoFromDevice(device); TF_ASSERT_OK_AND_ASSIGN( DeviceType compilation_device_type, GetCompilationDeviceType(platform_info.device_type())); XlaDeviceCompiler* xla_device_compiler = nullptr; TF_EXPECT_OK(BuildXlaDeviceCompiler(device, device_setup_.flr(), platform_info, compilation_device_type, &xla_device_compiler)); core::ScopedUnref xla_device_compiler_ref(xla_device_compiler); EXPECT_EQ(xla_device_compiler->device_type(), DeviceType(DEVICE_GPU_XLA_JIT)); EXPECT_TRUE(xla_device_compiler->client() != nullptr); } TEST_F(XlaPlatformInfoTest, GetOrCreatePjRtDeviceCompilerAndProfilerXlaDevice) { DeviceType device_type = DeviceType(DEVICE_XLA_GPU); device_setup_.AddDevicesAndSetUp({device_type.type()}); Device* device = device_setup_.GetDevice(device_type.type()); const XlaDevice::Metadata* metadata = nullptr; TF_CHECK_OK(XlaDevice::GetMetadataFromDevice(device, &metadata)); XlaPlatformInfo platform_info = XlaPlatformInfoFromDevice(device); ResourceMgr resource_mgr(""); OpKernelContext::Params params; params.resource_manager = &resource_mgr; params.device = device; OpKernelContext ctx(&params, 0); PjRtDeviceCompiler* pjrt_device_compiler = nullptr; DeviceCompilationProfiler* profiler = nullptr; TF_EXPECT_OK(GetOrCreatePjRtDeviceCompilerAndProfiler( ctx, platform_info, device_setup_.flr(), &pjrt_device_compiler, &profiler)); core::ScopedUnref pjrt_device_compiler_ref(pjrt_device_compiler); core::ScopedUnref profiler_ref(profiler); TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client, GetOrCreatePjRtClient(device_type)); EXPECT_EQ(pjrt_device_compiler->device_type(), metadata->jit_device_type()); EXPECT_EQ(pjrt_device_compiler->client(), pjrt_client); } TEST_F(XlaPlatformInfoTest, GetOrCreatePjRtDeviceCompilerAndProfilerGpuDeviceCacheEnabled) { tensorflow::GetMarkForCompilationPassFlags() ->tf_xla_persistent_cache_directory = "/tmp/xla_cache"; tensorflow::GetMarkForCompilationPassFlags() ->tf_xla_persistent_cache_device_types = DEVICE_GPU_XLA_JIT; device_setup_.AddDevicesAndSetUp({DEVICE_GPU}); Device* device = device_setup_.GetDevice(DEVICE_GPU); XlaPlatformInfo platform_info = XlaPlatformInfoFromDevice(device); ResourceMgr resource_mgr(""); OpKernelContext::Params params; params.resource_manager = &resource_mgr; params.device = device; OpKernelContext ctx(&params, 0); PjRtDeviceCompiler* pjrt_device_compiler = nullptr; DeviceCompilationProfiler* profiler = nullptr; TF_EXPECT_OK(GetOrCreatePjRtDeviceCompilerAndProfiler( ctx, platform_info, device_setup_.flr(), &pjrt_device_compiler, &profiler)); EXPECT_EQ(pjrt_device_compiler->persistor()->persistent_cache_directory(), "/tmp/xla_cache"); core::ScopedUnref pjrt_device_compiler_ref(pjrt_device_compiler); core::ScopedUnref profiler_ref(profiler); } #endif TEST_F(XlaPlatformInfoTest, BuildXlaDeviceCompilerTpuDevice) { DeviceType compilation_device_type = DeviceType(DEVICE_TPU_XLA_JIT); Device* device = nullptr; XlaPlatformInfo platform_info(DeviceType(DEVICE_TPU), nullptr, nullptr, nullptr, nullptr); XlaDeviceCompiler* xla_device_compiler = nullptr; TF_EXPECT_OK(BuildXlaDeviceCompiler(device, nullptr, platform_info, compilation_device_type, &xla_device_compiler)); core::ScopedUnref xla_device_compiler_ref(xla_device_compiler); EXPECT_EQ(xla_device_compiler->device_type(), compilation_device_type); EXPECT_EQ(xla_device_compiler->client(), nullptr); } TEST_F(XlaPlatformInfoTest, 
BuildXlaDeviceCompilerNoCompilationCache) { DeviceType compilation_device_type = DeviceType(DEVICE_TPU_XLA_JIT); tensorflow::GetMarkForCompilationPassFlags() ->tf_xla_persistent_cache_directory = "/tmp/xla_cache"; tensorflow::GetMarkForCompilationPassFlags() ->tf_xla_persistent_cache_device_types = DEVICE_XLA_GPU; Device* device = nullptr; XlaPlatformInfo platform_info(DeviceType(DEVICE_TPU), nullptr, nullptr, nullptr, nullptr); XlaDeviceCompiler* xla_device_compiler = nullptr; TF_EXPECT_OK(BuildXlaDeviceCompiler(device, nullptr, platform_info, compilation_device_type, &xla_device_compiler)); core::ScopedUnref xla_device_compiler_ref(xla_device_compiler); EXPECT_EQ(xla_device_compiler->device_type(), compilation_device_type); EXPECT_TRUE( xla_device_compiler->persistor()->persistent_cache_directory().empty()); } TEST_F(XlaPlatformInfoTest, GetOrCreatePjRtDeviceCompilerAndProfilerTpuDeviceNoCompilationCache) { tensorflow::GetMarkForCompilationPassFlags() ->tf_xla_persistent_cache_directory = "/tmp/xla_cache"; tensorflow::GetMarkForCompilationPassFlags() ->tf_xla_persistent_cache_device_types = DEVICE_GPU_XLA_JIT; DeviceType device_type = DeviceType(DEVICE_TPU); DeviceType compilation_device_type = DeviceType(DEVICE_TPU_XLA_JIT); TF_CHECK_OK(SetPjRtClientInTFGlobalResourceManager( device_type, xla::GetTfrtCpuClient(true, 1) .value())); TF_ASSERT_OK_AND_ASSIGN(auto pjrt_client, GetOrCreatePjRtClient(device_type)); XlaPlatformInfo platform_info(device_type, nullptr, nullptr, nullptr, nullptr); OpKernelContext::Params params; StubDevice stub_device; params.device = &stub_device; OpKernelContext ctx(&params, 0); PjRtDeviceCompiler* pjrt_device_compiler = nullptr; DeviceCompilationProfiler* profiler = nullptr; TF_EXPECT_OK(GetOrCreatePjRtDeviceCompilerAndProfiler( ctx, platform_info, nullptr, &pjrt_device_compiler, &profiler)); core::ScopedUnref pjrt_device_compiler_ref(pjrt_device_compiler); core::ScopedUnref profiler_ref(profiler); EXPECT_EQ(pjrt_device_compiler->device_type(), compilation_device_type); EXPECT_EQ(pjrt_device_compiler->client(), pjrt_client); EXPECT_TRUE( pjrt_device_compiler->persistor()->persistent_cache_directory().empty()); } TEST_F(XlaPlatformInfoTest, GetPersistentCacheDirectoryMultiple) { tensorflow::GetMarkForCompilationPassFlags() ->tf_xla_persistent_cache_directory = "/tmp/xla_cache"; tensorflow::GetMarkForCompilationPassFlags() ->tf_xla_persistent_cache_device_types = "GPU,CPU"; DeviceType device_gpu = DeviceType(DEVICE_GPU); EXPECT_EQ(GetPersistentCacheDirectory(device_gpu), "/tmp/xla_cache"); DeviceType device_cpu = DeviceType(DEVICE_CPU); EXPECT_EQ(GetPersistentCacheDirectory(device_cpu), "/tmp/xla_cache"); DeviceType device_tpu = DeviceType(DEVICE_TPU); EXPECT_TRUE(GetPersistentCacheDirectory(device_tpu).empty()); } TEST_F(XlaPlatformInfoTest, GetPersistentCacheDirectoryNoDeviceTypes) { tensorflow::GetMarkForCompilationPassFlags() ->tf_xla_persistent_cache_directory = "/tmp/xla_cache"; tensorflow::GetMarkForCompilationPassFlags() ->tf_xla_persistent_cache_device_types = ""; DeviceType device_gpu = DeviceType(DEVICE_GPU); EXPECT_EQ(GetPersistentCacheDirectory(device_gpu), "/tmp/xla_cache"); DeviceType device_cpu = DeviceType(DEVICE_CPU); EXPECT_EQ(GetPersistentCacheDirectory(device_cpu), "/tmp/xla_cache"); DeviceType device_tpu = DeviceType(DEVICE_TPU); EXPECT_EQ(GetPersistentCacheDirectory(device_tpu), "/tmp/xla_cache"); } } }
1093
cpp
tensorflow/tensorflow
tf2xla_util
tensorflow/compiler/tf2xla/tf2xla_util.cc
tensorflow/compiler/tf2xla/tf2xla_util_test.cc
#ifndef TENSORFLOW_COMPILER_TF2XLA_TF2XLA_UTIL_H_ #define TENSORFLOW_COMPILER_TF2XLA_TF2XLA_UTIL_H_ #include <unordered_map> #include "absl/types/optional.h" #include "tensorflow/compiler/tf2xla/tf2xla.pb.h" #include "tensorflow/compiler/tf2xla/tf2xla_defs.h" #include "xla/status_macros.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/framework/kernel_def.pb.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/graph/graph.h" #include "tensorflow/core/lib/core/status.h" namespace tensorflow { Status ValidateConfig(const tf2xla::Config& config); Status AddPlaceholdersForFeeds( const tf2xla::Config& config, const OpRegistryInterface* op_registry, std::unordered_map<string, string>* feed_remapping, GraphDef* graph_def); Status PruneGraphDefInto(const tf2xla::Config& config, const GraphDef& in, GraphDef* out); string TensorIdToString(const tf2xla::TensorId& id); Status SetNodeShardingFromNeighbors(Node* n, bool out_edges); void AddDtypeToKernelDefConstraint(absl::string_view name, DataType dtype, KernelDef* kdef); uint32 GetXLARandomSeed(); class AssociatedFunctionInfo { public: enum AssociatedFunctionType { kFunctionAttr = 0, kFunctionCallNode = 1, kSymbolicGradient = 2, }; static AssociatedFunctionInfo FunctionAttr(const string& func_name, const AttrValueMap& attrs, const string& attr_name) { return AssociatedFunctionInfo(kFunctionAttr, func_name, attrs, attr_name); } static AssociatedFunctionInfo FunctionCall(const string& func_name, const AttrValueMap& attrs) { return AssociatedFunctionInfo(kFunctionCallNode, func_name, attrs, ""); } static AssociatedFunctionInfo SymbolicGradient(const string& func_name, const AttrValueMap& attrs) { return AssociatedFunctionInfo(kSymbolicGradient, func_name, attrs, ""); } AssociatedFunctionType type() const { return type_; } const string& func_name() const { return func_name_; } const string& attr_name() const { return attr_name_; } const AttrValueMap& attrs() const { return attrs_; } private: AssociatedFunctionInfo(AssociatedFunctionType type, const string& func_name, const AttrValueMap& attrs, const string& attr_name) : type_(type), func_name_(func_name), attrs_(attrs), attr_name_(attr_name) {} AssociatedFunctionType type_; string func_name_; AttrValueMap attrs_; string attr_name_; }; bool HasAssociatedFunction(const NodeDef& node_def, const FunctionLibraryDefinition* fld); std::vector<AssociatedFunctionInfo> GetAssociatedFunctions( const Node& node, const FunctionLibraryDefinition* fld); Status RewriteAssociatedFunction( Graph* graph, Node* node, FunctionLibraryDefinition* fld, const AssociatedFunctionInfo& associated_function, const string& rewritten_function_name); class CachedFunctionHandles { public: CachedFunctionHandles(FunctionLibraryRuntime* flr) : flr_(flr) {} Status GetOrInstantiate(const string& func_name, AttrSlice attrs, FunctionLibraryRuntime::Handle* handle); Status ReleaseAllHandles(); ~CachedFunctionHandles() { ReleaseAllHandles().IgnoreError(); } private: FunctionLibraryRuntime* flr_; std::map<string, FunctionLibraryRuntime::Handle> handles_; CachedFunctionHandles(const CachedFunctionHandles&) = delete; void operator=(const CachedFunctionHandles&) = delete; }; struct OutEdgeInfo { Node* dst; int src_output, dst_input; }; absl::StatusOr<Node*> ReplaceNode(Graph* g, Node* n, const NodeDef& node_def); absl::StatusOr<Node*> BuildIdentityNode(Graph* graph, const string& node_name, DataType dtype, const Node* input, std::optional<string> 
requested_device); Status PropagateConstIntoFunctionalNodes( Graph* g, const FunctionLibraryDefinition* lookup_fld, FunctionLibraryDefinition* fld); Status PruneUnreachableFunctionsFromGraph(const Graph& g, FunctionLibraryDefinition* fld); Status RewriteTensorListWithConstElement(Graph* g, FunctionLibraryDefinition* fld); inline bool IsConstTraversableOpType(const Node* node) { return node->type_string() == "Identity" || node->type_string() == "IdentityN" || node->IsWhileNode(); } absl::StatusOr<bool> IsLoopInvariant( const FunctionBody* loop_body, int index, const FunctionLibraryDefinition* lookup_fld); } #endif #include "tensorflow/compiler/tf2xla/tf2xla_util.h" #include <functional> #include <queue> #include <random> #include <set> #include <unordered_map> #include "absl/container/flat_hash_map.h" #include "absl/strings/str_cat.h" #include "tensorflow/compiler/tf2xla/sharding_util.h" #include "tensorflow/compiler/tf2xla/tf2xla.pb.h" #include "xla/xla_data.pb.h" #include "tensorflow/core/common_runtime/function.h" #include "tensorflow/core/common_runtime/function_body.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/framework/graph_def_util.h" #include "tensorflow/core/framework/graph_to_functiondef.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/node_def_util.h" #include "tensorflow/core/framework/op_def_builder.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_shape.pb.h" #include "tensorflow/core/framework/versions.pb.h" #include "tensorflow/core/graph/tensor_id.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/platform/errors.h" namespace tensorflow { namespace { Status ValidateTensorId(const tf2xla::TensorId& id) { if (id.node_name().empty()) { return errors::InvalidArgument("TensorId node_name must be non-empty"); } if (id.output_index() < 0) { return errors::InvalidArgument("TensorId output_index must be positive"); } return absl::OkStatus(); } Status CheckNameDuplicates(const string& kind, const string& name, std::set<string>* names) { if (!name.empty()) { if (!names->insert(name).second) { return errors::InvalidArgument("duplicate ", kind, " name: ", name); } } return absl::OkStatus(); } Status CheckFeedFetchNameConflicts(const string& kind, const std::set<string>& names) { for (const string& name : names) { const string name_data(name + "_data"); if (names.find(name_data) != names.end()) { return errors::InvalidArgument("conflicting ", kind, " name: ", name, " and ", name_data); } } return absl::OkStatus(); } Status CopyAssociatedFunctions(Graph* g, const FunctionLibraryDefinition* lookup_fld, FunctionLibraryDefinition* fld) { for (Node* n : g->op_nodes()) { for (const auto& associated_function : GetAssociatedFunctions(*n, lookup_fld)) { switch (associated_function.type()) { case AssociatedFunctionInfo::kFunctionCallNode: { const FunctionDef* fdef = lookup_fld->Find(associated_function.func_name()); if (!fdef) { return errors::Internal( "Cannot find function ", associated_function.func_name(), " for function call node ", n->DebugString()); } TF_RETURN_IF_ERROR(fld->AddFunctionDef(*fdef)); break; } case AssociatedFunctionInfo::kSymbolicGradient: case AssociatedFunctionInfo::kFunctionAttr: break; } } } return absl::OkStatus(); } absl::StatusOr<Node*> ReplaceEdge(Graph* g, Node* dst, int 
dst_input, Node* with, int with_output) { NodeDef replace_def = dst->def(); *replace_def.mutable_input(dst_input) = with->name(); TF_ASSIGN_OR_RETURN(Node * replace_node, ReplaceNode(g, dst, replace_def)); const Edge* usage_edge; TF_RETURN_IF_ERROR(replace_node->input_edge(dst_input, &usage_edge)); g->RemoveEdge(usage_edge); g->AddEdge(with, with_output, replace_node, dst_input); return replace_node; } Status ReplaceSrcOutputUsageWithNode(Graph* g, Node* src, int src_output, Node* replacement) { VLOG(1) << "Replace usages of output " << src_output << " of node " << (VLOG_IS_ON(3) ? src->DebugString() : src->name()) << " with " << (VLOG_IS_ON(3) ? replacement->DebugString() : replacement->name()); struct OutEdgeInfo { int dst_node_id, dst_input; }; std::vector<OutEdgeInfo> usages; for (const Edge* e : src->out_edges()) { if (e->IsControlEdge() || e->src_output() != src_output) { continue; } usages.push_back({e->dst()->id(), e->dst_input()}); } for (int i = 0, end = usages.size(); i < end; i++) { Node* usage_node = g->FindNodeId(usages[i].dst_node_id); VLOG(2) << " Replace usage by " << usage_node->DebugString(); TF_ASSIGN_OR_RETURN( Node * replace_node, ReplaceEdge(g, usage_node, usages[i].dst_input, replacement, 0)); for (int j = i + 1, end = usages.size(); j < end; j++) { if (usages[j].dst_node_id == usages[i].dst_node_id) { usages[j].dst_node_id = replace_node->id(); } } } return absl::OkStatus(); } Status ReplaceArgUsageWithConstNode( Graph* g, const absl::flat_hash_map<int, const Node*>& const_input_index_to_node) { absl::flat_hash_map<int, Node*> arg_nodes; for (Node* n : g->op_nodes()) { if (n->IsArg()) { int index; TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "index", &index)); arg_nodes[index] = n; } } for (const auto& iter : const_input_index_to_node) { int arg_index = iter.first; VLOG(2) << "Replace usages of _Arg " << arg_index; NodeDef const_def = iter.second->def(); const_def.set_name(g->NewName(const_def.name())); TF_ASSIGN_OR_RETURN(Node * const_node, g->AddNode(const_def)); Node* arg_node = arg_nodes[arg_index]; TF_RETURN_IF_ERROR( ReplaceSrcOutputUsageWithNode(g, arg_node, 0, const_node)); } return absl::OkStatus(); } Status ReplaceRetvalInputWithArg( Graph* g, const absl::flat_hash_map<int, const Node*>& const_input_index_to_node) { absl::flat_hash_map<int, Node*> arg_nodes; absl::flat_hash_map<int, Node*> ret_nodes; for (Node* n : g->op_nodes()) { if (n->IsRetval() || n->IsArg()) { int index; TF_RETURN_IF_ERROR(GetNodeAttr(n->attrs(), "index", &index)); if (n->IsRetval()) { ret_nodes[index] = n; } else { arg_nodes[index] = n; } } } for (const auto& iter : const_input_index_to_node) { int arg_index = iter.first; VLOG(2) << "Bind _Retval " << arg_index << " to _Arg " << arg_index; TF_RETURN_IF_ERROR( ReplaceEdge(g, ret_nodes[arg_index], 0, arg_nodes[arg_index], 0) .status()); } return absl::OkStatus(); } Status PropagateConstIntoFuncAttr( Node* n, const string& attr_name, const absl::flat_hash_map<int, const Node*>& const_input_index_to_node, const FunctionLibraryDefinition* lookup_fld, FunctionLibraryDefinition* fld, bool passthrough_arg_to_retval = false) { VLOG(1) << "Propagate const into " << attr_name << " of node " << n->name(); NameAttrList func_attr; TF_RETURN_IF_ERROR(GetNodeAttr(n->def(), attr_name, &func_attr)); const FunctionDef* fdef = lookup_fld->Find(func_attr.name()); if (!fdef) { return errors::Internal("Cannot find function ", func_attr.name(), " for node ", n->name()); } std::unique_ptr<FunctionBody> fbody; TF_RETURN_IF_ERROR(FunctionDefToBodyHelper( 
*fdef, AttrSlice(&func_attr.attr()), lookup_fld, &fbody)); Graph* func_graph = fbody->graph; TF_RETURN_IF_ERROR( ReplaceArgUsageWithConstNode(func_graph, const_input_index_to_node)); if (passthrough_arg_to_retval) { TF_RETURN_IF_ERROR( ReplaceRetvalInputWithArg(func_graph, const_input_index_to_node)); } FunctionDef replace_fdef; string new_func_name = fld->UniqueFunctionName(absl::StrCat(func_attr.name(), "_const_")); const StackTracesMap* stack_traces = lookup_fld->GetStackTraces(func_attr.name()); TF_RETURN_IF_ERROR( GraphToFunctionDef(*func_graph, new_func_name, &replace_fdef)); if (stack_traces != nullptr) { TF_RETURN_IF_ERROR(fld->AddFunctionDef(replace_fdef, *stack_traces)); } else { TF_RETURN_IF_ERROR(fld->AddFunctionDef(replace_fdef, {})); } VLOG(1) << "replace func " << func_attr.name() << " with " << new_func_name; func_attr.set_name(new_func_name); n->ClearAttr(attr_name); n->AddAttr(attr_name, func_attr); TF_RETURN_IF_ERROR(CopyAssociatedFunctions(func_graph, lookup_fld, fld)); return absl::OkStatus(); } Status PropagateConstIntoIfNode(Graph* g, Node* if_node, const FunctionLibraryDefinition* lookup_fld, FunctionLibraryDefinition* fld) { absl::flat_hash_map<int, const Node*> const_input_index_to_node; for (int i = 1; i < if_node->num_inputs(); i++) { const Node* input_node; TF_RETURN_IF_ERROR(if_node->input_node(i, &input_node)); if (input_node->type_string() == "Const") { const_input_index_to_node[i - 1] = input_node; } } if (const_input_index_to_node.empty()) { return absl::OkStatus(); } for (const auto& attr_name : std::vector<string>{"then_branch", "else_branch"}) { TF_RETURN_IF_ERROR(PropagateConstIntoFuncAttr( if_node, attr_name, const_input_index_to_node, lookup_fld, fld)); } return absl::OkStatus(); } using GraphCache = absl::flat_hash_map<string, std::unique_ptr<FunctionBody>>; absl::StatusOr<FunctionBody*> FindOrInsert( GraphCache* cache, const NameAttrList& body_attr, const FunctionLibraryDefinition* lookup_fld, const FunctionLibraryDefinition* fallback_fld) { const string name = body_attr.name(); std::unique_ptr<FunctionBody>& value = (*cache)[name]; if (!value) { const FunctionDef* body_func = lookup_fld->Find(name); if (!body_func && fallback_fld != nullptr) { body_func = fallback_fld->Find(name); } if (!body_func) { return errors::Internal("Traverse: Cannot find body function ", name); } std::unique_ptr<FunctionBody> fbody; Status s = FunctionDefToBodyHelper(*body_func, AttrSlice(&body_attr.attr()), lookup_fld, &fbody); if (!s.ok() && fallback_fld != nullptr) { TF_RETURN_IF_ERROR(FunctionDefToBodyHelper( *body_func, AttrSlice(&body_attr.attr()), fallback_fld, &fbody)); } value = std::move(fbody); } return value.get(); } absl::StatusOr<bool> IsLoopInvariant( const FunctionBody* loop_body, int index, const FunctionLibraryDefinition* lookup_fld, const FunctionLibraryDefinition* fallback_fld, GraphCache* cache); absl::StatusOr<const Edge*> TraverseUnmodifiedPathBackward( const Edge* src, const FunctionLibraryDefinition* lookup_fld, const FunctionLibraryDefinition* fallback_fld, GraphCache* cache) { const Edge* e = src; VLOG(2) << "Traverse: Begin at " << e->DebugString(); while (IsConstTraversableOpType(e->src())) { VLOG(3) << e->DebugString(); if (e->src()->IsWhileNode()) { NameAttrList body_attr; TF_RETURN_IF_ERROR(GetNodeAttr(e->src()->def(), "body", &body_attr)); TF_ASSIGN_OR_RETURN( FunctionBody * fbody, FindOrInsert(cache, body_attr, lookup_fld, fallback_fld)); TF_ASSIGN_OR_RETURN(bool is_loop_invariant, IsLoopInvariant(fbody, e->src_output(), lookup_fld, 
fallback_fld, cache)); if (!is_loop_invariant) { VLOG(2) << "Non-loop-invariant: index " << e->src_output() << " of " << body_attr.name(); break; } } TF_RETURN_IF_ERROR(e->src()->input_edge(e->src_output(), &e)); } VLOG(2) << "Traverse: Finish at " << e->DebugString(); return e; } absl::StatusOr<bool> IsLoopInvariant( const FunctionBody* loop_body, int index, const FunctionLibraryDefinition* lookup_fld, const FunctionLibraryDefinition* fallback_fld, GraphCache* cache) { const Edge* e; TF_RETURN_IF_ERROR(loop_body->ret_nodes[index]->input_edge(0, &e)); TF_ASSIGN_OR_RETURN( const Edge* reachable, TraverseUnmodifiedPathBackward(e, lookup_fld, fallback_fld, cache)); if (reachable->src()->id() == loop_body->arg_nodes[index]->id()) { VLOG(2) << "Index " << index << " is loop invariant."; return true; } VLOG(2) << "Index " << index << " not loop invariant: " << "walk backward from " << e->src()->DebugString() << " to " << reachable->src()->DebugString() << " did not reach " << loop_body->arg_nodes[index]->DebugString(); return false; } Status PropagateConstIntoAndAroundWhileNode( Graph* g, Node* while_node, const FunctionLibraryDefinition* lookup_fld, FunctionLibraryDefinition* fld) { VLOG(1) << "Propagate const into " << while_node->name(); absl::flat_hash_map<int, const Node*> const_input_index_to_node; absl::flat_hash_map<int, Node*> const_input_index_to_mutable_node; NameAttrList body_attr; TF_RETURN_IF_ERROR(GetNodeAttr(while_node->def(), "body", &body_attr)); const string fn_name = body_attr.name(); const FunctionDef* body_func = lookup_fld->Find(fn_name); if (!body_func) { return errors::Internal("Propagate: Cannot find body function ", fn_name, " for While node ", while_node->name()); } std::unique_ptr<FunctionBody> fbody; TF_RETURN_IF_ERROR(FunctionDefToBodyHelper( *body_func, AttrSlice(&body_attr.attr()), lookup_fld, &fbody)); GraphCache cache; for (int i = 0; i < while_node->num_inputs(); i++) { if (i >= body_func->signature().output_arg_size()) { break; } const Edge* input_edge; TF_RETURN_IF_ERROR(while_node->input_edge(i, &input_edge)); TF_ASSIGN_OR_RETURN(input_edge, TraverseUnmodifiedPathBackward( input_edge, lookup_fld, fld, &cache)); if (!input_edge->src()->IsConstant()) { VLOG(2) << "Input " << i << " is not Const; is " << input_edge->src()->type_string(); continue; } TF_ASSIGN_OR_RETURN( bool is_loop_invariant, IsLoopInvariant(fbody.get(), i, lookup_fld, fld, &cache)); if (!is_loop_invariant) { VLOG(2) << "While state not loop-invariant; not propagating Const " << i; continue; } VLOG(2) << "While state is loop-invariant; propagating Const " << i; const_input_index_to_mutable_node[i] = input_edge->src(); const_input_index_to_node[i] = input_edge->src(); } if (const_input_index_to_node.empty()) { return absl::OkStatus(); } for (const auto& attr_name : std::vector<string>{"cond", "body"}) { TF_RETURN_IF_ERROR(PropagateConstIntoFuncAttr( while_node, attr_name, const_input_index_to_node, lookup_fld, fld, attr_name == "body")); } for (const auto& it : const_input_index_to_mutable_node) { TF_RETURN_IF_ERROR( ReplaceSrcOutputUsageWithNode(g, while_node, it.first, it.second)); } return absl::OkStatus(); } } absl::StatusOr<bool> IsLoopInvariant( const FunctionBody* loop_body, int index, const FunctionLibraryDefinition* lookup_fld) { GraphCache cache; return IsLoopInvariant(loop_body, index, lookup_fld, nullptr, &cache); } Status ValidateConfig(const tf2xla::Config& config) { std::set<string> names; for (const tf2xla::Feed& feed : config.feed()) { 
TF_RETURN_IF_ERROR(ValidateTensorId(feed.id())); TF_RETURN_IF_ERROR(TensorShape::IsValidShape(feed.shape())); TF_RETURN_IF_ERROR(CheckNameDuplicates("feed", feed.name(), &names)); } TF_RETURN_IF_ERROR(CheckFeedFetchNameConflicts("feed", names)); names.clear(); for (const tf2xla::Fetch& fetch : config.fetch()) { TF_RETURN_IF_ERROR(ValidateTensorId(fetch.id())); TF_RETURN_IF_ERROR(CheckNameDuplicates("fetch", fetch.name(), &names)); } TF_RETURN_IF_ERROR(CheckFeedFetchNameConflicts("fetch", names)); if (config.fetch().empty()) { return errors::InvalidArgument("fetches must be specified"); } return absl::OkStatus(); } Status AddPlaceholdersForFeeds( const tf2xla::Config& config, const OpRegistryInterface* op_registry, std::unordered_map<string, string>* feed_remapping, GraphDef* graph_def) { struct PlaceholderInfo { const tf2xla::Feed* feed = nullptr; string placeholder_name; DataType data_type = DT_INVALID; }; std::map<string, PlaceholderInfo> placeholder_info; for (int i = 0; i < config.feed_size(); ++i) { const tf2xla::Feed* feed = &config.feed(i); const string name_port = TensorIdToString(feed->id()); PlaceholderInfo& info = placeholder_info[name_port]; info.feed = feed; info.placeholder_name = absl::StrCat("aot_feed_", feed->id().output_index(), "/", feed->id().node_name()); (*feed_remapping)[name_port] = info.placeholder_name; } std::unordered_map<string, const NodeDef*> name_to_node; for (int i = 0; i < graph_def->node_size(); ++i) { name_to_node[graph_def->node(i).name()] = &graph_def->node(i); } for (auto it = placeholder_info.begin(); it != placeholder_info.end(); ++it) { PlaceholderInfo& info = it->second; const tf2xla::TensorId& feed_id = info.feed->id(); auto node_it = name_to_node.find(feed_id.node_name()); if (node_it == name_to_node.end()) { return errors::NotFound("Can't find feed node: ", TensorIdToString(feed_id)); } const NodeDef* existing = node_it->second; if (info.feed->type() != DT_INVALID) { info.data_type = info.feed->type(); } else { GraphDef gd; *gd.mutable_versions() = graph_def->versions(); *gd.add_node() = *existing; MergeDebugInfo(NodeDebugInfo(*existing), gd.mutable_node(0)); TF_RETURN_IF_ERROR( AddDefaultAttrsToGraphDef(&gd, *op_registry, 0 )); Graph g(op_registry); g.set_versions(graph_def->versions()); TF_ASSIGN_OR_RETURN(Node * feed_node, g.AddNode(gd.node(0))); if (info.feed->id().output_index() < feed_node->num_outputs()) { info.data_type = BaseType(feed_node->output_type(info.feed->id().output_index())); } else { return errors::InvalidArgument( "Invalid output_index ", info.feed->id().output_index(), " for feed node ", info.feed->id().node_name()); } } } for (auto it = placeholder_info.begin(); it != placeholder_info.end(); ++it) { const PlaceholderInfo& info = it->second;
#include "tensorflow/compiler/tf2xla/tf2xla_util.h" #include "absl/strings/match.h" #include "absl/strings/str_cat.h" #include "absl/strings/string_view.h" #include "tensorflow/cc/framework/ops.h" #include "tensorflow/cc/ops/data_flow_ops.h" #include "tensorflow/cc/ops/function_ops.h" #include "tensorflow/cc/ops/functional_ops.h" #include "tensorflow/cc/ops/list_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/compiler/tf2xla/sharding_util.h" #include "tensorflow/core/common_runtime/function.h" #include "tensorflow/core/common_runtime/graph_optimizer.h" #include "tensorflow/core/common_runtime/process_function_library_runtime.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/graph_to_functiondef.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/graph/graph.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/public/version.h" namespace tensorflow { namespace { void ExpectErrorContains(const Status& status, absl::string_view str) { EXPECT_NE(absl::OkStatus(), status); EXPECT_TRUE(absl::StrContains(status.message(), str)) << "expected error: " << status.message() << " to contain: " << str; } TEST(ValidateConfig, Good) { tf2xla::Config config; tf2xla::Feed* feed = config.add_feed(); feed->mutable_id()->set_node_name("foo"); feed->mutable_id()->set_output_index(123); feed->set_name("foo_debug"); feed = config.add_feed(); feed->mutable_id()->set_node_name("bar"); feed->mutable_id()->set_output_index(0); tf2xla::Fetch* fetch = config.add_fetch(); fetch->mutable_id()->set_node_name("baz"); fetch->mutable_id()->set_output_index(456); fetch->set_name("baz_debug"); fetch = config.add_fetch(); fetch->mutable_id()->set_node_name("banana"); fetch->mutable_id()->set_output_index(0); TF_EXPECT_OK(ValidateConfig(config)); } TEST(ValidateConfig, BadEmpty) { tf2xla::Config config; ExpectErrorContains(ValidateConfig(config), "fetches must be specified"); } TEST(ValidateConfig, BadNoFetch) { tf2xla::Config config; tf2xla::Feed* feed = config.add_feed(); feed->mutable_id()->set_node_name("foo"); ExpectErrorContains(ValidateConfig(config), "fetches must be specified"); } TEST(ValidateConfig, BadFeedNodeName) { tf2xla::Config config; config.add_feed(); ExpectErrorContains(ValidateConfig(config), "node_name must be non-empty"); } TEST(ValidateConfig, BadFeedOutputIndex) { tf2xla::Config config; tf2xla::Feed* feed = config.add_feed(); feed->mutable_id()->set_node_name("foo"); feed->mutable_id()->set_output_index(-1); ExpectErrorContains(ValidateConfig(config), "output_index must be positive"); } TEST(ValidateConfig, BadFetchNodeName) { tf2xla::Config config; tf2xla::Feed* feed = config.add_feed(); feed->mutable_id()->set_node_name("foo"); config.add_fetch(); ExpectErrorContains(ValidateConfig(config), "node_name must be non-empty"); } TEST(ValidateConfig, BadFetchOutputIndex) { tf2xla::Config config; tf2xla::Feed* feed = config.add_feed(); feed->mutable_id()->set_node_name("foo"); tf2xla::Fetch* fetch = config.add_fetch(); fetch->mutable_id()->set_node_name("bar"); fetch->mutable_id()->set_output_index(-1); ExpectErrorContains(ValidateConfig(config), "output_index must be positive"); } TEST(ValidateConfig, DuplicateFeedName) { tf2xla::Config config; tf2xla::Feed* feed = config.add_feed(); feed->mutable_id()->set_node_name("foo"); feed->set_name("dup"); feed = config.add_feed(); 
feed->mutable_id()->set_node_name("bar"); feed->set_name("dup"); ExpectErrorContains(ValidateConfig(config), "duplicate feed name"); } TEST(ValidateConfig, DuplicateFetchName) { tf2xla::Config config; tf2xla::Feed* feed = config.add_feed(); feed->mutable_id()->set_node_name("foo"); tf2xla::Fetch* fetch = config.add_fetch(); fetch->mutable_id()->set_node_name("bar"); fetch->set_name("dup"); fetch = config.add_fetch(); fetch->mutable_id()->set_node_name("baz"); fetch->set_name("dup"); ExpectErrorContains(ValidateConfig(config), "duplicate fetch name"); } TEST(ValidateConfig, ConflictingFeedName) { tf2xla::Config config; tf2xla::Feed* feed = config.add_feed(); feed->mutable_id()->set_node_name("foo"); feed->set_name("conflict"); feed = config.add_feed(); feed->mutable_id()->set_node_name("bar"); feed->set_name("conflict_data"); ExpectErrorContains(ValidateConfig(config), "conflicting feed name"); } TEST(ValidateConfig, ConflictingFetchName) { tf2xla::Config config; tf2xla::Feed* feed = config.add_feed(); feed->mutable_id()->set_node_name("foo"); tf2xla::Fetch* fetch = config.add_fetch(); fetch->mutable_id()->set_node_name("bar"); fetch->set_name("conflict"); fetch = config.add_fetch(); fetch->mutable_id()->set_node_name("baz"); fetch->set_name("conflict_data"); ExpectErrorContains(ValidateConfig(config), "conflicting fetch name"); } static tf2xla::Config FetchesConfig(std::vector<string> fetches) { tf2xla::Config config; for (const auto& fetch_node_name : fetches) { auto* fetch = config.add_fetch(); fetch->set_name(absl::StrCat("fetch_", fetch_node_name)); fetch->mutable_id()->set_node_name(fetch_node_name); } return config; } TEST(PruneGraphDefInto, Basic) { GraphDef def; auto* n = def.add_node(); n->set_name("a"); n->add_input("b:0"); n->add_input("^c"); GraphDef copy; ExpectErrorContains(PruneGraphDefInto(FetchesConfig({"missing"}), def, &copy), "node missing needed"); ExpectErrorContains(PruneGraphDefInto(FetchesConfig({"a"}), def, &copy), "node b needed"); n = def.add_node(); n->set_name("b"); ExpectErrorContains(PruneGraphDefInto(FetchesConfig({"a"}), def, &copy), "node c needed"); n->add_input("d:1"); n = def.add_node(); n->set_name("c"); n->add_input("d:1"); n = def.add_node(); n->set_name("d"); TF_EXPECT_OK(PruneGraphDefInto(FetchesConfig({"a"}), def, &copy)); EXPECT_EQ(def.DebugString(), copy.DebugString()); GraphDef pruned_a = copy; n = def.add_node(); n->set_name("e"); n->add_input("^d"); n->add_input("b:2"); copy.Clear(); TF_EXPECT_OK(PruneGraphDefInto(FetchesConfig({"a"}), def, &copy)); EXPECT_EQ(pruned_a.DebugString(), copy.DebugString()); copy.Clear(); TF_EXPECT_OK(PruneGraphDefInto(FetchesConfig({"a", "e"}), def, &copy)); EXPECT_EQ(def.DebugString(), copy.DebugString()); } TEST(SetNodeShardingFromNeighbors, Basic) { Scope scope = Scope::NewRootScope().ExitOnError(); auto a = ops::_Arg(scope.WithOpName("A"), DT_INT32, 0); auto b = ops::_Arg(scope.WithOpName("B"), DT_INT32, 1); auto c = ops::Add(scope.WithOpName("C"), a, b); std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); TF_ASSERT_OK(scope.ToGraph(graph.get())); Node* a_node = nullptr; Node* b_node = nullptr; Node* c_node = nullptr; for (Node* n : graph->nodes()) { if (n->name() == "A") a_node = n; if (n->name() == "B") b_node = n; if (n->name() == "C") c_node = n; } const int num_cores_per_replica = 4; a_node->set_assigned_device_name("foo"); EXPECT_FALSE(SetNodeShardingFromNeighbors(c_node, false).ok()); a_node->set_assigned_device_name("/device:TPU_REPLICATED_CORE:2"); 
TF_ASSERT_OK(SetNodeShardingFromNeighbors(c_node, false)); auto parse_status = ParseShardingFromDevice(*c_node, num_cores_per_replica, false); TF_ASSERT_OK(parse_status.status()); ASSERT_TRUE(parse_status.value().has_value()); EXPECT_EQ(2, parse_status.value().value().tile_assignment_devices(0)); b_node->set_assigned_device_name("/device:TPU_REPLICATED_CORE:1"); TF_ASSERT_OK(SetNodeShardingFromNeighbors(c_node, false)); parse_status = ParseShardingFromDevice(*c_node, num_cores_per_replica, false); TF_ASSERT_OK(parse_status.status()); ASSERT_TRUE(parse_status.value().has_value()); EXPECT_EQ(1, parse_status.value().value().tile_assignment_devices(0)); TF_ASSERT_OK(SetNodeShardingFromNeighbors(a_node, true)); parse_status = ParseShardingFromDevice(*a_node, num_cores_per_replica, false); TF_ASSERT_OK(parse_status.status()); ASSERT_TRUE(parse_status.value().has_value()); EXPECT_EQ(1, parse_status.value().value().tile_assignment_devices(0)); } REGISTER_OP("One") .Output("y: T") .Attr("T: {float, double, int32, int64}") .Doc(R"doc( Returns a tensor with a single element (1) of type T. y: A scalar in type T. )doc"); TEST(CachedFunctionHandles, Basic) { FunctionDef func = FunctionDefHelper::Define( "TestFunc", {}, {"y:T"}, {"T:{float, double, int32, int64}"}, { {{"y"}, "One", {}, {{"T", "$T"}}}, }); FunctionDefLibrary proto; *proto.add_function() = func; FunctionLibraryDefinition fld(OpRegistry::Global(), proto); std::unique_ptr<ProcessFunctionLibraryRuntime> pflr( new ProcessFunctionLibraryRuntime( nullptr, Env::Default(), nullptr, TF_GRAPH_DEF_VERSION, &fld, OptimizerOptions())); FunctionLibraryRuntime* flr = pflr->GetFLR(ProcessFunctionLibraryRuntime::kDefaultFLRDevice); CachedFunctionHandles cached_function_handles(flr); FunctionLibraryRuntime::Handle first_handle; AttrValue attr; attr.set_type(DT_FLOAT); AttrValueMap attrs; attrs["T"] = attr; TF_ASSERT_OK(cached_function_handles.GetOrInstantiate( "TestFunc", AttrSlice(&attrs), &first_handle)); const FunctionBody* body = flr->GetFunctionBody(first_handle); EXPECT_NE(body, nullptr); FunctionLibraryRuntime::Handle second_handle; TF_ASSERT_OK(cached_function_handles.GetOrInstantiate( "TestFunc", AttrSlice(&attrs), &second_handle)); EXPECT_EQ(first_handle, second_handle); attr.set_type(DT_INT32); attrs["T"] = attr; FunctionLibraryRuntime::Handle third_handle; TF_ASSERT_OK(cached_function_handles.GetOrInstantiate( "TestFunc", AttrSlice(&attrs), &third_handle)); EXPECT_NE(first_handle, third_handle); TF_EXPECT_OK(cached_function_handles.ReleaseAllHandles()); } TEST(PropagateConstIntoFunctionalNodes, WhileLoopWithResourceInput) { FunctionLibraryDefinition fld(OpRegistry::Global(), FunctionDefLibrary()); { Scope scope = Scope::NewRootScope().ExitOnError(); auto pred = ops::_Arg(scope.WithOpName("pred"), DT_BOOL, 0); auto input = ops::_Arg(scope.WithOpName("input"), DT_RESOURCE, 1); auto ret = ops::_Retval(scope.WithOpName("ret"), pred, 0); Graph graph(OpRegistry::Global()); TF_ASSERT_OK(scope.ToGraph(&graph)); FunctionDef cond_fdef; TF_ASSERT_OK(GraphToFunctionDef(graph, "cond", &cond_fdef)); TF_ASSERT_OK(fld.AddFunctionDef(cond_fdef)); FunctionDef body_fdef; TF_ASSERT_OK(GraphToFunctionDef(graph, "body", &body_fdef)); TF_ASSERT_OK(fld.AddFunctionDef(body_fdef)); } Scope scope = Scope::NewRootScope().ExitOnError(); auto pred = ops::Const(scope.WithOpName("pred"), false, TensorShape({})); auto input = ops::Const(scope.WithOpName("input"), 0, TensorShape({})); NameAttrList cond_fn, body_fn; cond_fn.set_name("cond"); body_fn.set_name("body"); auto 
while_op = ops::While(scope.WithOpName("while"), std::initializer_list<Input>{pred, input}, cond_fn, body_fn); Graph graph(OpRegistry::Global()); TF_ASSERT_OK(scope.ToGraph(&graph)); TF_EXPECT_OK(PropagateConstIntoFunctionalNodes(&graph, &fld, &fld)); } TEST(PropagateConstIntoFunctionalNodes, CopiedConstNodeHasUniqueName) { FunctionLibraryDefinition fld(OpRegistry::Global(), FunctionDefLibrary()); { Scope scope = Scope::NewRootScope().ExitOnError(); auto pred = ops::_Arg(scope.WithOpName("arg0"), DT_BOOL, 0); auto input = ops::_Arg(scope.WithOpName("arg1"), DT_BOOL, 1); auto duplicate_name = ops::NoOp(scope.WithOpName("duplicate_name")); auto ret = ops::_Retval(scope.WithOpName("ret"), pred, 0); Graph graph(OpRegistry::Global()); TF_ASSERT_OK(scope.ToGraph(&graph)); FunctionDef cond_fdef; TF_ASSERT_OK(GraphToFunctionDef(graph, "cond", &cond_fdef)); TF_ASSERT_OK(fld.AddFunctionDef(cond_fdef)); FunctionDef body_fdef; TF_ASSERT_OK(GraphToFunctionDef(graph, "body", &body_fdef)); TF_ASSERT_OK(fld.AddFunctionDef(body_fdef)); } Scope scope = Scope::NewRootScope().ExitOnError(); auto pred = ops::Const(scope.WithOpName("duplicate_name"), false, TensorShape({})); auto input = ops::Const(scope.WithOpName("input"), false, TensorShape({})); NameAttrList cond_fn, body_fn; cond_fn.set_name("cond"); body_fn.set_name("body"); auto while_op = ops::While(scope.WithOpName("while"), std::initializer_list<Input>{pred, input}, cond_fn, body_fn); Graph graph(OpRegistry::Global()); TF_ASSERT_OK(scope.ToGraph(&graph)); TF_EXPECT_OK(PropagateConstIntoFunctionalNodes(&graph, &fld, &fld)); auto node_name_index = graph.BuildNodeNameIndex(); Node* while_node = node_name_index["while"]; ASSERT_NE(while_node, nullptr); TF_ASSERT_OK(GetNodeAttr(while_node->def(), "body", &body_fn)); const FunctionDef* rewritten_body_fn = fld.Find(body_fn.name()); ASSERT_NE(rewritten_body_fn, nullptr); std::unordered_map<string, NodeDef> nodes; for (const NodeDef& node_def : rewritten_body_fn->node_def()) { nodes[node_def.name()] = node_def; } auto noop_def = nodes.find("duplicate_name"); ASSERT_NE(noop_def, nodes.end()); EXPECT_EQ(noop_def->second.op(), "NoOp"); auto const_def = nodes.find("duplicate_name/_0"); ASSERT_NE(const_def, nodes.end()); EXPECT_EQ(const_def->second.op(), "Const"); } TEST(PropagateConstIntoFunctionalNodes, RewriteTensorListWithConstMember) { FunctionLibraryDefinition fld(OpRegistry::Global(), FunctionDefLibrary()); { Scope scope = Scope::NewRootScope().ExitOnError(); auto input = ops::_Arg(scope.WithOpName("arg"), DT_VARIANT, 0); auto result = ops::Const(scope.WithOpName("result"), false, TensorShape({})); auto ret = ops::_Retval(scope.WithOpName("ret"), result, 0); Graph graph(OpRegistry::Global()); TF_ASSERT_OK(scope.ToGraph(&graph)); FunctionDef fdef; TF_ASSERT_OK(GraphToFunctionDef(graph, "cond", &fdef)); TF_ASSERT_OK(fld.AddFunctionDef(fdef)); } { Scope scope = Scope::NewRootScope().ExitOnError(); auto input = ops::_Arg(scope.WithOpName("arg"), DT_VARIANT, 0); auto element = ops::Const(scope.WithOpName("element"), 0, TensorShape({})); auto push = ops::TensorListPushBack(scope.WithOpName("push"), input, element); auto ret = ops::_Retval(scope.WithOpName("ret"), push.output_handle, 0); Graph graph(OpRegistry::Global()); TF_ASSERT_OK(scope.ToGraph(&graph)); FunctionDef fdef; TF_ASSERT_OK(GraphToFunctionDef(graph, "fwd_body", &fdef)); TF_ASSERT_OK(fld.AddFunctionDef(fdef)); } { Scope scope = Scope::NewRootScope().ExitOnError(); auto input = ops::_Arg(scope.WithOpName("arg"), DT_VARIANT, 0); auto shape = 
ops::Const(scope.WithOpName("element"), -1, TensorShape({})); auto pop = ops::TensorListPopBack(scope.WithOpName("pop"), input, shape, DT_INT32); auto identity = ops::Identity(scope.WithOpName("identity"), pop.tensor); auto ret = ops::_Retval(scope.WithOpName("ret"), pop.output_handle, 0); Graph graph(OpRegistry::Global()); TF_ASSERT_OK(scope.ToGraph(&graph)); FunctionDef fdef; TF_ASSERT_OK(GraphToFunctionDef(graph, "bwd_body", &fdef)); TF_ASSERT_OK(fld.AddFunctionDef(fdef)); } Scope scope = Scope::NewRootScope().ExitOnError(); auto shape = ops::Const(scope.WithOpName("element"), -1, TensorShape({})); auto max_num_elements = ops::Const(scope.WithOpName("max_num_elements"), 10, TensorShape({})); auto tl = ops::EmptyTensorList(scope.WithOpName("tl"), shape, max_num_elements, DT_INT32); NameAttrList cond_fn, fwd_body_fn, bwd_body_fn; cond_fn.set_name("cond"); fwd_body_fn.set_name("fwd_body"); bwd_body_fn.set_name("bwd_body"); auto fwd_while_op = ops::While(scope.WithOpName("fwd_while"), std::initializer_list<Input>{tl}, cond_fn, fwd_body_fn); auto bwd_while_op = ops::While(scope.WithOpName("bwd_while"), std::initializer_list<Input>{fwd_while_op.output[0]}, cond_fn, bwd_body_fn); Graph graph(OpRegistry::Global()); TF_ASSERT_OK(scope.ToGraph(&graph)); TF_EXPECT_OK(RewriteTensorListWithConstElement(&graph, &fld)); const FunctionDef* bwd_body = fld.Find("bwd_body_tl_rewrite_0"); ASSERT_NE(bwd_body, nullptr); std::unique_ptr<FunctionBody> bwd_fbody; TF_CHECK_OK( FunctionDefToBodyHelper(*bwd_body, AttrSlice(), &fld, &bwd_fbody)); auto node_name_index = bwd_fbody->graph->BuildNodeNameIndex(); const Node* identity = node_name_index.at("identity"); ASSERT_NE(identity, nullptr); const Node* input; TF_ASSERT_OK(identity->input_node(0, &input)); EXPECT_EQ(input->type_string(), "Const"); } } }
1094
cpp
tensorflow/tensorflow
tf2xla
tensorflow/compiler/tf2xla/tf2xla.cc
tensorflow/compiler/tf2xla/tf2xla_test.cc
#ifndef TENSORFLOW_COMPILER_TF2XLA_TF2XLA_H_ #define TENSORFLOW_COMPILER_TF2XLA_TF2XLA_H_ #include "absl/strings/string_view.h" #include "tensorflow/compiler/tf2xla/tf2xla.pb.h" #include "xla/client/client.h" #include "xla/client/xla_computation.h" #include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/platform/status.h" namespace tensorflow { Status ConvertGraphDefToXla(GraphDef graph_def, const tf2xla::Config& config, xla::Client* client, xla::XlaComputation* computation); Status ConvertGraphDefToXlaViaMlir( GraphDef graph_def, const tf2xla::Config& config, xla::XlaComputation* computation, absl::string_view debug_info_filename, absl::string_view debug_info_path_begin_marker); } #endif #include "tensorflow/compiler/tf2xla/tf2xla.h" #include <map> #include <memory> #include <string> #include <unordered_map> #include <utility> #include <vector> #include "absl/strings/str_cat.h" #include "absl/strings/str_join.h" #include "tensorflow/compiler/aot/aot_only_var_handle_op.h" #include "tensorflow/compiler/tf2xla/graph_compiler_util.h" #include "tensorflow/compiler/tf2xla/shape_util.h" #include "tensorflow/compiler/tf2xla/tf2xla_util.h" #include "tensorflow/compiler/tf2xla/xla_compiler.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/client/xla_computation.h" #include "tensorflow/core/common_runtime/function.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/framework/graph_def_util.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/versions.pb.h" #include "tensorflow/core/graph/algorithm.h" #include "tensorflow/core/graph/graph.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/platform/errors.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/platform/types.h" #include "tensorflow/core/util/dump_graph.h" namespace tensorflow { namespace { Status ConvertGraphToXla(std::unique_ptr<Graph> graph, const tf2xla::Config& config, xla::Client* client, xla::XlaComputation* computation) { XlaOpRegistry::RegisterCompilationKernels(); for (Node* node : graph->nodes()) { node->set_assigned_device_name( absl::StrCat("/device:", DEVICE_CPU_XLA_JIT)); } std::vector<XlaCompiler::Argument> xla_args; TF_RETURN_IF_ERROR(CreateXlaArgs(*graph, &xla_args)); PopulateXlaArgs(config, &xla_args); XlaCompiler::Options compiler_options; compiler_options.client = client; compiler_options.device_type = DeviceType(DEVICE_CPU_XLA_JIT); compiler_options.flib_def = &graph->flib_def(); compiler_options.graph_def_version = graph->versions().producer(); compiler_options.allow_cpu_custom_calls = true; XlaCompiler compiler(compiler_options); XlaCompiler::CompilationResult result; XlaCompiler::CompileOptions options; options.alias_resource_update = true; TF_RETURN_IF_ERROR(compiler.CompileGraph( options, "tfcompile", std::move(graph), xla_args, &result)); *computation = std::move(*result.computation); int num_const_results = 0; for (int i = 0, end = result.outputs.size(); i < end; ++i) { if (result.outputs[i].is_constant) { ++num_const_results; LOG(ERROR) << "ConstRetVal index:" << i << " value:" << result.outputs[i].constant_value.DebugString(); } } if (num_const_results > 0) { return errors::Unimplemented( "Conversion from TensorFlow graph to XLA resulted in ", num_const_results, " constant 
results. The configuration of " "the output args (i.e. fetch ids) is probably wrong."); } { std::vector<bool> updated_inputs(xla_args.size()); for (const XlaCompiler::ResourceUpdate& update : result.resource_updates) { updated_inputs[update.input_index] = true; } int64_t input_index = xla_args.size() - config.variable_size(); for (const tf2xla::Variable& variable : config.variable()) { if (variable.readonly() == updated_inputs[input_index]) { return errors::InvalidArgument( "Variable \"", variable.node_name(), "\" is marked as ", variable.readonly() ? "" : "not ", "readonly, but is ", updated_inputs[input_index] ? "" : "not ", "modified by the computation."); } ++input_index; } } return absl::OkStatus(); } Status ConvertVarHandlesToAotVarHandles(GraphDef* graph_def) { auto update_var_handle_op_node = [](NodeDef& node) -> Status { if (node.op() == "VarHandleOp") { node.set_op(tfcompile::kXlaAotOnlyVarHandleOp); const auto& it = node.attr().find("allowed_devices"); if (it != node.attr().end()) { if (!it->second.list().s().empty()) { return errors::InvalidArgument( "VarHandleOp with non-empty allowed devices is not supported."); } node.mutable_attr()->erase("allowed_devices"); } } return absl::OkStatus(); }; for (auto& node : *graph_def->mutable_node()) { TF_RETURN_IF_ERROR(update_var_handle_op_node(node)); } for (auto& fn : *graph_def->mutable_library()->mutable_function()) { for (auto& node : *fn.mutable_node_def()) { TF_RETURN_IF_ERROR(update_var_handle_op_node(node)); } } return absl::OkStatus(); } } Status ConvertGraphDefToXla(GraphDef graph_def, const tf2xla::Config& config, xla::Client* client, xla::XlaComputation* computation) { std::unique_ptr<Graph> graph; TF_RETURN_IF_ERROR(ConvertVarHandlesToAotVarHandles(&graph_def)); TF_RETURN_IF_ERROR(InitGraph(graph_def, config, &graph)); TF_RETURN_IF_ERROR( ConvertGraphToXla(std::move(graph), config, client, computation)); return absl::OkStatus(); } }
#include "tensorflow/compiler/tf2xla/tf2xla.h" #include <vector> #include "tensorflow/compiler/tf2xla/tf2xla.pb.h" #include "xla/client/client_library.h" #include "xla/client/local_client.h" #include "xla/client/xla_computation.h" #include "xla/literal.h" #include "xla/literal_util.h" #include "tensorflow/core/framework/attr_value.pb.h" #include "tensorflow/core/framework/attr_value_util.h" #include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_shape.pb.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/statusor.h" #include "tensorflow/core/platform/stringpiece.h" #include "tensorflow/core/platform/test.h" #include "tsl/platform/tensor_float_32_utils.h" namespace tensorflow { namespace { class ConvertGraphDefToXlaWithTF32Disabled : public ::testing::Test { public: ConvertGraphDefToXlaWithTF32Disabled() { tsl::enable_tensor_float_32_execution(false); } ~ConvertGraphDefToXlaWithTF32Disabled() override { tsl::enable_tensor_float_32_execution(true); } }; AttrValue TypeAttrValue(DataType type) { AttrValue attr_value; SetAttrValue(type, &attr_value); return attr_value; } AttrValue StringAttrValue(StringPiece str) { AttrValue attr_value; SetAttrValue(str, &attr_value); return attr_value; } AttrValue IntAttrValue(int i) { AttrValue attr_value; SetAttrValue(i, &attr_value); return attr_value; } AttrValue IntVectorAttrValue(const std::vector<int>& ints) { AttrValue attr_value; SetAttrValue(ints, &attr_value); return attr_value; } TensorShapeProto TensorShape(const std::vector<int>& dims) { TensorShapeProto shape; for (int i = 0; i < dims.size(); ++i) { shape.add_dim(); shape.mutable_dim(i)->set_size(dims[i]); } return shape; } GraphDef SumGraph() { GraphDef graph_def; NodeDef* x = graph_def.add_node(); x->set_name("x"); x->set_op("Placeholder"); (*x->mutable_attr())["dtype"] = TypeAttrValue(DT_INT32); NodeDef* y = graph_def.add_node(); y->set_name("y"); y->set_op("Placeholder"); (*y->mutable_attr())["dtype"] = TypeAttrValue(DT_INT32); NodeDef* sum = graph_def.add_node(); sum->set_name("sum"); sum->set_op("Add"); sum->add_input("x"); sum->add_input("y"); (*sum->mutable_attr())["T"] = TypeAttrValue(DT_INT32); return graph_def; } tf2xla::Config SumConfig() { tf2xla::Config config; config.add_feed()->mutable_id()->set_node_name("x"); config.add_feed()->mutable_id()->set_node_name("y"); config.add_fetch()->mutable_id()->set_node_name("sum"); return config; } TEST(ConvertGraphDefToXla, Sum) { GraphDef graph_def = SumGraph(); tf2xla::Config config = SumConfig(); xla::LocalClient* client = xla::ClientLibrary::LocalClientOrDie(); xla::XlaComputation computation; TF_EXPECT_OK(ConvertGraphDefToXla(graph_def, config, client, &computation)); auto x_literal = xla::LiteralUtil::CreateR0<int32>(10); auto y_literal = xla::LiteralUtil::CreateR0<int32>(32); auto x_global_or = client->TransferToServer(x_literal); auto y_global_or = client->TransferToServer(y_literal); TF_EXPECT_OK(x_global_or.status()); TF_EXPECT_OK(y_global_or.status()); std::unique_ptr<xla::GlobalData> x_global = std::move(x_global_or.value()); std::unique_ptr<xla::GlobalData> y_global = std::move(y_global_or.value()); auto result_or = client->ExecuteAndTransfer(computation, {x_global.get(), y_global.get()}); TF_EXPECT_OK(result_or.status()); xla::Literal result = std::move(result_or.value()); EXPECT_EQ("(\ns32[] 42\n)", 
result.ToString()); config.mutable_feed(0)->mutable_id()->set_output_index( 123); EXPECT_TRUE(errors::IsInvalidArgument( ConvertGraphDefToXla(graph_def, config, client, &computation))); } GraphDef EinsumGraph() { GraphDef graph_def; NodeDef* x = graph_def.add_node(); x->set_name("x"); x->set_op("Placeholder"); (*x->mutable_attr())["dtype"] = TypeAttrValue(DT_FLOAT); NodeDef* y = graph_def.add_node(); y->set_name("y"); y->set_op("Placeholder"); (*y->mutable_attr())["dtype"] = TypeAttrValue(DT_FLOAT); NodeDef* einsum = graph_def.add_node(); einsum->set_name("einsum"); einsum->set_op("Einsum"); einsum->add_input("x"); einsum->add_input("y"); (*einsum->mutable_attr())["equation"] = StringAttrValue("ij,jk->ik"); (*einsum->mutable_attr())["T"] = TypeAttrValue(DT_FLOAT); (*einsum->mutable_attr())["N"] = IntAttrValue(2); return graph_def; } tf2xla::Config EinsumConfig() { tf2xla::Config config; tf2xla::Feed* x_feed = config.add_feed(); x_feed->mutable_id()->set_node_name("x"); *x_feed->mutable_shape() = TensorShape({2, 2}); tf2xla::Feed* y_feed = config.add_feed(); y_feed->mutable_id()->set_node_name("y"); *y_feed->mutable_shape() = TensorShape({2, 2}); config.add_fetch()->mutable_id()->set_node_name("einsum"); return config; } TEST(ConvertGraphDefToXla, EinsumIsConvertedToDotWithDefaultPrecision) { GraphDef graph_def = EinsumGraph(); tf2xla::Config config = EinsumConfig(); xla::LocalClient* client = xla::ClientLibrary::LocalClientOrDie(); xla::XlaComputation computation; TF_EXPECT_OK(ConvertGraphDefToXla(graph_def, config, client, &computation)); int num_dots = 0; const xla::HloModuleProto& module_proto = computation.proto(); for (const xla::HloComputationProto& computation_proto : module_proto.computations()) { for (const xla::HloInstructionProto& instruction_proto : computation_proto.instructions()) { if (instruction_proto.opcode() == "dot") { num_dots++; ASSERT_EQ(instruction_proto.precision_config().operand_precision_size(), 2); EXPECT_EQ(instruction_proto.precision_config().operand_precision(0), xla::PrecisionConfig::DEFAULT); EXPECT_EQ(instruction_proto.precision_config().operand_precision(1), xla::PrecisionConfig::DEFAULT); } } } EXPECT_EQ(num_dots, 1); } TEST_F(ConvertGraphDefToXlaWithTF32Disabled, EinsumIsConvertedToDotWithHighestPrecision) { GraphDef graph_def = EinsumGraph(); tf2xla::Config config = EinsumConfig(); xla::LocalClient* client = xla::ClientLibrary::LocalClientOrDie(); xla::XlaComputation computation; TF_EXPECT_OK(ConvertGraphDefToXla(graph_def, config, client, &computation)); int num_dots = 0; const xla::HloModuleProto& module_proto = computation.proto(); for (const xla::HloComputationProto& computation_proto : module_proto.computations()) { for (const xla::HloInstructionProto& instruction_proto : computation_proto.instructions()) { if (instruction_proto.opcode() == "dot") { num_dots++; ASSERT_EQ(instruction_proto.precision_config().operand_precision_size(), 2); EXPECT_EQ(instruction_proto.precision_config().operand_precision(0), xla::PrecisionConfig::HIGHEST); EXPECT_EQ(instruction_proto.precision_config().operand_precision(1), xla::PrecisionConfig::HIGHEST); } } } EXPECT_EQ(num_dots, 1); } GraphDef Conv2DGraph() { GraphDef graph_def; NodeDef* x = graph_def.add_node(); x->set_name("x"); x->set_op("Placeholder"); (*x->mutable_attr())["dtype"] = TypeAttrValue(DT_FLOAT); NodeDef* y = graph_def.add_node(); y->set_name("y"); y->set_op("Placeholder"); (*y->mutable_attr())["dtype"] = TypeAttrValue(DT_FLOAT); NodeDef* einsum = graph_def.add_node(); einsum->set_name("conv2d"); 
einsum->set_op("Conv2D"); einsum->add_input("x"); einsum->add_input("y"); (*einsum->mutable_attr())["T"] = TypeAttrValue(DT_FLOAT); (*einsum->mutable_attr())["padding"] = StringAttrValue("VALID"); (*einsum->mutable_attr())["strides"] = IntVectorAttrValue({1, 1, 1, 1}); return graph_def; } tf2xla::Config Conv2DConfig() { tf2xla::Config config; tf2xla::Feed* x_feed = config.add_feed(); x_feed->mutable_id()->set_node_name("x"); *x_feed->mutable_shape() = TensorShape({1, 1, 2, 2}); tf2xla::Feed* y_feed = config.add_feed(); y_feed->mutable_id()->set_node_name("y"); *y_feed->mutable_shape() = TensorShape({1, 1, 2, 2}); config.add_fetch()->mutable_id()->set_node_name("conv2d"); return config; } TEST(ConvertGraphDefToXla, Conv2DIsConvertedToConvolutionWithDefaultPrecision) { GraphDef graph_def = Conv2DGraph(); tf2xla::Config config = Conv2DConfig(); xla::LocalClient* client = xla::ClientLibrary::LocalClientOrDie(); xla::XlaComputation computation; TF_EXPECT_OK(ConvertGraphDefToXla(graph_def, config, client, &computation)); int num_convolutions = 0; const xla::HloModuleProto& module_proto = computation.proto(); for (const xla::HloComputationProto& computation_proto : module_proto.computations()) { for (const xla::HloInstructionProto& instruction_proto : computation_proto.instructions()) { if (instruction_proto.opcode() == "convolution") { num_convolutions++; ASSERT_EQ(instruction_proto.precision_config().operand_precision_size(), 2); EXPECT_EQ(instruction_proto.precision_config().operand_precision(0), xla::PrecisionConfig::DEFAULT); EXPECT_EQ(instruction_proto.precision_config().operand_precision(1), xla::PrecisionConfig::DEFAULT); } } } EXPECT_EQ(num_convolutions, 1); } TEST_F(ConvertGraphDefToXlaWithTF32Disabled, Conv2DIsConvertedToConvolutionWithHighestPrecision) { GraphDef graph_def = Conv2DGraph(); tf2xla::Config config = Conv2DConfig(); xla::LocalClient* client = xla::ClientLibrary::LocalClientOrDie(); xla::XlaComputation computation; TF_EXPECT_OK(ConvertGraphDefToXla(graph_def, config, client, &computation)); int num_convolutions = 0; const xla::HloModuleProto& module_proto = computation.proto(); for (const xla::HloComputationProto& computation_proto : module_proto.computations()) { for (const xla::HloInstructionProto& instruction_proto : computation_proto.instructions()) { if (instruction_proto.opcode() == "convolution") { num_convolutions++; ASSERT_EQ(instruction_proto.precision_config().operand_precision_size(), 2); EXPECT_EQ(instruction_proto.precision_config().operand_precision(0), xla::PrecisionConfig::HIGHEST); EXPECT_EQ(instruction_proto.precision_config().operand_precision(1), xla::PrecisionConfig::HIGHEST); } } } EXPECT_EQ(num_convolutions, 1); } TEST(ConvertGraphDefToXla, SumWithUnusedArgument) { GraphDef graph_def = SumGraph(); tf2xla::Config config = SumConfig(); NodeDef* unused = graph_def.add_node(); unused->set_name("unused"); unused->set_op("Placeholder"); (*unused->mutable_attr())["dtype"] = TypeAttrValue(DT_INT32); config.add_feed()->mutable_id()->set_node_name("unused"); xla::LocalClient* client = xla::ClientLibrary::LocalClientOrDie(); xla::XlaComputation computation; TF_EXPECT_OK(ConvertGraphDefToXla(graph_def, config, client, &computation)); auto x_literal = xla::LiteralUtil::CreateR0<int32>(10); auto y_literal = xla::LiteralUtil::CreateR0<int32>(32); auto x_global_or = client->TransferToServer(x_literal); auto y_global_or = client->TransferToServer(y_literal); auto unused_global_or = client->TransferToServer(y_literal); TF_EXPECT_OK(x_global_or.status()); 
TF_EXPECT_OK(y_global_or.status()); TF_EXPECT_OK(unused_global_or.status()); std::unique_ptr<xla::GlobalData> x_global = std::move(x_global_or.value()); std::unique_ptr<xla::GlobalData> y_global = std::move(y_global_or.value()); std::unique_ptr<xla::GlobalData> unused_global = std::move(unused_global_or.value()); auto result_or = client->ExecuteAndTransfer( computation, {x_global.get(), y_global.get(), unused_global.get()}); TF_EXPECT_OK(result_or.status()); xla::Literal result = std::move(result_or.value()); EXPECT_EQ("(\ns32[] 42\n)", result.ToString()); } } }
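The Einsum and Conv2D precision tests above all repeat the same scan over the compiled module's computations and instructions. A small helper capturing that shared pattern is sketched below; CountOpcode is an illustrative name only, not a helper defined in the test file, and it simply mirrors the loops already shown.

// Counts instructions with the given opcode in a compiled XlaComputation.
// Sketch mirroring the scan used by the precision tests above.
int CountOpcode(const xla::XlaComputation& computation,
                const std::string& opcode) {
  int count = 0;
  const xla::HloModuleProto& module_proto = computation.proto();
  for (const xla::HloComputationProto& computation_proto :
       module_proto.computations()) {
    for (const xla::HloInstructionProto& instruction_proto :
         computation_proto.instructions()) {
      if (instruction_proto.opcode() == opcode) ++count;
    }
  }
  return count;
}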
1,095
cpp
tensorflow/tensorflow
xla_op_registry
tensorflow/compiler/tf2xla/xla_op_registry.cc
tensorflow/compiler/tf2xla/xla_op_registry_test.cc
#ifndef TENSORFLOW_COMPILER_TF2XLA_XLA_OP_REGISTRY_H_ #define TENSORFLOW_COMPILER_TF2XLA_XLA_OP_REGISTRY_H_ #include <functional> #include <memory> #include <set> #include <unordered_map> #include <vector> #include "tensorflow/core/common_runtime/device_factory.h" #include "tensorflow/core/common_runtime/local_device.h" #include "tensorflow/core/framework/device_base.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/platform/mem.h" #include "tensorflow/core/platform/mutex.h" #include "tensorflow/core/platform/thread_annotations.h" #include "tensorflow/core/public/session_options.h" namespace tensorflow { extern const char* const DEVICE_CPU_XLA_JIT; extern const char* const DEVICE_GPU_XLA_JIT; extern const char* const DEVICE_XLA_CPU; extern const char* const DEVICE_XLA_GPU; constexpr std::array<DataType, 4> kFloatTypes = { {DT_HALF, DT_FLOAT, DT_DOUBLE, DT_BFLOAT16}}; constexpr std::array<DataType, 6> kFloatAndComplexTypes = { {DT_HALF, DT_FLOAT, DT_DOUBLE, DT_BFLOAT16, DT_COMPLEX64, DT_COMPLEX128}}; constexpr std::array<DataType, 14> kNumericTypes = { {DT_UINT8, DT_UINT16, DT_UINT32, DT_UINT64, DT_INT8, DT_INT16, DT_INT32, DT_INT64, DT_HALF, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128, DT_BFLOAT16}}; constexpr std::array<DataType, 22> kCpuAllTypes = { {DT_UINT8, DT_QUINT8, DT_UINT16, DT_UINT32, DT_UINT64, DT_INT8, DT_QINT8, DT_INT16, DT_INT32, DT_QINT32, DT_INT64, DT_HALF, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128, DT_BOOL, DT_BFLOAT16, DT_FLOAT8_E5M2, DT_FLOAT8_E4M3FN, DT_INT4, DT_UINT4}}; constexpr std::array<DataType, 22> kGpuAllTypes = { {DT_UINT8, DT_QUINT8, DT_UINT16, DT_UINT32, DT_UINT64, DT_INT8, DT_QINT8, DT_INT16, DT_INT32, DT_QINT32, DT_INT64, DT_HALF, DT_FLOAT, DT_DOUBLE, DT_COMPLEX64, DT_COMPLEX128, DT_BOOL, DT_BFLOAT16, DT_FLOAT8_E5M2, DT_FLOAT8_E4M3FN, DT_INT4, DT_UINT4}}; class XlaOpRegistry { public: typedef OpKernel* (*Factory)(OpKernelConstruction*); enum class AutoclusteringPolicy { kIfExplicitlyRequested, kIfEnabledGlobally, kAlways, }; struct DeviceRegistration { string compilation_device_name; AutoclusteringPolicy autoclustering_policy; bool cluster_resource_variable_ops_unsafely = false; bool cluster_stack_ops = false; bool cluster_tensor_array_ops = false; bool cluster_stateful_rng_ops = false; bool cluster_control_trigger = false; bool elide_assert_and_checknumerics = false; bool cluster_variant_ops = false; bool cluster_slow_ops = false; bool cluster_inaccurate_ops = false; }; typedef bool (*BackendOpFilter)(KernelDef* kdef); static void RegisterBackend(const string& compilation_device_name, absl::Span<const DataType> supported_types, BackendOpFilter op_filter); static std::vector<string> BackendNames(); static bool IsBackendRegistered(const string& name); static void RegisterCompilationDevice(const string& device_name, const DeviceRegistration& registration); static bool IsCompilationDevice(const string& device_name); static bool GetCompilationDevice(const string& device_name, const DeviceRegistration** registration); static void RegisterCompilationKernels(); static std::vector<const KernelDef*> DeviceKernels( const string& compilation_device_name, bool include_compilation_only_kernels); static std::vector<string> GetAllRegisteredOps(); static Status CompileTimeConstantInputs(const NodeDef& node_def, const OpDef& op_def, std::vector<int>* result) { return CompileTimeConstantInputs(node_def, nullptr, &op_def, result); } static 
absl::StatusOr<std::vector<int>> CompileTimeConstantInputs( const NodeDef& node_def, const OpDef& op_def) { std::vector<int> out; TF_RETURN_IF_ERROR(CompileTimeConstantInputs(node_def, op_def, &out)); return out; } static Status CompileTimeConstantInputs(const OpKernel& op_kernel, std::vector<int>* result) { return CompileTimeConstantInputs(op_kernel.def(), &op_kernel, nullptr, result); } static const std::unordered_set<std::string>* CompileTimeConstantInputArgNames(const string& op); static bool IsMetadataOp(const string& op); private: friend class XlaBackendRegistrar; friend class XlaOpRegistrar; friend class XlaOpRegistrationBuilder; static XlaOpRegistry& Instance(); XlaOpRegistry(); ~XlaOpRegistry(); mutex mutex_; struct Backend { std::set<DataType> supported_types; BackendOpFilter op_filter; std::vector<std::unique_ptr<KernelDef>> kernel_defs; }; std::unordered_map<string, Backend> backends_ TF_GUARDED_BY(mutex_); std::unordered_map<string, DeviceRegistration> compilation_devices_ TF_GUARDED_BY(mutex_); struct OpRegistration { string name; bool compilation_only = false; bool allow_resource_types = false; bool allow_variant_types = false; bool allow_string_type = false; std::unordered_map<string, std::set<DataType>> type_constraints; bool has_device_allowlist = false; std::unordered_set<string> device_allowlist; std::unordered_set<string> compile_time_constant_inputs; bool is_metadata_op = false; std::string label; Factory factory; }; static bool IsCompatible(const OpRegistration& x, const OpRegistration& y); static Status CompileTimeConstantInputs(const NodeDef& node_def, const OpKernel* op_kernel, const OpDef* op_def, std::vector<int>* result); std::unordered_map<string, std::vector<std::unique_ptr<OpRegistration>>> ops_ TF_GUARDED_BY(mutex_); bool jit_kernels_registered_ = false; std::vector<std::unique_ptr<kernel_factory::OpKernelRegistrar>> kernel_registrars_ TF_GUARDED_BY(mutex_); }; #define REGISTER_XLA_OP(NAME, OP) \ REGISTER_XLA_OP_UNIQ_HELPER(__COUNTER__, NAME, OP) #define REGISTER_XLA_CONV_OP(BUILDER, OP) \ REGISTER_XLA_OP(BUILDER.TypeConstraint("T", GetXlaConvTypesForNonGpu()), OP) \ REGISTER_XLA_OP(BUILDER.TypeConstraint("T", GetXlaConvTypesForGpu()) \ .Device(DEVICE_GPU_XLA_JIT), \ OP) class XlaOpRegistrationBuilder { public: static XlaOpRegistrationBuilder Name(absl::string_view name); XlaOpRegistrationBuilder& Device(absl::string_view devices); XlaOpRegistrationBuilder& Device(absl::Span<const absl::string_view> devices); XlaOpRegistrationBuilder& TypeConstraint(absl::string_view attr_name, DataType allowed); XlaOpRegistrationBuilder& TypeConstraint(absl::string_view attr_name, absl::Span<const DataType> allowed); XlaOpRegistrationBuilder& CompilationOnly(); XlaOpRegistrationBuilder& AllowResourceTypes(); XlaOpRegistrationBuilder& AllowVariantTypes(); XlaOpRegistrationBuilder& AllowStringType(); XlaOpRegistrationBuilder& CompileTimeConstantInput( absl::string_view input_name); XlaOpRegistrationBuilder& IsMetadataOp(); XlaOpRegistrationBuilder& Label(std::string label); std::unique_ptr<XlaOpRegistry::OpRegistration> Build( XlaOpRegistry::Factory factory); private: XlaOpRegistrationBuilder(absl::string_view name); std::unique_ptr<XlaOpRegistry::OpRegistration> registration_; }; #define REGISTER_XLA_BACKEND(NAME, ...) 
\ REGISTER_XLA_BACKEND_UNIQ_HELPER(__COUNTER__, NAME, __VA_ARGS__) class XlaOpRegistrar { public: XlaOpRegistrar(std::unique_ptr<XlaOpRegistry::OpRegistration> registration); }; #define REGISTER_XLA_OP_UNIQ_HELPER(COUNTER, BUILDER, OP) \ REGISTER_XLA_OP_UNIQ(COUNTER, BUILDER, OP) #define REGISTER_XLA_OP_UNIQ(CTR, BUILDER, OP) \ static ::tensorflow::XlaOpRegistrar xla_op_registrar__body__##CTR##__object( \ ::tensorflow::XlaOpRegistrationBuilder::BUILDER.Build( \ [](::tensorflow::OpKernelConstruction* context) \ -> ::tensorflow::OpKernel* { return new OP(context); })); class XlaBackendRegistrar { public: XlaBackendRegistrar(absl::string_view name, absl::Span<const DataType> types, XlaOpRegistry::BackendOpFilter op_filter = nullptr); }; #define REGISTER_XLA_BACKEND_UNIQ_HELPER(COUNTER, NAME, ...) \ REGISTER_XLA_BACKEND_UNIQ(COUNTER, NAME, __VA_ARGS__) #define REGISTER_XLA_BACKEND_UNIQ(CTR, NAME, ...) \ static ::tensorflow::XlaBackendRegistrar \ xla_backend_registrar__body__##CTR##__object(NAME, __VA_ARGS__); } #endif #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include <functional> #include <memory> #include <string> #include "tensorflow/compiler/jit/flags.h" #include "tensorflow/compiler/jit/xla_cluster_util.h" #include "tensorflow/compiler/tf2xla/type_util.h" #include "tensorflow/compiler/tf2xla/xla_context.h" #include "xla/client/client_library.h" #include "tensorflow/core/common_runtime/device_factory.h" #include "tensorflow/core/common_runtime/local_device.h" #include "tensorflow/core/common_runtime/next_pluggable_device/next_pluggable_device_factory.h" #include "tensorflow/core/framework/device_base.h" #include "tensorflow/core/framework/kernel_def.pb.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/op_def_util.h" #include "tensorflow/core/platform/mem.h" #include "tensorflow/core/platform/stream_executor_no_cuda.h" #include "tensorflow/core/tfrt/common/pjrt_util.h" namespace tensorflow { const char* const DEVICE_CPU_XLA_JIT = "XLA_CPU_JIT"; const char* const DEVICE_GPU_XLA_JIT = "XLA_GPU_JIT"; const char* const DEVICE_XLA_CPU = "XLA_CPU"; const char* const DEVICE_XLA_GPU = "XLA_GPU"; static Status LaunchOpHasKernelForDevice(const DeviceType& device_type) { const OpDef* op_def; TF_RETURN_IF_ERROR(OpRegistry::Global()->LookUpOpDef("XlaLaunch", &op_def)); NodeDef node_def; node_def.set_name("_XlaLaunch-op"); node_def.set_op("XlaLaunch"); string kernel_class_name; TF_RETURN_IF_ERROR(FindKernelDef(device_type, node_def, nullptr, &kernel_class_name)); VLOG(1) << "LaunchOpHasKernelForDevice" << " kernel_class_name: " << kernel_class_name; return absl::OkStatus(); } XlaOpRegistry::XlaOpRegistry() = default; XlaOpRegistry::~XlaOpRegistry() = default; bool XlaOpRegistry::IsCompatible(const OpRegistration& x, const OpRegistration& y) { if (x.name != y.name) return true; if (x.label != y.label) return true; if (x.compilation_only != y.compilation_only) { LOG(WARNING) << "Registrations of " << x.name << " have incompatible compilation_only settings."; return false; } if (x.allow_resource_types != y.allow_resource_types) { LOG(WARNING) << "Registrations of " << x.name << " have incompatible allow_resource_types settings."; return false; } if (x.allow_variant_types != y.allow_variant_types) { LOG(WARNING) << "Registrations of " << x.name << " have incompatible allow_variant_types settings."; return false; } if (x.allow_string_type != y.allow_string_type) { LOG(WARNING) << "Registrations of " << x.name << " have incompatible allow_string_type 
settings."; return false; } if (!x.has_device_allowlist && !y.has_device_allowlist) { LOG(WARNING) << "Duplicate registrations of " << x.name << " with no device allowlists."; return false; } if (x.has_device_allowlist && y.has_device_allowlist) { for (const auto& device : x.device_allowlist) { if (y.device_allowlist.count(device) != 0) { LOG(WARNING) << "Multiple registrations of " << x.name << " on device " << device; return false; } } } if (x.compile_time_constant_inputs != y.compile_time_constant_inputs) { LOG(WARNING) << "Registrations of " << x.name << " have incompatible compile time constant inputs."; return false; } if (x.is_metadata_op != y.is_metadata_op) { LOG(WARNING) << "Registrations of " << x.name << " have incompatible values for is_metadata_op."; return false; } return true; } void XlaOpRegistry::RegisterCompilationDevice( const string& device_name, const DeviceRegistration& registration) { XlaOpRegistry& registry = Instance(); mutex_lock lock(registry.mutex_); auto result = registry.compilation_devices_.emplace(device_name, registration); CHECK(result.second || result.first->second.compilation_device_name == registration.compilation_device_name); } void XlaOpRegistry::RegisterBackend( const string& compilation_device_name, absl::Span<const DataType> supported_types, BackendOpFilter op_filter) { XlaOpRegistry& registry = Instance(); mutex_lock lock(registry.mutex_); auto result = registry.backends_.emplace(compilation_device_name, Backend()); CHECK(result.second) << "Duplicate XLA backend registration " << compilation_device_name; result.first->second.supported_types.insert(supported_types.begin(), supported_types.end()); result.first->second.op_filter = op_filter; } bool XlaOpRegistry::IsCompilationDevice( const string& device_name) { XlaOpRegistry& registry = Instance(); mutex_lock lock(registry.mutex_); return registry.backends_.find(device_name) != registry.backends_.end(); } bool XlaOpRegistry::GetCompilationDevice( const string& device_name, const DeviceRegistration** registration) { XlaOpRegistry& registry = Instance(); static void* registration_init = [&registry]() { MarkForCompilationPassFlags* flags = GetMarkForCompilationPassFlags(); bool cpu_global_jit = flags->tf_xla_cpu_global_jit; VLOG(2) << "tf_xla_cpu_global_jit = " << cpu_global_jit; mutex_lock lock(registry.mutex_); if (LaunchOpHasKernelForDevice(DeviceType(DEVICE_CPU)).ok()) { DeviceRegistration& registration = registry.compilation_devices_[DEVICE_CPU]; registration.compilation_device_name = DEVICE_CPU_XLA_JIT; registration.autoclustering_policy = cpu_global_jit ? 
XlaOpRegistry::AutoclusteringPolicy::kIfEnabledGlobally : XlaOpRegistry::AutoclusteringPolicy::kIfExplicitlyRequested; } if (LaunchOpHasKernelForDevice(DeviceType(DEVICE_GPU)).ok()) { DeviceRegistration& registration = registry.compilation_devices_[DEVICE_GPU]; registration.compilation_device_name = DEVICE_GPU_XLA_JIT; registration.autoclustering_policy = XlaOpRegistry::AutoclusteringPolicy::kIfEnabledGlobally; } return nullptr; }(); (void)registration_init; if (DeviceFactory::IsPluggableDevice(device_name) && GetPjRtClient(DeviceType(device_name)).ok()) { mutex_lock lock(registry.mutex_); NextPluggableDeviceFactory* device_factory = static_cast<NextPluggableDeviceFactory*>( DeviceFactory::GetFactory(device_name)); if (device_factory != nullptr && DeviceType(device_factory->compilation_device_name()) == DeviceType(DEVICE_GPU_XLA_JIT) && registry.compilation_devices_.find(device_name) == registry.compilation_devices_.end()) { DeviceRegistration& registration = registry.compilation_devices_[device_name]; registration.compilation_device_name = DEVICE_GPU_XLA_JIT; registration.autoclustering_policy = XlaOpRegistry::AutoclusteringPolicy::kIfEnabledGlobally; } } mutex_lock lock(registry.mutex_); auto it = registry.compilation_devices_.find(device_name); if (it == registry.compilation_devices_.end()) return false; *registration = &it->second; return true; } void XlaOpRegistry::RegisterCompilationKernels() { XlaOpRegistry& registry = Instance(); mutex_lock lock(registry.mutex_); if (registry.jit_kernels_registered_) return; registry.jit_kernels_registered_ = true; OpRegistryInterface* op_registry = OpRegistry::Global(); for (auto& ops : registry.ops_) { const string& op_name = ops.first; std::vector<std::unique_ptr<OpRegistration>>& op_registrations = ops.second; std::partition(op_registrations.begin(), op_registrations.end(), [](const std::unique_ptr<OpRegistration>& op_reg) { return op_reg->has_device_allowlist; }); std::unordered_set<string> allowlisted_backend; for (auto& op_registration : op_registrations) { if (op_registration->has_device_allowlist) { allowlisted_backend.insert(op_registration->device_allowlist.begin(), op_registration->device_allowlist.end()); } } for (auto& op_registration : op_registrations) { const OpDef* op_def; Status lookup_status = op_registry->LookUpOpDef(op_name, &op_def); if (!lookup_status.ok()) { LOG(ERROR) << lookup_status.message(); XLA_LOG_LINES( ERROR, "Ops registered: \n" + dynamic_cast<OpRegistry*>(op_registry)->DebugString(true)); } TF_CHECK_OK(lookup_status); std::unordered_set<string> type_attrs; for (const OpDef::AttrDef& attr_def : op_def->attr()) { if (attr_def.type() == "type" || attr_def.type() == "list(type)") { type_attrs.insert(attr_def.name()); } } for (const auto& constraint : op_registration->type_constraints) { if (type_attrs.find(constraint.first) == type_attrs.end()) { LOG(FATAL) << "Unknown type attribute " << constraint.first << " in XLA op registration for " << op_name; } } for (auto& backend : registry.backends_) { if (op_registration->has_device_allowlist && op_registration->device_allowlist.find(backend.first) == op_registration->device_allowlist.end()) { continue; } if (!op_registration->has_device_allowlist && allowlisted_backend.find(backend.first) != allowlisted_backend.end()) { continue; } std::unique_ptr<KernelDef> kdef(new KernelDef); kdef->set_op(op_registration->name); kdef->set_device_type(backend.first); kdef->set_label(op_registration->label); bool unsatisfiable_type_constraint = false; for (const string& type_attr : 
type_attrs) { KernelDef::AttrConstraint* attr_constraint = kdef->add_constraint(); attr_constraint->set_name(type_attr); auto* allowed_values = attr_constraint->mutable_allowed_values()->mutable_list(); const OpDef::AttrDef& op_def_attr = *FindAttr(type_attr, *op_def); const auto* op_def_allowed_types = op_def_attr.has_allowed_values() ? &op_def_attr.allowed_values().list().type() : nullptr; auto constraint_it = op_registration->type_constraints.find(type_attr); const std::set<DataType>* type_constraints = constraint_it != op_registration->type_constraints.end() ? &constraint_it->second : nullptr; for (DataType dtype : backend.second.supported_types) { if (op_def_allowed_types != nullptr && std::find(op_def_allowed_types->begin(), op_def_allowed_types->end(), dtype) == op_def_allowed_types->end()) { continue; } if (type_constraints != nullptr && type_constraints->find(dtype) == type_constraints->end()) { continue; } allowed_values->add_type(dtype); } if (op_registration->allow_resource_types) { allowed_values->add_type(DT_RESOURCE); } if (op_registration->allow_variant_types) { allowed_values->add_type(DT_VARIANT); } if (op_registration->allow_string_type) { allowed_values->add_type(DT_STRING); } if (allowed_values->type().empty()) { unsatisfiable_type_constraint = true; break; } } if (unsatisfiable_type_constraint) continue; if (backend.second.op_filter != null
#include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "tensorflow/compiler/tf2xla/xla_op_kernel.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace { class DummyCPUOp : public XlaOpKernel { public: explicit DummyCPUOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {} void Compile(XlaOpKernelContext* ctx) override { ctx->SetOutput(0, ctx->Input(0)); } }; class DummyGenericOp : public XlaOpKernel { public: explicit DummyGenericOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {} void Compile(XlaOpKernelContext* ctx) override { ctx->SetOutput(0, ctx->Input(0)); } }; REGISTER_OP("DummyDuplicateOp") .Attr("T: {float, int32}") .Input("input: int32") .Output("output: int32") .Doc(R"doc( A dummy Op. input: dummy input. output: dummy output. )doc"); REGISTER_XLA_OP(Name("DummyDuplicateOp") .Device(DEVICE_CPU_XLA_JIT) .TypeConstraint("T", DT_INT32), DummyCPUOp); REGISTER_XLA_OP(Name("DummyDuplicateOp").TypeConstraint("T", DT_FLOAT), DummyGenericOp); TEST(XlaOpRegistryTest, XlaOpRegistrationWithOverride) { XlaOpRegistry::RegisterCompilationKernels(); auto registered_kernels = GetAllRegisteredKernels().kernel(); for (const auto& kernels : registered_kernels) { if (kernels.op() == "DummyDuplicateOp") { EXPECT_EQ(kernels.constraint_size(), 1); EXPECT_EQ(kernels.constraint(0).name(), "T"); if (kernels.device_type() == "XLA_CPU_JIT") { EXPECT_EQ(kernels.constraint(0).allowed_values().list().type(0), DT_INT32); } else { EXPECT_EQ(kernels.constraint(0).allowed_values().list().type(0), DT_FLOAT); } } } } TEST(XlaOpReigstryTest, XlaOpRegistrationDeviceKernels) { XlaOpRegistry::RegisterCompilationKernels(); auto registered_devices = XlaOpRegistry::BackendNames(); for (const auto& resgistered_device : registered_devices) { auto kernels = XlaOpRegistry::DeviceKernels(resgistered_device, true); for (const auto& kernel : kernels) { if (kernel->op() == "DummyDuplicateOp") { if (resgistered_device == DEVICE_CPU_XLA_JIT) { EXPECT_EQ(kernel->constraint(0).allowed_values().list().type(0), DT_INT32); } else { EXPECT_EQ(kernel->constraint(0).allowed_values().list().type(0), DT_FLOAT); } } } } } class DummyInfeasibleTypeConstraintOp : public XlaOpKernel { public: explicit DummyInfeasibleTypeConstraintOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {} void Compile(XlaOpKernelContext* ctx) override { LOG(FATAL) << "unreachable"; } }; REGISTER_OP("DummyInfeasibleTypeConstraintOp") .Attr("T: {float, string}") .Input("input: T") .Output("output: T") .Doc(R"doc( A dummy Op. input: dummy input. output: dummy output. )doc"); REGISTER_XLA_OP( Name("DummyInfeasibleTypeConstraintOp").TypeConstraint("T", DT_STRING), DummyInfeasibleTypeConstraintOp); TEST(XlaOpRegistryTest, OpWithInfeasibleTypeConstraintIsNotRegistered) { XlaOpRegistry::RegisterCompilationKernels(); auto registered_kernels = GetAllRegisteredKernels().kernel(); for (const auto& kernels : registered_kernels) { EXPECT_NE(kernels.op(), "DummyInfeasibleTypeConstraintOp"); } } } }
1,096
cpp
tensorflow/tensorflow
xla_compiler
third_party/xla/xla/python/pjrt_ifrt/xla_compiler.cc
tensorflow/compiler/tf2xla/xla_compiler_test.cc
#ifndef XLA_PYTHON_PJRT_IFRT_XLA_COMPILER_H_ #define XLA_PYTHON_PJRT_IFRT_XLA_COMPILER_H_ #include <memory> #include <optional> #include <utility> #include <vector> #include "llvm/Support/ExtensibleRTTI.h" #include "mlir/IR/BuiltinOps.h" #include "mlir/IR/MLIRContext.h" #include "mlir/IR/OwningOpRef.h" #include "xla/pjrt/pjrt_executable.h" #include "xla/python/ifrt/compiler.h" #include "xla/python/ifrt/host_callback.h" namespace xla { namespace ifrt { struct XlaCompileOptions : llvm::RTTIExtends<XlaCompileOptions, CompileOptions> { XlaCompileOptions() = default; explicit XlaCompileOptions(xla::CompileOptions compile_options, std::vector<tsl::RCReference<LoadedHostCallback>> loaded_host_callbacks = {}) : compile_options(std::move(compile_options)), loaded_host_callbacks(std::move(loaded_host_callbacks)) {} xla::CompileOptions compile_options; std::vector<tsl::RCReference<LoadedHostCallback>> loaded_host_callbacks; ~XlaCompileOptions() override = default; static char ID; }; struct XlaDeserializeExecutableOptions : llvm::RTTIExtends<XlaDeserializeExecutableOptions, DeserializeExecutableOptions> { XlaDeserializeExecutableOptions() = default; explicit XlaDeserializeExecutableOptions( std::optional<xla::CompileOptions> compile_options, std::vector<tsl::RCReference<LoadedHostCallback>> loaded_host_callbacks = {}) : compile_options(std::move(compile_options)), loaded_host_callbacks(std::move(loaded_host_callbacks)) {} std::optional<xla::CompileOptions> compile_options; std::vector<tsl::RCReference<LoadedHostCallback>> loaded_host_callbacks; ~XlaDeserializeExecutableOptions() override = default; static char ID; }; absl::StatusOr<std::unique_ptr<XlaCompileOptions>> GetXlaCompileOptions( std::unique_ptr<CompileOptions> options); absl::StatusOr<std::unique_ptr<XlaDeserializeExecutableOptions>> GetXlaDeserializeExecutableOptions( std::unique_ptr<DeserializeExecutableOptions> options); } } #endif #include "xla/python/pjrt_ifrt/xla_compiler.h" #include <memory> #include <string> #include "absl/status/status.h" #include "absl/status/statusor.h" #include "absl/strings/string_view.h" #include "llvm/Support/Casting.h" #include "llvm/Support/ExtensibleRTTI.h" #include "xla/pjrt/pjrt_executable.h" #include "xla/python/ifrt/serdes.h" #include "xla/python/pjrt_ifrt/xla_compiler.pb.h" #include "tsl/platform/statusor.h" namespace xla { namespace ifrt { namespace { class XlaCompileOptionsSerDes : public llvm::RTTIExtends<XlaCompileOptionsSerDes, SerDes> { public: absl::string_view type_name() const override { return "xla::ifrt::XlaCompileOptions"; } absl::StatusOr<std::string> Serialize(Serializable& serializable) override { const auto& options = llvm::cast<XlaCompileOptions>(serializable); XlaCompileOptionsProto proto; TF_ASSIGN_OR_RETURN(*proto.mutable_compile_options(), options.compile_options.ToProto()); if (!options.loaded_host_callbacks.empty()) { return absl::UnimplementedError( "xla::ifrt::XlaCompileOptions with loaded_host_callbacks is not " "serializable"); } return proto.SerializeAsString(); } absl::StatusOr<std::unique_ptr<Serializable>> Deserialize( const std::string& serialized, std::unique_ptr<DeserializeOptions>) override { XlaCompileOptionsProto proto; if (!proto.ParseFromString(serialized)) { return absl::DataLossError( "Unable to parse serialized XlaCompileOptionsProto"); } auto options = std::make_unique<XlaCompileOptions>(); TF_ASSIGN_OR_RETURN( options->compile_options, xla::CompileOptions::FromProto(proto.compile_options())); return options; } static char ID; }; char 
XlaCompileOptionsSerDes::ID = 0; bool register_xla_compile_options_serdes = ([]{ RegisterSerDes<XlaCompileOptions>( std::make_unique<XlaCompileOptionsSerDes>()); }(), true); } char XlaCompileOptions::ID = 0; char XlaDeserializeExecutableOptions::ID = 0; absl::StatusOr<std::unique_ptr<XlaCompileOptions>> GetXlaCompileOptions( std::unique_ptr<CompileOptions> options) { if (!llvm::isa<XlaCompileOptions>(options.get())) { return xla::InvalidArgument("options must be XlaCompileOptions"); } return std::unique_ptr<XlaCompileOptions>( static_cast<XlaCompileOptions*>(options.release())); } absl::StatusOr<std::unique_ptr<XlaDeserializeExecutableOptions>> GetXlaDeserializeExecutableOptions( std::unique_ptr<DeserializeExecutableOptions> options) { if (!llvm::isa<XlaDeserializeExecutableOptions>(options.get())) { return xla::InvalidArgument( "options must be XlaDeserializeExecutableOptions"); } return std::unique_ptr<XlaDeserializeExecutableOptions>( static_cast<XlaDeserializeExecutableOptions*>(options.release())); } } }
#include "tensorflow/compiler/tf2xla/xla_compiler.h" #include "absl/strings/match.h" #include "tensorflow/cc/framework/ops.h" #include "tensorflow/cc/ops/const_op.h" #include "tensorflow/cc/ops/data_flow_ops.h" #include "tensorflow/cc/ops/function_ops.h" #include "tensorflow/cc/ops/functional_ops.h" #include "tensorflow/cc/ops/list_ops.h" #include "tensorflow/cc/ops/math_ops.h" #include "tensorflow/cc/ops/resource_variable_ops.h" #include "tensorflow/cc/ops/standard_ops.h" #include "tensorflow/compiler/tf2xla/literal_util.h" #include "tensorflow/compiler/tf2xla/shape_util.h" #include "tensorflow/compiler/tf2xla/side_effect_util.h" #include "tensorflow/compiler/tf2xla/type_util.h" #include "tensorflow/compiler/tf2xla/xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "xla/client/client_library.h" #include "xla/client/local_client.h" #include "xla/client/xla_builder.h" #include "xla/literal.h" #include "xla/service/hlo.pb.h" #include "xla/service/hlo_proto_util.h" #include "xla/shape_util.h" #include "xla/status_macros.h" #include "xla/tests/literal_test_util.h" #include "tensorflow/core/common_runtime/function.h" #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/framework/common_shape_fns.h" #include "tensorflow/core/framework/fake_input.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/function.pb.h" #include "tensorflow/core/framework/function_testlib.h" #include "tensorflow/core/framework/graph_to_functiondef.h" #include "tensorflow/core/framework/node_def_builder.h" #include "tensorflow/core/framework/node_def_util.h" #include "tensorflow/core/framework/resource_mgr.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/framework/tensor_shape.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/graph/algorithm.h" #include "tensorflow/core/graph/graph.h" #include "tensorflow/core/kernels/ops_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" #include "tensorflow/core/public/version.h" #include "tsl/platform/statusor.h" namespace tensorflow { class XlaCompilerTest : public ::testing::Test { protected: void SetUp() override { client_ = xla::ClientLibrary::LocalClientOrDie(); XlaOpRegistry::RegisterCompilationKernels(); FunctionDefLibrary flib; flib_def_.reset(new FunctionLibraryDefinition(OpRegistry::Global(), flib)); } XlaCompiler::Options DefaultOptions() { XlaCompiler::Options options; options.device_type = DeviceType(DEVICE_CPU_XLA_JIT); options.client = client_; options.flib_def = flib_def_.get(); return options; } FunctionLibraryDefinition* LocalFlibDef(XlaCompiler* compiler) { return compiler->local_flib_def_.get(); } xla::Client* client_; std::unique_ptr<FunctionLibraryDefinition> flib_def_; }; namespace { class DummyResourceForTest : public ResourceBase { public: string DebugString() const override { return "dummy"; } void Increment() { ++value_; } int Get() { return value_; } private: int value_ = 0; }; class DummyReadResourceOp : public XlaOpKernel { public: explicit DummyReadResourceOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {} void Compile(XlaOpKernelContext* ctx) override { ResourceMgr* rm = ctx->op_kernel_context()->resource_manager(); OP_REQUIRES(ctx, rm, errors::Internal("No resource manager.")); DummyResourceForTest* dummy; OP_REQUIRES_OK(ctx, rm->Lookup<DummyResourceForTest>( rm->default_container(), 
"dummy", &dummy)); dummy->Increment(); dummy->Unref(); ctx->SetOutput(0, ctx->Input(0)); ctx->SetOutput(1, ctx->Input(0)); } }; class DummyReadResourceCC { public: DummyReadResourceCC(const Scope& scope, const Input& value) { if (!scope.ok()) return; auto _value = ops::AsNodeOut(scope, value); if (!scope.ok()) return; Node* ret; const auto unique_name = scope.GetUniqueNameForOp("DummyReadResource"); auto builder = NodeBuilder(unique_name, "DummyReadResource").Input(_value); scope.UpdateBuilder(&builder); scope.UpdateStatus(builder.Finalize(scope.graph(), &ret)); if (!scope.ok()) return; scope.UpdateStatus(scope.DoShapeInference(ret)); if (!scope.ok()) return; this->output1_ = Output(ret, 0); this->output2_ = Output(ret, 1); } Output output1_; Output output2_; }; REGISTER_OP("DummyReadResource") .Input("input: int32") .Output("output1: int32") .Output("output2: int32") .SetShapeFn(shape_inference::UnknownShape) .Doc(R"doc( A dummy Op. input: dummy input. output1: dummy output. output2: dummy output. )doc"); REGISTER_XLA_OP(Name("DummyReadResource"), DummyReadResourceOp); class DummyDuplicateOp : public XlaOpKernel { public: explicit DummyDuplicateOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {} void Compile(XlaOpKernelContext* ctx) override { ctx->SetOutput(0, ctx->Input(0)); } }; REGISTER_OP("DummyDuplicateOp") .Input("input: int32") .Output("output: int32") .Doc(R"doc( A dummy Op. input: dummy input. output: dummy output. )doc"); REGISTER_XLA_OP(Name("DummyDuplicateOp").Device(DEVICE_CPU_XLA_JIT), DummyDuplicateOp); REGISTER_XLA_OP(Name("DummyDuplicateOp").Device(DEVICE_GPU_XLA_JIT), DummyDuplicateOp); TEST_F(XlaCompilerTest, EmptyReturnValues) { XlaCompiler compiler(DefaultOptions()); std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); XlaCompiler::CompilationResult result; TF_ASSERT_OK(compiler.CompileGraph(XlaCompiler::CompileOptions(), "add", std::move(graph), {}, &result)); TF_ASSERT_OK(client_->Execute(*result.computation, {}).status()); } TEST_F(XlaCompilerTest, Simple) { Scope scope = Scope::NewRootScope().ExitOnError(); auto a = ops::_Arg(scope.WithOpName("A"), DT_INT32, 0); auto b = ops::_Arg(scope.WithOpName("B"), DT_INT32, 1); auto c = ops::Add(scope.WithOpName("C"), a, b); auto d = ops::_Retval(scope.WithOpName("D"), c, 0); std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); TF_ASSERT_OK(scope.ToGraph(graph.get())); std::vector<XlaCompiler::Argument> args(2); args[0].kind = XlaCompiler::Argument::kParameter; args[0].type = DT_INT32; args[0].shape = TensorShape({2}); args[1].kind = XlaCompiler::Argument::kParameter; args[1].type = DT_INT32; args[1].shape = TensorShape({2}); XlaCompiler compiler(DefaultOptions()); XlaCompiler::CompilationResult result; TF_ASSERT_OK(compiler.CompileGraph(XlaCompiler::CompileOptions(), "add", std::move(graph), args, &result)); xla::Literal param0_literal = xla::LiteralUtil::CreateR1<int32>({7, 42}); xla::Literal param1_literal = xla::LiteralUtil::CreateR1<int32>({-3, 101}); std::unique_ptr<xla::GlobalData> param0_data = client_->TransferToServer(param0_literal).value(); std::unique_ptr<xla::GlobalData> param1_data = client_->TransferToServer(param1_literal).value(); std::unique_ptr<xla::GlobalData> actual = client_ ->Execute(*result.computation, {param0_data.get(), param1_data.get()}) .value(); xla::Literal actual_literal = client_->Transfer(*actual).value(); xla::Literal expected0 = xla::LiteralUtil::CreateR1<int32>({4, 143}); xla::Literal expected_literal = xla::LiteralUtil::MakeTuple({&expected0}); 
EXPECT_TRUE(xla::LiteralTestUtil::Equal(expected_literal, actual_literal)); } absl::StatusOr<std::unique_ptr<xla::HloModule>> LoadModuleFromHloProto( const xla::HloModuleProto& module_proto) { TF_ASSIGN_OR_RETURN(auto module_config, xla::HloModule::CreateModuleConfigFromProto( module_proto, xla::GetDebugOptionsFromFlags())); return xla::CreateModuleFromProto(module_proto, module_config); } TEST_F(XlaCompilerTest, SimpleDynamicShapeParameter) { Scope scope = Scope::NewRootScope().ExitOnError(); auto a = ops::_Arg(scope.WithOpName("A"), DT_INT32, 0); auto b = ops::_Arg(scope.WithOpName("B"), DT_INT32, 1); auto c = ops::Add(scope.WithOpName("C"), a, b); auto d = ops::_Retval(scope.WithOpName("D"), c, 0); std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); TF_ASSERT_OK(scope.ToGraph(graph.get())); std::vector<XlaCompiler::Argument> args(2); args[0].kind = XlaCompiler::Argument::kParameter; args[0].type = DT_INT32; args[0].shape = xla::ShapeUtil::MakeShape(xla::S32, {2}, {true}); args[1].kind = XlaCompiler::Argument::kParameter; args[1].type = DT_INT32; args[1].shape = TensorShape({2}); XlaCompiler compiler(DefaultOptions()); XlaCompiler::CompilationResult result; TF_ASSERT_OK(compiler.CompileGraph(XlaCompiler::CompileOptions(), "add", std::move(graph), args, &result)); auto hlo = result.computation->proto(); TF_ASSERT_OK_AND_ASSIGN(auto module, LoadModuleFromHloProto(hlo)); EXPECT_EQ(module->computation_count(), 1); EXPECT_TRUE(module->mutable_computation(0) ->parameter_instruction(0) ->shape() .is_dynamic()); } TEST_F(XlaCompilerTest, OutOfOrderGraph) { Scope scope = Scope::NewRootScope().ExitOnError(); auto a = ops::_Arg(scope.WithOpName("A"), DT_INT32, 0); auto b = ops::_Arg(scope.WithOpName("B"), DT_INT32, 1); auto d = ops::_Retval(scope.WithOpName("D"), a, 0); auto c = ops::Add(scope.WithOpName("C"), a, b); std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); TF_ASSERT_OK(scope.ToGraph(graph.get())); std::vector<XlaCompiler::Argument> args(2); args[0].kind = XlaCompiler::Argument::kParameter; args[0].type = DT_INT32; args[0].shape = TensorShape({2}); args[1].kind = XlaCompiler::Argument::kParameter; args[1].type = DT_INT32; args[1].shape = TensorShape({2}); XlaCompiler compiler(DefaultOptions()); XlaCompiler::CompileOptions compile_options; compile_options.always_return_tuple = false; XlaCompiler::CompilationResult result; TF_ASSERT_OK(compiler.CompileGraph(compile_options, "add", std::move(graph), args, &result)); xla::Literal param0_literal = xla::LiteralUtil::CreateR1<int32>({7, 42}); xla::Literal param1_literal = xla::LiteralUtil::CreateR1<int32>({-3, 101}); std::unique_ptr<xla::GlobalData> param0_data = client_->TransferToServer(param0_literal).value(); std::unique_ptr<xla::GlobalData> param1_data = client_->TransferToServer(param1_literal).value(); std::unique_ptr<xla::GlobalData> actual = client_ ->Execute(*result.computation, {param0_data.get(), param1_data.get()}) .value(); xla::Literal actual_literal = client_->Transfer(*actual).value(); EXPECT_TRUE(xla::LiteralTestUtil::Equal(param0_literal, actual_literal)); } TEST_F(XlaCompilerTest, HonorShapeRepresentationFnForUnwrittenResource) { Scope scope = Scope::NewRootScope().ExitOnError(); auto var = ops::_Arg(scope.WithOpName("V"), DT_RESOURCE, 0); auto d = ops::_Retval(scope.WithOpName("D"), var, 0); std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); TF_ASSERT_OK(scope.ToGraph(graph.get())); std::vector<XlaCompiler::Argument> args(1); args[0].kind = XlaCompiler::Argument::kResource; 
args[0].resource_kind = XlaResource::kVariable; args[0].initialized = true; args[0].type = DT_INT32; args[0].shape = TensorShape({2, 3}); auto options = DefaultOptions(); XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns; shape_determination_fns.shape_representation_fn = [](const TensorShape& shape, DataType dt, bool use_fast_memory, XlaLayoutPreference layout_preference) -> absl::StatusOr<xla::Shape> { xla::Shape xla_shape; TF_RETURN_IF_ERROR(TensorShapeToXLAShape(dt, shape, &xla_shape)); *xla_shape.mutable_layout() = xla::LayoutUtil::MakeLayout({0, 1}); return xla_shape; }; options.shape_determination_fns = shape_determination_fns; XlaCompiler compiler(options); XlaCompiler::CompilationResult result; XlaCompiler::CompileOptions compile_options; compile_options.return_updated_values_for_all_resources = true; TF_ASSERT_OK(compiler.CompileGraph(compile_options, "add", std::move(graph), args, &result)); xla::Shape transposed = xla::ShapeUtil::MakeShapeWithDenseLayout(xla::S32, {2, 3}, {0, 1}); EXPECT_EQ(result.xla_output_shape, xla::ShapeUtil::MakeTupleShape({transposed})); } TEST_F(XlaCompilerTest, HonorShapeRepresentationFnForFastMemVar) { Scope scope = Scope::NewRootScope().ExitOnError(); auto var = ops::_Arg(scope.WithOpName("V"), DT_RESOURCE, 0); auto d = ops::_Retval(scope.WithOpName("D"), var, 0); std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); TF_ASSERT_OK(scope.ToGraph(graph.get())); std::vector<XlaCompiler::Argument> args(1); args[0].kind = XlaCompiler::Argument::kResource; args[0].resource_kind = XlaResource::kVariable; args[0].initialized = true; args[0].type = DT_INT32; args[0].shape = TensorShape({2, 3}); args[0].fast_mem = true; auto options = DefaultOptions(); int fast_mem_arg_count = 0; XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns; shape_determination_fns.shape_representation_fn = [&fast_mem_arg_count]( const TensorShape& shape, DataType dt, bool use_fast_memory, XlaLayoutPreference layout_preference) -> absl::StatusOr<xla::Shape> { xla::Shape xla_shape; TF_RETURN_IF_ERROR(TensorShapeToXLAShape(dt, shape, &xla_shape)); *xla_shape.mutable_layout() = xla::LayoutUtil::MakeLayout({0, 1}); if (use_fast_memory) { fast_mem_arg_count++; } return xla_shape; }; options.shape_determination_fns = shape_determination_fns; XlaCompiler compiler(options); XlaCompiler::CompilationResult result; XlaCompiler::CompileOptions compile_options; compile_options.return_updated_values_for_all_resources = true; TF_ASSERT_OK(compiler.CompileGraph(compile_options, "add", std::move(graph), args, &result)); EXPECT_EQ(fast_mem_arg_count, 2); } TEST_F(XlaCompilerTest, HonorShapeRepresentationFnForRetVal) { Scope scope = Scope::NewRootScope().ExitOnError(); auto a = ops::_Arg(scope.WithOpName("A"), DT_INT32, 0); auto var = ops::_Arg(scope.WithOpName("V"), DT_RESOURCE, 1); auto identity = ops::Identity(scope.WithOpName("VIdentity"), var); auto write = ops::AssignAddVariableOp(scope, identity, a); auto read = ops::ReadVariableOp( scope.WithControlDependencies(std::vector<Operation>{write}), var, DT_INT32); auto read_plus_one = ops::Add(scope, read, ops::Const<int32>(scope, 1)); auto d = ops::_Retval(scope.WithOpName("D"), read_plus_one, 0); std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); TF_ASSERT_OK(scope.ToGraph(graph.get())); std::vector<XlaCompiler::Argument> args(2); args[0].kind = XlaCompiler::Argument::kParameter; args[0].type = DT_INT32; args[0].shape = TensorShape({2, 3}); args[1].kind = XlaCompiler::Argument::kResource; 
args[1].resource_kind = XlaResource::kVariable; args[1].initialized = true; args[1].type = DT_INT32; args[1].shape = TensorShape({2, 3}); auto options = DefaultOptions(); XlaShapeLayoutHelpers::ShapeDeterminationFns shape_determination_fns; shape_determination_fns.shape_representation_fn = [](const TensorShape& shape, DataType dt, bool use_fast_memory, XlaLayoutPreference layout_preference) -> absl::StatusOr<xla::Shape> { xla::Shape xla_shape; TF_RETURN_IF_ERROR(TensorShapeToXLAShape(dt, shape, &xla_shape)); *xla_shape.mutable_layout() = xla::LayoutUtil::MakeLayout({0, 1}); return xla_shape; }; options.shape_determination_fns = shape_determination_fns; XlaCompiler compiler(options); XlaCompiler::CompilationResult result; TF_ASSERT_OK(compiler.CompileGraph(XlaCompiler::CompileOptions(), "add", std::move(graph), args, &result)); xla::Shape transposed = xla::ShapeUtil::MakeShapeWithDenseLayout(xla::S32, {2, 3}, {0, 1}); EXPECT_EQ(result.xla_output_shape, xla::ShapeUtil::MakeTupleShape({transposed, transposed})); EXPECT_EQ(result.computation->GetProgramShape().value().result(), xla::ShapeUtil::MakeTupleShape({transposed, transposed})); } TEST_F(XlaCompilerTest, TransposeVariables) { Scope scope = Scope::NewRootScope().ExitOnError(); auto a = ops::_Arg(scope.WithOpName("A"), DT_INT32, 0); auto var = ops::_Arg(scope.WithOpName("V"), DT_RESOURCE, 1); auto identity = ops::Identity(scope.WithOpName("VIdentity"), var); auto write = ops::AssignAddVariableOp(scope, identity, a); auto read = ops::ReadVariableOp( scope.WithControlDependencies(std::vector<Operation>{write}), var, DT_INT32); auto transposed_read = ops::Transpose(scope, read, {1, 0}); auto reshape = ops::Reshape(scope, transposed_read, {2, 3}); auto d = ops::_Retval(scope.WithOpName("D"), reshape, 0); std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); TF_ASSERT_OK(scope.ToGraph(graph.get())); std::vector<XlaCompiler::Argument> args(2); args[0].kind = XlaCompiler::Argument::kParameter; args[0].type = DT_INT32; args[0].shape = TensorShape({2, 3}); args[1].kind = XlaCompiler::Argument::kResource; args[1].resource_kind = XlaResource::kVariable; args[1].initialized = true; args[1].type = DT_INT32; args[1].shape = TensorShape({2, 3}); XlaCompiler compiler(DefaultOptions()); XlaCompiler::CompilationResult result; TF_ASSERT_OK(compiler.CompileGraph(XlaCompiler::CompileOptions(), "transpose", std::move(graph), args, &result)); xla::Shape transposed = xla::ShapeUtil::MakeShapeWithDenseLayout(xla::S32, {2, 3}, {1, 0}); EXPECT_EQ(result.xla_output_shape, xla::ShapeUtil::MakeTupleShape({transposed, transposed})); } TEST_F(XlaCompilerTest, UnrankedFakeParam) { Scope scope = Scope::NewRootScope().ExitOnError(); PartialTensorShape shape; auto a = ops::FakeParam(scope, DT_INT32, shape); auto ret = ops::_Retval(scope.WithOpName("D"), a, 0); std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); TF_ASSERT_OK(scope.ToGraph(graph.get())); XlaCompiler compiler(DefaultOptions()); XlaCompiler::CompilationResult result; TF_ASSERT_OK(compiler.CompileGraph(XlaCompiler::CompileOptions(), "compile", std::move(graph), {}, &result)); EXPECT_EQ(result.xla_output_shape, xla::ShapeUtil::MakeTupleShape( {xla::ShapeUtil::MakeShape(xla::S32, {0})})); } TEST_F(XlaCompilerTest, MixedOrderArguments) { for (bool swap_order : {false, true}) { Scope scope = Scope::NewRootScope().ExitOnError(); auto var = ops::_Arg(scope.WithOpName("V"), DT_RESOURCE, swap_order ? 0 : 1); auto a = ops::_Arg(scope.WithOpName("A"), DT_INT32, swap_order ? 
1 : 0); auto identity = ops::Identity(scope.WithOpName("VIdentity"), var); auto write = ops::AssignAddVariableOp(scope, identity, a); auto read = ops::ReadVariableOp( scope.WithControlDependencies(std::vector<Operation>{write}), var, DT_INT32); auto read_plus_one = ops::Add(scope, read, ops::Const<int32>(scope, 1)); auto d = ops::_Retval(scope.WithOpName("D"), read_plus_one, 0); std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); TF_ASSERT_OK(scope.ToGraph(graph.get())); std::vector<XlaCompiler::Argument> args(2); args[0].kind = XlaCompiler::Argument::kParameter; args[0].type = DT_INT32; args[0].shape = TensorShape({2}); args[1].kind = XlaCompiler::Argument::kResource; args[1].resource_kind = XlaResource::kVariable; args[1].initialized = true; args[1].type = DT_INT32; args[1].shape = TensorShape({2}); if (swap_order) { std::swap(args[0], args[1]); } XlaCompiler compiler(DefaultOptions()); XlaCompiler::CompileOptions compile_options; compile_options.always_return_tuple = false; XlaCompiler::CompilationResult result; TF_ASSERT_OK(compiler.CompileGraph(compile_options, "add", std::move(graph), args, &result)); EXPECT_THAT(result.input_mapping, ::testing::ElementsAre(0, 1)); } } TEST_F(XlaCompilerTest, HasSaneErrorOnNonCompileTimeConstantInputToReshape) { Scope scope = Scope::NewRootScope().ExitOnError(); auto a = ops::_Arg(scope.WithOpName("A"), DT_INT32, 0); auto b = ops::_Arg(scope.WithOpName("B"), DT_INT32, 1); auto c = ops::Reshape(scope.WithOpName("C"), a, b); auto d = ops::_Retval(scope.WithOpName("D"), c, 0); std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); TF_ASSERT_OK(scope.ToGraph(graph.get())); std::vector<XlaCompiler::Argument> args(2); args[0].kind = XlaCompiler::Argument::kParameter; args[0].type = DT_INT32; args[0].shape = TensorShape({2}); args[1].kind = XlaCompiler::Argument::kParameter; args[1].type = DT_INT32; args[1].shape = TensorShape({2}); XlaCompiler compiler(DefaultOptions()); XlaCompiler::CompilationResult result; Status status = compiler.CompileGraph(XlaCompiler::CompileOptions(), "reshape", std::move(graph), args, &result); EXPECT_FALSE(status.ok()); EXPECT_TRUE(absl::StrContains(status.message(), "depends on a parameter")) << status.message(); EXPECT_TRUE(absl::StrContains(status.message(), "{{node C}}")) << status.message(); EXPECT_TRUE( absl::StrContains(status.message(), "must be a compile-time constant")) << status.message(); } TEST_F(XlaCompilerTest, ConstantOutputs) { Scope scope = Scope::NewRootScope().ExitOnError(); auto a = ops::_Arg(scope.WithOpName("A"), DT_INT32, 0); auto b = ops::Const<int32>(scope.WithOpName("B"), 7); auto c = ops::Neg(scope.WithOpName("C"), a); auto d = ops::_Retval(scope.WithOpName("D"), b, 0); auto e = ops::_Retval(scope.WithOpName("E"), c, 1); std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); TF_ASSERT_OK(scope.ToGraph(graph.get())); std::vector<XlaCompiler::Argument> args(1); args[0].kind = XlaCompiler::Argument::kParameter; args[0].type = DT_INT32; args[0].shape = TensorShape({2}); XlaCompiler::Options options = DefaultOptions(); XlaCompiler compiler(options); { std::unique_ptr<Graph> graph_copy(new Graph(OpRegistry::Global())); CopyGraph(*graph, graph_copy.get()); XlaCompiler::CompileOptions compile_options; XlaCompiler::CompilationResult result; TF_ASSERT_OK(compiler.CompileGraph(compile_options, "constants", std::move(graph_copy), args, &result)); ASSERT_EQ(2, result.outputs.size()); EXPECT_FALSE(result.outputs[0].is_constant); EXPECT_FALSE(result.outputs[1].is_constant); xla::Literal 
param0_literal = xla::LiteralUtil::CreateR1<int32>({7, 42}); std::unique_ptr<xla::GlobalData> param0_data = client_->TransferToServer(param0_literal).value(); std::unique_ptr<xla::GlobalData> actual = client_->Execute(*result.computation, {param0_data.get()}).value(); xla::Literal actual_literal = client_->Transfer(*actual).value(); xla::Literal expected0 = xla::LiteralUtil::CreateR0<int32>(7); xla::Literal expected1 = xla::LiteralUtil::CreateR1<int32>({-7, -42}); xla::Literal expected = xla::LiteralUtil::MakeTuple({&expected0, &expected1}); EXPECT_TRUE(xla::LiteralTestUtil::Equal(expected, actual_literal)); } } TEST_F(XlaCompilerTest, ConstantOutputsOfFunctionalNode) { const Tensor seven = test::AsScalar<int>(7); FunctionDef fdef = FunctionDefHelper::Create( "foo", {"a_0:int32"}, {"const:int32", "a:int32"}, {}, { {{"Const"}, "Const", {}, {{"dtype", DT_INT32}, {"value", seven}}}, }, {{"a", "a_0"}, {"const", "Const:output:0"}}); (*fdef.mutable_attr())["_noinline"].set_b(true); FunctionDefLibrary fdef_lib; *(fdef_lib.add_function()) = fdef; std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); { Scope scope = Scope::NewRootScope().ExitOnError(); TF_EXPECT_OK(scope.graph()->AddFunctionLibrary(fdef_lib)); auto arg = ops::_Arg(scope.WithOpName("input_arg"), DT_INT32, 0); NodeDef foo; foo.set_name("foo"); foo.set_op("foo"); *foo.add_input() = "input_arg"; Status status; scope.graph()->AddNode(foo, &status); TF_ASSERT_OK(status); NodeDef retval_1; retval_1.set_name("retval_0"); retval_1.set_op(FunctionLibraryDefinition::kRetOp); *retval_1.add_input() = "foo"; (*retval_1.mutable_attr())["T"].set_type(DT_INT32); (*retval_1.mutable_attr())["index"].set_i(0); scope.graph()->AddNode(retval_1, &status); TF_ASSERT_OK(status); NodeDef retval_2; retval_2.set_name("retval_1"); retval_2.set_op(FunctionLibraryDefinition::kRetOp); *retval_2.add_input() = "foo:1"; (*retval_2.mutable_attr())["T"].set_type(DT_INT32); (*retval_2.mutable_attr())["index"].set_i(1); scope.graph()->AddNode(retval_2, &status); TF_ASSERT_OK(status); TF_ASSERT_OK(scope.ToGraph(graph.get())); } std::vector<XlaCompiler::Argument> args(1); args[0].kind = XlaCompiler::Argument::kParameter; args[0].type = DT_INT32; args[0].shape = TensorShape({1}); XlaCompiler::Options options = DefaultOptions(); FunctionLibraryDefinition flib_def(OpRegistry::Global(), fdef_lib); options.flib_def = &flib_def; XlaCompiler compiler(options); XlaCompiler::CompileOptions compile_options; XlaCompiler::CompilationResult result; TF_ASSERT_OK(compiler.CompileGraph(compile_options, "constants", std::move(graph), args, &result)); ASSERT_EQ(2, result.outputs.size()); EXPECT_FALSE(result.outputs[1].is_constant); } TEST_F(XlaCompilerTest, ResourceManager) { Scope scope = Scope::NewRootScope().ExitOnError(); auto a = ops::_Arg(scope.WithOpName("A"), DT_INT32, 0); auto b = DummyReadResourceCC(scope.WithOpName("B"), a); auto c = ops::Add(scope.WithOpName("C"), b.output2_, b.output1_); auto d = ops::_Retval(scope.WithOpName("D"), c, 0); std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); TF_ASSERT_OK(scope.ToGraph(graph.get())); std::vector<XlaCompiler::Argument> args(1); args[0].kind = XlaCompiler::Argument::kParameter; args[0].type = DT_INT32; args[0].shape = TensorShape({2}); DummyResourceForTest* resource = new DummyResourceForTest(); auto options = DefaultOptions(); std::function<Status(ResourceMgr*)> populate_function = [resource](ResourceMgr* rm) { resource->Ref(); return rm->Create(rm->default_container(), "dummy", resource); }; 
options.populate_resource_manager = &populate_function; XlaCompiler compiler(options); EXPECT_EQ(0, resource->Get()); XlaCompiler::CompilationResult result; TF_ASSERT_OK(compiler.CompileGraph(XlaCompiler::CompileOptions(), "dummy", std::move(graph), args, &result)); EXPECT_EQ(1, resource->Get()); resource->Unref(); } TEST_F(XlaCompilerTest, DeterministicCompilation) { const int64_t test_count = 2; std::vector<XlaCompiler::CompilationResult> results(test_count); for (int64_t i = 0; i < test_count; ++i) { Scope scope = Scope::NewRootScope().ExitOnError(); auto a = ops::_Arg(scope.WithOpName("A"), DT_INT32, 0); auto b = ops::Neg(scope.WithOpName("B"), a); auto c = ops::Neg(scope.WithOpName("C"), a); auto d = ops::Add(scope.WithOpName("D"), b, c); auto e = ops::_Retval(scope.WithOpName("E"), d, 0); std::unique_ptr<Graph> graph(new Graph(OpRegistry::Global())); TF_ASSERT_OK(scope.ToGraph(graph.get())); std::vector<XlaCompiler::Argument> args(1); args[0].kind = XlaCompiler::Argument::kPara
1,097
cpp
tensorflow/tensorflow
graph_compiler
tensorflow/compiler/tf2xla/graph_compiler.cc
tensorflow/compiler/tf2xla/graph_compiler_test.cc
#ifndef TENSORFLOW_COMPILER_TF2XLA_GRAPH_COMPILER_H_ #define TENSORFLOW_COMPILER_TF2XLA_GRAPH_COMPILER_H_ #include "tensorflow/compiler/tf2xla/xla_compilation_device.h" #include "tensorflow/compiler/tf2xla/xla_context.h" #include "xla/client/local_client.h" #include "tensorflow/core/common_runtime/device.h" #include "tensorflow/core/common_runtime/device_mgr.h" #include "tensorflow/core/common_runtime/function.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/platform/env.h" #include "tensorflow/core/platform/mutex.h" #include "tensorflow/core/platform/notification.h" #include "tensorflow/core/platform/thread_annotations.h" #include "tensorflow/core/public/version.h" namespace tensorflow { class GraphCompiler { public: GraphCompiler(XlaCompilationDevice* device, Graph* graph, FunctionLibraryRuntime* flib, ScopedStepContainer* step_container) : device_(device), graph_(graph), flib_(flib), step_container_(step_container) {} Status Compile(); private: void PartiallySetupParams(OpKernelContext::Params* params); Status CompileFunctionalNode(Node* n, OpKernelContext* op_context); XlaCompilationDevice* device_; Graph* graph_; FunctionLibraryRuntime* flib_; ScopedStepContainer* step_container_; absl::InlinedVector<TensorValue, 4> tensor_inputs_; }; } #endif #include "tensorflow/compiler/tf2xla/graph_compiler.h" #include <deque> #include <numeric> #include <utility> #include <vector> #include "tensorflow/compiler/tf2xla/const_analysis.h" #include "tensorflow/compiler/tf2xla/literal_util.h" #include "tensorflow/compiler/tf2xla/shape_util.h" #include "tensorflow/compiler/tf2xla/side_effect_util.h" #include "tensorflow/compiler/tf2xla/type_util.h" #include "tensorflow/compiler/tf2xla/xla_compiler.h" #include "tensorflow/compiler/tf2xla/xla_context.h" #include "tensorflow/compiler/tf2xla/xla_expression.h" #include "tensorflow/compiler/tf2xla/xla_op_kernel.h" #include "xla/client/client_library.h" #include "xla/client/xla_builder.h" #include "tensorflow/core/common_runtime/device.h" #include "tensorflow/core/common_runtime/executor.h" #include "tensorflow/core/common_runtime/function.h" #include "tensorflow/core/common_runtime/graph_constructor.h" #include "tensorflow/core/common_runtime/graph_optimizer.h" #include "tensorflow/core/framework/attr_value.pb.h" #include "tensorflow/core/framework/attr_value_util.h" #include "tensorflow/core/framework/function.h" #include "tensorflow/core/framework/node_def_util.h" #include "tensorflow/core/framework/op_kernel.h" #include "tensorflow/core/graph/algorithm.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/graph/validate.h" #include "tensorflow/core/lib/core/errors.h" #include "tensorflow/core/lib/gtl/cleanup.h" #include "tensorflow/core/lib/hash/hash.h" #include "tensorflow/core/lib/monitoring/counter.h" #include "tensorflow/core/platform/logging.h" #include "tensorflow/core/public/version.h" #include "tensorflow/core/util/dump_graph.h" namespace tensorflow { auto* graph_compiler_failed_compilation_op_count = tensorflow::monitoring::Counter<1>::New( "/tensorflow/core/tf2xla/graph_compilation_failed_op_count", "Records an op that failed to compile", "op_name"); namespace { Status PrepareArguments(XlaOpKernelContext* ctx, Graph* graph, const std::vector<const XlaExpression*>& expressions, const NameAttrList& func, std::vector<XlaCompiler::Argument>* args) { auto client = ctx->compiler()->client(); std::vector<bool> 
arg_must_be_compile_time_constant(expressions.size()); TF_RETURN_IF_ERROR(BackwardsConstAnalysis( *graph, &arg_must_be_compile_time_constant, nullptr, ctx->function_library())); args->resize(expressions.size()); for (int i = 0, end = args->size(); i < end; ++i) { XlaCompiler::Argument& arg = (*args)[i]; arg.type = ctx->input_type(i); arg.shape = ctx->InputShape(i); switch (expressions[i]->kind()) { case XlaExpression::Kind::kConstant: arg.kind = XlaCompiler::Argument::kConstant; arg.constant_value = *expressions[i]->constant_value(); break; case XlaExpression::Kind::kXlaOp: if (arg_must_be_compile_time_constant[i]) { TF_ASSIGN_OR_RETURN(std::optional<Tensor> value, expressions[i]->ResolveConstant(client)); if (value.has_value()) { arg.kind = XlaCompiler::Argument::kConstant; arg.constant_value = *value; } else { arg.kind = XlaCompiler::Argument::kParameter; } } else { arg.kind = XlaCompiler::Argument::kParameter; } break; case XlaExpression::Kind::kResource: { XlaResource* resource = expressions[i]->resource(); XlaCompiler::PopulateArgumentFromResource(*resource, &arg); break; } case XlaExpression::Kind::kTensorList: { arg.kind = XlaCompiler::Argument::kTensorList; const xla::XlaOp& tensor_list = expressions[i]->handle(); arg.shape = tensor_list.builder()->GetShape(tensor_list).value(); break; } case XlaExpression::Kind::kInvalid: return errors::InvalidArgument("Invalid function argument"); } } return absl::OkStatus(); } } Status GraphCompiler::Compile() { TF_RETURN_IF_ERROR(graph::ValidateGraphHasNoCycle(*graph_)); using NodeOutputs = std::vector<TensorValue>; std::vector<NodeOutputs> output_registry(graph_->num_node_ids()); auto output_registry_cleanup = gtl::MakeCleanup([&output_registry] { for (const NodeOutputs& outputs : output_registry) { for (const TensorValue& value : outputs) { CHECK(!value.is_ref()); delete value.tensor; } } }); std::vector<Node*> topo_sorted_nodes; GetReversePostOrder(*graph_, &topo_sorted_nodes, NodeComparatorName()); OpKernelContext::Params params; PartiallySetupParams(&params); for (Node* n : topo_sorted_nodes) { OpKernel* op_kernel_raw = nullptr; Status s = flib_->CreateKernel(n->properties(), &op_kernel_raw); std::unique_ptr<OpKernel> op_kernel(op_kernel_raw); if (!s.ok()) { s = AttachDef(s, *n); LOG(ERROR) << "Executor failed to create kernel. 
" << s; return s; } TF_RET_CHECK(!n->IsRecv() && !n->IsSend() && !n->IsSwitch()) << "Not supported node: " << n->DebugString(); params.op_kernel = op_kernel.get(); absl::InlinedVector<AllocatorAttributes, 4> output_attr(n->num_outputs()); params.output_attr_array = output_attr.data(); tensor_inputs_.clear(); tensor_inputs_.resize(n->num_inputs()); for (auto* e : n->in_edges()) { if (e->IsControlEdge()) continue; const Node* src = e->src(); const int output_registry_size = output_registry.size(); TF_RET_CHECK(src->id() < output_registry_size); const NodeOutputs& src_outputs = output_registry[src->id()]; tensor_inputs_.at(e->dst_input()) = src_outputs.at(e->src_output()); } params.inputs = tensor_inputs_; OpKernelContext op_context(&params, n->num_outputs()); VLOG(3) << "Translating " << params.op_kernel->name(); if (IsFunctionCall(*flib_->GetFunctionLibraryDefinition(), *n)) { TF_RETURN_IF_ERROR(CompileFunctionalNode(n, &op_context)); } else { device_->Compute(CHECK_NOTNULL(params.op_kernel), &op_context); Status s = op_context.status(); if (!s.ok()) { graph_compiler_failed_compilation_op_count ->GetCell(params.op_kernel->def().op()) ->IncrementBy(1); return AttachDef(s, n->def()); } } NodeOutputs& outputs = output_registry[n->id()]; outputs.resize(n->num_outputs()); for (int o = 0; o < n->num_outputs(); ++o) { outputs[o] = op_context.release_output(o); if (outputs[o].tensor == nullptr) { return errors::Internal("Missing xla_context ", o, "-th output from ", FormatNodeForError(*n)); } } } return absl::OkStatus(); } namespace { Status GetFunctionNameAndAttr(const FunctionLibraryRuntime& flib, const Node& node, NameAttrList* func) { if (node.IsPartitionedCall()) { const AttrValue* attr_value; TF_RETURN_IF_ERROR( node.attrs().Find(FunctionLibraryDefinition::kFuncAttr, &attr_value)); if (!attr_value->has_func()) { return errors::InvalidArgument( "The attribute value for attribute 'f' in node ", node.DebugString(), " does not have 'func' field set"); } *func = attr_value->func(); return absl::OkStatus(); } if (flib.GetFunctionLibraryDefinition()->Find(node.def().op())) { func->set_name(node.type_string()); } else { func->set_name(FunctionLibraryDefinition::kGradientOp); } *func->mutable_attr() = node.def().attr(); return absl::OkStatus(); } } Status GraphCompiler::CompileFunctionalNode(Node* n, OpKernelContext* op_context) { TF_RET_CHECK(IsFunctionCall(*flib_->GetFunctionLibraryDefinition(), *n)); XlaOpKernelContext xla_op_context(op_context); XlaContext& context = XlaContext::Get(op_context); auto* b = context.builder(); XlaCompiler* compiler = xla_op_context.compiler(); NameAttrList func; TF_RETURN_IF_ERROR(GetFunctionNameAndAttr(*flib_, *n, &func)); std::vector<const XlaExpression*> expressions; for (auto tensor : tensor_inputs_) { auto expression = reinterpret_cast<const XlaExpression*>(tensor->tensor_data().data()); expressions.push_back(expression); } std::vector<XlaCompiler::Argument> arguments; const FunctionBody* fbody; TF_RETURN_IF_ERROR(compiler->FindFunctionBody(func, &fbody)); auto graph = compiler->GetGraph(fbody); TF_RETURN_IF_ERROR(PrepareArguments(&xla_op_context, graph.get(), expressions, func, &arguments)); bool add_token_input_output = func.attr().find(kXlaTokenInputNodesAttrName) != func.attr().end(); XlaCompiler::CompileOptions compile_options; compile_options.is_entry_computation = false; compile_options.add_token_input_output = add_token_input_output; XlaCompiler::CompilationResult result; TF_RETURN_IF_ERROR( compiler->CompileFunction(compile_options, func, arguments, 
&result)); TF_RET_CHECK(arguments.size() == expressions.size()); std::vector<xla::XlaOp> handles; for (int64_t i = 0, end = expressions.size(); i < end; ++i) { if (arguments[i].kind == XlaCompiler::Argument::kConstant) { continue; } if (arguments[i].kind == XlaCompiler::Argument::kResource) { handles.push_back(expressions[i]->resource()->value()); } else { handles.push_back(expressions[i]->handle()); } } if (add_token_input_output) { std::vector<string> token_input_nodes; TF_RETURN_IF_ERROR(GetNodeAttr(AttrSlice(&func.attr()), kXlaTokenInputNodesAttrName, &token_input_nodes)); std::vector<xla::XlaOp> token_inputs; for (const string& node_name : token_input_nodes) { auto token_or = compiler->GetNodeToken(node_name); TF_RETURN_IF_ERROR(token_or.status()); token_inputs.push_back(std::move(token_or).value()); } xla::XlaOp token_input = xla::AfterAll(b, token_inputs); handles.push_back(token_input); } auto output_handle = xla::Call(b, *result.computation, handles); int computation_output = 0; for (int64_t i = 0; i < n->num_outputs(); ++i) { if (result.outputs[i].is_constant) { xla_op_context.SetConstantOutput(i, result.outputs[i].constant_value); } else { if (result.outputs[i].is_tensor_list) { xla_op_context.SetTensorListOutput( i, xla::GetTupleElement(output_handle, computation_output)); } else { xla_op_context.SetOutput( i, xla::GetTupleElement(output_handle, computation_output)); } ++computation_output; } } for (int64_t i = 0, end = result.resource_updates.size(); i < end; i++) { if (result.resource_updates[i].modified) { XlaResource* resource = expressions[result.resource_updates[i].input_index]->resource(); xla::XlaOp updated_value = xla::GetTupleElement(output_handle, i + n->num_outputs()); TF_RETURN_IF_ERROR(resource->SetValue(updated_value)); } } if (add_token_input_output) { std::string node_name; if (!GetNodeAttr(n->attrs(), kXlaOriginalOutsideCompilationNodeName, &node_name) .ok()) node_name = n->name(); TF_RETURN_IF_ERROR(compiler->SetNodeToken( node_name, xla::GetTupleElement(output_handle, computation_output))); } return b->first_error(); } void GraphCompiler::PartiallySetupParams(OpKernelContext::Params* params) { params->device = device_; params->step_container = step_container_; params->resource_manager = device_->resource_manager(); params->function_library = flib_; } }
#include "tensorflow/compiler/tf2xla/graph_compiler.h" #include <memory> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "tensorflow/compiler/tf2xla/graph_compiler_util.h" #include "tensorflow/compiler/tf2xla/tf2xla.pb.h" #include "tensorflow/compiler/tf2xla/xla_compilation_device.h" #include "tensorflow/compiler/tf2xla/xla_compiler.h" #include "tensorflow/compiler/tf2xla/xla_op_kernel.h" #include "tensorflow/compiler/tf2xla/xla_op_registry.h" #include "tensorflow/core/common_runtime/device_mgr.h" #include "tensorflow/core/common_runtime/process_function_library_runtime.h" #include "tensorflow/core/framework/attr_value.pb.h" #include "tensorflow/core/framework/attr_value_util.h" #include "tensorflow/core/framework/graph.pb.h" #include "tensorflow/core/framework/node_def.pb.h" #include "tensorflow/core/framework/op.h" #include "tensorflow/core/framework/types.h" #include "tensorflow/core/graph/node_builder.h" #include "tensorflow/core/lib/monitoring/cell_reader.h" #include "tensorflow/core/platform/refcount.h" #include "tensorflow/core/public/session_options.h" #include "tensorflow/core/public/version.h" namespace tensorflow { namespace { using ::tensorflow::monitoring::testing::CellReader; constexpr char kOpCompilationFailureStreamz[] = "/tensorflow/core/tf2xla/graph_compilation_failed_op_count"; class DummyOp : public XlaOpKernel { public: explicit DummyOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {} void Compile(XlaOpKernelContext* ctx) override {} }; REGISTER_KERNEL_BUILDER(Name("NoOp").Device(DEVICE_DEFAULT), DummyOp); REGISTER_KERNEL_BUILDER(Name("NoOp").Device("XLA_TPU_JIT"), DummyOp); REGISTER_KERNEL_BUILDER(Name("NoOp").Device("XLA_CPU_JIT"), DummyOp); class MockAlwaysFailsOp : public XlaOpKernel { public: explicit MockAlwaysFailsOp(OpKernelConstruction* ctx) : XlaOpKernel(ctx) {} void Compile(XlaOpKernelContext* ctx) override { ctx->CtxFailure(__FILE__, __LINE__, errors::InvalidArgument("MockBroken")); } }; REGISTER_OP("MockAlwaysFails") .SetShapeFn(shape_inference::UnknownShape) .Doc(R"doc( A test only Op that always fails to compile. 
)doc"); REGISTER_KERNEL_BUILDER(Name("MockAlwaysFails").Device(DEVICE_DEFAULT), MockAlwaysFailsOp); REGISTER_KERNEL_BUILDER(Name("MockAlwaysFails").Device("XLA_CPU_JIT"), MockAlwaysFailsOp); REGISTER_KERNEL_BUILDER(Name("MockAlwaysFails").Device("XLA_TPU_JIT"), MockAlwaysFailsOp); REGISTER_XLA_OP(Name("MockAlwaysFails").CompilationOnly(), MockAlwaysFailsOp); class GraphCompilerTest : public ::testing::Test { public: void SetUp() override { device_ = new tensorflow::XlaCompilationDevice( tensorflow::SessionOptions(), tensorflow::DeviceType("XLA_TPU_JIT")); device_mgr_ = std::make_unique<StaticDeviceMgr>(absl::WrapUnique(device_)); } Status RunGraphCompiler(Graph& graph) { ProcessFunctionLibraryRuntime runtime( device_mgr_.get(), Env::Default(), nullptr, TF_GRAPH_DEF_VERSION, &graph.flib_def(), OptimizerOptions()); xla::XlaBuilder builder("test_builder"); XlaCompiler::Options options; options.device_type = "XLA_TPU_JIT"; XlaCompiler xla_compiler(options); XlaContext* xla_context = new XlaContext(&xla_compiler, &builder, &graph); core::ScopedUnref context_unref(xla_context); xla_context->Ref(); auto step_container = std::make_unique<ScopedStepContainer>(0, [this](const string& name) { Status status = this->device_->resource_manager()->Cleanup(name); }); auto container_status = step_container->Create( device_->resource_manager(), XlaContext::kXlaContextResourceName, xla_context); GraphCompiler graph_compiler( device_, &graph, runtime.GetFLR(device_->name()), step_container.get()); return graph_compiler.Compile(); } protected: XlaCompilationDevice* device_; std::unique_ptr<StaticDeviceMgr> device_mgr_; }; TEST_F(GraphCompilerTest, CompilesGraph) { Graph graph(OpRegistry::Global()); EXPECT_TRUE(RunGraphCompiler(graph).ok()); } TEST_F(GraphCompilerTest, RecordsStreamzFailedCompilationNode) { Graph graph(OpRegistry::Global()); Node* mock_fail; ASSERT_TRUE(NodeBuilder("mock_fail", "MockAlwaysFails") .Finalize(&graph, &mock_fail) .ok()); graph.AddControlEdge(graph.source_node(), mock_fail); graph.AddControlEdge(mock_fail, graph.sink_node()); CellReader<int64_t> op_reader(kOpCompilationFailureStreamz); EXPECT_FALSE(RunGraphCompiler(graph).ok()); EXPECT_EQ(op_reader.Delta("MockAlwaysFails"), 1); } } }
1098
cpp
tensorflow/tensorflow
type_util
third_party/xla/xla/mlir/utils/type_util.cc
third_party/xla/xla/mlir/utils/type_util_test.cc
#ifndef XLA_MLIR_UTILS_TYPE_UTIL_H_ #define XLA_MLIR_UTILS_TYPE_UTIL_H_ #include "absl/status/statusor.h" #include "mlir/IR/Builders.h" #include "mlir/IR/Types.h" #include "xla/xla_data.pb.h" namespace xla { absl::StatusOr<mlir::Type> ConvertPrimitiveTypeToMlirType( xla::PrimitiveType type, mlir::Builder b); xla::PrimitiveType ConvertMlirTypeToPrimitiveType(mlir::Type type); } #endif #include "xla/mlir/utils/type_util.h" #include "absl/status/statusor.h" #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/Types.h" #include "mlir/Support/LLVM.h" #include "xla/primitive_util.h" #include "xla/util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/statusor.h" namespace xla { absl::StatusOr<mlir::Type> ConvertPrimitiveTypeToMlirType( xla::PrimitiveType type, mlir::Builder b) { switch (type) { case xla::PrimitiveType::PRED: return b.getI1Type(); case xla::PrimitiveType::F8E5M2: return b.getFloat8E5M2Type(); case xla::PrimitiveType::F8E4M3FN: return b.getFloat8E4M3FNType(); case xla::PrimitiveType::F8E4M3B11FNUZ: return b.getFloat8E4M3B11FNUZType(); case xla::PrimitiveType::F8E5M2FNUZ: return b.getFloat8E5M2FNUZType(); case xla::PrimitiveType::F8E4M3FNUZ: return b.getFloat8E4M3FNUZType(); case xla::PrimitiveType::F16: return b.getF16Type(); case xla::PrimitiveType::BF16: return b.getBF16Type(); case xla::PrimitiveType::F32: return b.getF32Type(); case xla::PrimitiveType::F64: return b.getF64Type(); default: if (xla::primitive_util::IsIntegralType(type)) { return mlir::IntegerType::get( b.getContext(), xla::primitive_util::BitWidth(type), xla::primitive_util::IsUnsignedIntegralType(type) ? mlir::IntegerType::Unsigned : mlir::IntegerType::Signless); } if (xla::primitive_util::IsComplexType(type)) { TF_ASSIGN_OR_RETURN( mlir::Type component_type, xla::ConvertPrimitiveTypeToMlirType( xla::primitive_util::ComplexComponentType(type), b)); return mlir::ComplexType::get(component_type); } return xla::Internal("Unsupported type: %s", xla::PrimitiveType_Name(type)); } } xla::PrimitiveType ConvertMlirTypeToPrimitiveType(mlir::Type type) { if (type.isFloat8E5M2()) { return xla::PrimitiveType::F8E5M2; } else if (type.isFloat8E4M3FN()) { return xla::PrimitiveType::F8E4M3FN; } else if (type.isFloat8E4M3B11FNUZ()) { return xla::PrimitiveType::F8E4M3B11FNUZ; } else if (type.isFloat8E4M3FNUZ()) { return xla::PrimitiveType::F8E4M3FNUZ; } else if (type.isFloat8E5M2FNUZ()) { return xla::PrimitiveType::F8E5M2FNUZ; } else if (type.isBF16()) { return xla::PrimitiveType::BF16; } else if (type.isF16()) { return xla::PrimitiveType::F16; } else if (type.isF32()) { return xla::PrimitiveType::F32; } else if (type.isF64()) { return xla::PrimitiveType::F64; } else if (auto complex_type = mlir::dyn_cast<mlir::ComplexType>(type)) { mlir::Type element_ty = complex_type.getElementType(); return xla::primitive_util::ComplexType( ConvertMlirTypeToPrimitiveType(element_ty)); } else if (auto integer_type = mlir::dyn_cast<mlir::IntegerType>(type)) { bool is_unsigned = integer_type.isUnsigned(); if (integer_type.getWidth() == 1) { return xla::PrimitiveType::PRED; } return is_unsigned ? xla::primitive_util::UnsignedIntegralTypeForBitWidth( integer_type.getWidth()) : xla::primitive_util::SignedIntegralTypeForBitWidth( integer_type.getWidth()); } return xla::PrimitiveType::PRIMITIVE_TYPE_INVALID; } }
#include "xla/mlir/utils/type_util.h" #include <string> #include <vector> #include <gtest/gtest.h> #include "absl/functional/function_ref.h" #include "llvm/Support/raw_ostream.h" #include "mlir/IR/Builders.h" #include "mlir/IR/BuiltinTypes.h" #include "mlir/IR/MLIRContext.h" #include "xla/primitive_util.h" #include "xla/xla_data.pb.h" #include "tsl/platform/statusor.h" namespace xla { namespace { struct TypeUtilTestParam { xla::PrimitiveType xla_t; absl::FunctionRef<mlir::Type(mlir::Builder)> mlir_t; }; inline std::string mlirTypeToString(mlir::Type type) { std::string result{}; llvm::raw_string_ostream sstream(result); sstream << type; return result; } class TypeUtilTest : public ::testing::TestWithParam<TypeUtilTestParam> {}; TEST_P(TypeUtilTest, ConvertInvalidTypeTest) { mlir::MLIRContext context; mlir::Builder b(&context); EXPECT_EQ(ConvertMlirTypeToPrimitiveType(b.getIntegerType(17)), xla::PrimitiveType::PRIMITIVE_TYPE_INVALID); } TEST_P(TypeUtilTest, MLIRToPrimitiveTypeConversionTest) { mlir::MLIRContext context = mlir::MLIRContext(); mlir::Builder b = mlir::Builder(&context); xla::PrimitiveType xla_type_expected = GetParam().xla_t; mlir::Type mlir_type = GetParam().mlir_t(b); xla::PrimitiveType xla_type_actual = ConvertMlirTypeToPrimitiveType(mlir_type); EXPECT_EQ(xla_type_actual, xla_type_expected) << "Expected: " << primitive_util::LowercasePrimitiveTypeName(xla_type_expected) << ". Actual: " << primitive_util::LowercasePrimitiveTypeName(xla_type_actual) << "."; } TEST_P(TypeUtilTest, PrimitiveTypeToMLIRTypeConversionTest) { mlir::MLIRContext context = mlir::MLIRContext(); mlir::Builder b = mlir::Builder(&context); xla::PrimitiveType xla_type = GetParam().xla_t; mlir::Type mlir_type_expected = GetParam().mlir_t(b); TF_ASSERT_OK_AND_ASSIGN(mlir::Type mlir_type_actual, ConvertPrimitiveTypeToMlirType(xla_type, b)); EXPECT_EQ(mlir_type_actual, mlir_type_expected) << "Expected: " << mlirTypeToString(mlir_type_expected) << ". Actual: " << mlirTypeToString(mlir_type_actual) << "."; } TEST_P(TypeUtilTest, BidirectionalConversionTest) { mlir::MLIRContext context = mlir::MLIRContext(); mlir::Builder b = mlir::Builder(&context); xla::PrimitiveType xla_type_expected = GetParam().xla_t; TF_ASSERT_OK_AND_ASSIGN(mlir::Type mlir_type_actual, ConvertPrimitiveTypeToMlirType(xla_type_expected, b)); xla::PrimitiveType xla_type_actual = ConvertMlirTypeToPrimitiveType(mlir_type_actual); EXPECT_EQ(xla_type_actual, xla_type_expected) << "Expected: " << primitive_util::LowercasePrimitiveTypeName(xla_type_expected) << ". Actual: " << primitive_util::LowercasePrimitiveTypeName(xla_type_actual) << ". 
Intermediate MLIR type: " << mlirTypeToString(mlir_type_actual) << "."; } INSTANTIATE_TEST_SUITE_P( Execute, TypeUtilTest, ::testing::ValuesIn(std::vector<TypeUtilTestParam>( {{PRED, [](mlir::Builder b) { return b.getI1Type(); }}, {F8E5M2, [](mlir::Builder b) { return b.getFloat8E5M2Type(); }}, {F8E4M3FN, [](mlir::Builder b) { return b.getFloat8E4M3FNType(); }}, {F8E4M3B11FNUZ, [](mlir::Builder b) { return b.getFloat8E4M3B11FNUZType(); }}, {F8E5M2FNUZ, [](mlir::Builder b) { return b.getFloat8E5M2FNUZType(); }}, {F8E4M3FNUZ, [](mlir::Builder b) { return b.getFloat8E4M3FNUZType(); }}, {F16, [](mlir::Builder b) { return b.getF16Type(); }}, {BF16, [](mlir::Builder b) { return b.getBF16Type(); }}, {F32, [](mlir::Builder b) { return b.getF32Type(); }}, {F64, [](mlir::Builder b) { return b.getF64Type(); }}, {U4, [](mlir::Builder b) { return b.getIntegerType(4, false); }}, {U8, [](mlir::Builder b) { return b.getIntegerType(8, false); }}, {U16, [](mlir::Builder b) { return b.getIntegerType(16, false); }}, {U32, [](mlir::Builder b) { return b.getIntegerType(32, false); }}, {U64, [](mlir::Builder b) { return b.getIntegerType(64, false); }}, {S4, [](mlir::Builder b) { return mlir::IntegerType::get(b.getContext(), 4, mlir::IntegerType::Signless); }}, {S8, [](mlir::Builder b) { return mlir::IntegerType::get(b.getContext(), 8, mlir::IntegerType::Signless); }}, {S16, [](mlir::Builder b) { return mlir::IntegerType::get(b.getContext(), 16, mlir::IntegerType::Signless); }}, {S32, [](mlir::Builder b) { return mlir::IntegerType::get(b.getContext(), 32, mlir::IntegerType::Signless); }}, {S64, [](mlir::Builder b) { return mlir::IntegerType::get(b.getContext(), 64, mlir::IntegerType::Signless); }}})), [](const auto& info) { mlir::MLIRContext context; mlir::Builder b(&context); return absl::StrFormat( "xla_%s_mlir_%s", primitive_util::LowercasePrimitiveTypeName(info.param.xla_t), mlirTypeToString(info.param.mlir_t(b))); }); } }
1099
cpp
tensorflow/tensorflow
xla_expression
tensorflow/compiler/tf2xla/xla_expression.cc
tensorflow/compiler/tf2xla/xla_expression_test.cc
#ifndef TENSORFLOW_COMPILER_TF2XLA_XLA_EXPRESSION_H_ #define TENSORFLOW_COMPILER_TF2XLA_XLA_EXPRESSION_H_ #include "absl/types/optional.h" #include "tensorflow/compiler/tf2xla/xla_resource.h" #include "xla/client/client.h" #include "xla/client/value_inference.h" #include "xla/client/xla_builder.h" #include "tensorflow/core/framework/tensor.h" #include "tensorflow/core/lib/core/status.h" #include "tensorflow/core/platform/statusor.h" namespace tensorflow { class XlaExpression { public: enum class Kind { kInvalid, kConstant, kXlaOp, kResource, kTensorList, }; XlaExpression(); XlaExpression(const XlaExpression&) = default; XlaExpression& operator=(const XlaExpression&) = default; static XlaExpression Invalid(); static XlaExpression Constant(Tensor value); static XlaExpression XlaOp(xla::XlaOp value, DataType dtype); static XlaExpression TensorList(xla::XlaOp tensor_list); static XlaExpression Resource(XlaResource* resource); static XlaExpression ConstantResource(Tensor value, XlaResource* resource); Kind kind() const { return kind_; } DataType dtype() const { return dtype_; } const xla::XlaOp& handle() const { return handle_; } std::optional<Tensor> constant_value() const { if (kind_ == Kind::kResource && resource_->IsOverwritten()) { return std::nullopt; } return constant_value_; } void set_value_bound(Tensor tensor) { value_bound_.emplace(std::move(tensor)); } std::optional<Tensor> value_bound() const { return value_bound_; } void set_value_dynamism(Tensor tensor) { value_dynamism_.emplace(std::move(tensor)); } std::optional<Tensor> value_dynamism() const { return value_dynamism_; } XlaResource* resource() const { return resource_; } string HumanString() const; xla::XlaOp AsXlaOp(xla::XlaBuilder* builder) const; absl::StatusOr<std::optional<Tensor>> ResolveConstant( xla::Client* client, bool dynamic_dimension_is_minus_one = false, xla::ValueInferenceMode mode = xla::ValueInferenceMode::kValue) const; absl::StatusOr<Tensor> ResolveDynamism() const; absl::StatusOr<TensorShape> GetShape() const; absl::StatusOr<xla::Shape> GetXlaShape() const; static const XlaExpression* CastExpressionFromTensor(const Tensor& tensor); static void AssignExpressionToTensor(const XlaExpression& value, Tensor* tensor); private: Kind kind_ = Kind::kInvalid; DataType dtype_ = DT_INVALID; xla::XlaOp handle_; std::optional<Tensor> constant_value_; std::optional<Tensor> value_bound_; std::optional<Tensor> value_dynamism_; XlaResource* resource_ = nullptr; }; } #endif #include "tensorflow/compiler/tf2xla/xla_expression.h" #include "tensorflow/compiler/tf2xla/literal_util.h" #include "tensorflow/compiler/tf2xla/shape_util.h" #include "xla/client/value_inference.h" #include "tensorflow/core/framework/types.pb.h" #include "tensorflow/core/lib/core/errors.h" namespace tensorflow { XlaExpression::XlaExpression() = default; XlaExpression XlaExpression::Invalid() { XlaExpression e; e.kind_ = Kind::kInvalid; return e; } XlaExpression XlaExpression::Constant(Tensor value) { XlaExpression e; e.kind_ = Kind::kConstant; e.dtype_ = value.dtype(); e.constant_value_ = value; return e; } XlaExpression XlaExpression::ConstantResource(Tensor value, XlaResource* resource) { XlaExpression e; e.kind_ = Kind::kResource; e.dtype_ = DT_RESOURCE; e.resource_ = resource; e.constant_value_ = value; return e; } XlaExpression XlaExpression::XlaOp(xla::XlaOp value, DataType dtype) { XlaExpression e; e.kind_ = Kind::kXlaOp; e.dtype_ = dtype; e.handle_ = value; return e; } XlaExpression XlaExpression::TensorList(xla::XlaOp tensor_list) { XlaExpression 
e; e.kind_ = Kind::kTensorList; e.dtype_ = DT_VARIANT; e.handle_ = tensor_list; return e; } XlaExpression XlaExpression::Resource(XlaResource* resource) { XlaExpression e; e.kind_ = Kind::kResource; e.dtype_ = DT_RESOURCE; e.resource_ = resource; return e; } string XlaExpression::HumanString() const { switch (kind_) { case Kind::kInvalid: return "invalid"; case Kind::kConstant: return "constant"; case Kind::kXlaOp: return "xla_op"; case Kind::kResource: return "resource"; case Kind::kTensorList: return "tensor_list"; } } xla::XlaOp XlaExpression::AsXlaOp(xla::XlaBuilder* builder) const { return builder->ReportErrorOrReturn([&]() -> absl::StatusOr<xla::XlaOp> { switch (kind_) { case Kind::kConstant: { xla::BorrowingLiteral literal; TF_RETURN_IF_ERROR( HostTensorToBorrowingLiteral(*constant_value_, &literal)); return xla::ConstantLiteral(builder, literal); } case Kind::kTensorList: TF_FALLTHROUGH_INTENDED; case Kind::kXlaOp: if (builder != handle_.builder()) { return errors::InvalidArgument( "Mismatched builders in XlaExpression::AsXlaOp"); } return handle_; default: return errors::InvalidArgument("AsXlaOp called on XlaExpression: ", HumanString()); } }); } absl::StatusOr<Tensor> XlaExpression::ResolveDynamism() const { switch (kind()) { case Kind::kConstant: { Tensor constant_false(DT_BOOL, constant_value()->shape()); auto flat = constant_false.flat<bool>(); for (int64_t i = 0; i < flat.size(); ++i) flat(i) = false; return constant_false; } case Kind::kXlaOp: break; case Kind::kTensorList: TF_FALLTHROUGH_INTENDED; case Kind::kResource: TF_FALLTHROUGH_INTENDED; case Kind::kInvalid: return errors::InvalidArgument( "ResolveDynamism called on unsupported XlaExpression: ", HumanString()); } TF_ASSIGN_OR_RETURN(TensorShape shape, GetShape()); std::vector<int64_t> layout_indices(shape.dims()); std::iota(layout_indices.rbegin(), layout_indices.rend(), 0); xla::ValueInference value_inference(handle().builder()); TF_ASSIGN_OR_RETURN(xla::LiteralSlice literal, value_inference.AnalyzeIsDynamic(handle())); Tensor tensor(DT_BOOL); TF_RETURN_IF_ERROR(LiteralToHostTensor(literal, DT_BOOL, &tensor)); return tensor; } absl::StatusOr<std::optional<Tensor>> XlaExpression::ResolveConstant( xla::Client* client, bool dynamic_dimension_is_minus_one, xla::ValueInferenceMode mode) const { switch (kind()) { case Kind::kConstant: case Kind::kResource: return constant_value(); case Kind::kXlaOp: break; case Kind::kTensorList: TF_FALLTHROUGH_INTENDED; case Kind::kInvalid: return errors::InvalidArgument( "ResolveConstant called on XlaExpression: ", HumanString()); } TF_ASSIGN_OR_RETURN(TensorShape shape, GetShape()); std::vector<int64_t> layout_indices(shape.dims()); std::iota(layout_indices.rbegin(), layout_indices.rend(), 0); xla::Layout layout = xla::LayoutUtil::MakeLayout(layout_indices); if (mode == xla::ValueInferenceMode::kLowerBound || mode == xla::ValueInferenceMode::kUpperBound || mode == xla::ValueInferenceMode::kValue) { std::vector<int64_t> layout_indices(shape.dims()); std::iota(layout_indices.rbegin(), layout_indices.rend(), 0); xla::ValueInference value_inference(handle().builder()); TF_ASSIGN_OR_RETURN(xla::OptionalLiteral literal, value_inference.AnalyzeConstant(handle(), mode)); if (!literal.GetValue().has_value()) { return {std::nullopt}; } Tensor tensor; TF_RETURN_IF_ERROR(LiteralToHostTensor( literal.GetValue().value().Relayout(layout), dtype(), &tensor)); return {tensor}; } TF_ASSIGN_OR_RETURN(bool is_constant, handle().builder()->IsConstant(handle())); if (!is_constant) { return {std::nullopt}; } 
if (!client) return errors::InvalidArgument("client is required to resolve constant"); TF_ASSIGN_OR_RETURN(xla::XlaComputation constant_graph, handle().builder()->BuildConstantSubGraph( handle(), dynamic_dimension_is_minus_one)); TF_ASSIGN_OR_RETURN(xla::Literal literal, client->ComputeConstant(constant_graph, &layout)); Tensor tensor; TF_RETURN_IF_ERROR(LiteralToHostTensor(literal, dtype(), &tensor)); return {tensor}; } absl::StatusOr<TensorShape> XlaExpression::GetShape() const { switch (kind_) { case Kind::kConstant: return constant_value()->shape(); case Kind::kResource: if (constant_value()) { return constant_value()->shape(); } return TensorShape({}); case Kind::kXlaOp: { TF_ASSIGN_OR_RETURN(xla::Shape xla_shape, handle().builder()->GetShape(handle())); TensorShape shape; TF_RETURN_IF_ERROR(XLAShapeToTensorShape(xla_shape, &shape)); return shape; } case Kind::kTensorList: return TensorShape({}); case Kind::kInvalid: return errors::InvalidArgument( "GetShape() called on invalid XlaExpression"); } } absl::StatusOr<xla::Shape> XlaExpression::GetXlaShape() const { if (kind_ == Kind::kXlaOp) { return handle().builder()->GetShape(handle()); } TF_ASSIGN_OR_RETURN(TensorShape shape, GetShape()); return TensorShapeToXLAShape(dtype_, shape); } const XlaExpression* XlaExpression::CastExpressionFromTensor( const Tensor& tensor) { const XlaExpression* expression = reinterpret_cast<const XlaExpression*>(tensor.tensor_data().data()); CHECK(expression->kind() != XlaExpression::Kind::kInvalid) << expression->HumanString(); return expression; } void XlaExpression::AssignExpressionToTensor(const XlaExpression& value, Tensor* tensor) { const XlaExpression* expression = reinterpret_cast<const XlaExpression*>(tensor->tensor_data().data()); CHECK(expression->kind() == XlaExpression::Kind::kInvalid) << expression->HumanString(); *const_cast<XlaExpression*>(expression) = value; } }
#include <memory> #include "absl/memory/memory.h" #include "tensorflow/compiler/tf2xla/xla_expression.h" #include "tensorflow/compiler/tf2xla/xla_resource.h" #include "xla/client/client_library.h" #include "xla/client/local_client.h" #include "xla/client/xla_builder.h" #include "xla/literal.h" #include "xla/shape_util.h" #include "xla/status_macros.h" #include "xla/tests/literal_test_util.h" #include "tensorflow/core/framework/tensor_testutil.h" #include "tensorflow/core/lib/core/status_test_util.h" #include "tensorflow/core/platform/test.h" namespace tensorflow { namespace { class XlaExpressionTest : public ::testing::Test { protected: void SetUp() override { client_ = xla::ClientLibrary::LocalClientOrDie(); builder_ = std::make_unique<xla::XlaBuilder>("acomputation"); constant_ = test::AsScalar<int32>(42); op_ = xla::ConstantR0<int32>(builder_.get(), 7); non_constant_op_ = xla::Parameter( builder_.get(), 0, xla::ShapeUtil::MakeShape(xla::F32, {}), "x"); resource_ = std::make_unique<XlaResource>( XlaResource::kVariable, 0, string("avariable"), DT_INT32, TensorShape({17, 3}), op_, -1, std::set<string>(), false); } xla::Client* client_; std::unique_ptr<xla::XlaBuilder> builder_; Tensor constant_; xla::XlaOp op_; xla::XlaOp non_constant_op_; std::unique_ptr<XlaResource> resource_; }; TEST_F(XlaExpressionTest, Kind) { EXPECT_TRUE(XlaExpression::Kind::kInvalid == XlaExpression().kind()); EXPECT_TRUE(XlaExpression::Kind::kInvalid == XlaExpression::Invalid().kind()); EXPECT_TRUE(XlaExpression::Kind::kConstant == XlaExpression::Constant(constant_).kind()); EXPECT_TRUE(XlaExpression::Kind::kXlaOp == XlaExpression::XlaOp(op_, DT_INT32).kind()); EXPECT_TRUE(XlaExpression::Kind::kResource == XlaExpression::Resource(resource_.get()).kind()); } TEST_F(XlaExpressionTest, HumanString) { EXPECT_EQ("invalid", XlaExpression().HumanString()); EXPECT_EQ("invalid", XlaExpression::Invalid().HumanString()); EXPECT_EQ("constant", XlaExpression::Constant(constant_).HumanString()); EXPECT_EQ("xla_op", XlaExpression::XlaOp(op_, DT_INT32).HumanString()); EXPECT_EQ("resource", XlaExpression::Resource(resource_.get()).HumanString()); } TEST_F(XlaExpressionTest, AsXlaOp) { xla::XlaOp op_as_op = XlaExpression::XlaOp(op_, DT_INT32).AsXlaOp(builder_.get()); EXPECT_TRUE(op_.IsIdenticalTo(op_as_op)); xla::XlaOp const_as_op = XlaExpression::Constant(constant_).AsXlaOp(builder_.get()); TF_ASSERT_OK_AND_ASSIGN(xla::XlaComputation computation, builder_->BuildConstantSubGraph(const_as_op)); TF_ASSERT_OK_AND_ASSIGN(xla::Literal value, client_->ComputeConstant(computation)); EXPECT_TRUE(xla::LiteralTestUtil::Equal(xla::LiteralUtil::CreateR0<int32>(42), value)); } TEST_F(XlaExpressionTest, GetShape) { EXPECT_FALSE(XlaExpression().GetShape().ok()); EXPECT_FALSE(XlaExpression::Invalid().GetShape().ok()); TF_ASSERT_OK_AND_ASSIGN(TensorShape resource_shape, XlaExpression::Resource(resource_.get()).GetShape()); EXPECT_EQ(TensorShape({}), resource_shape); TF_ASSERT_OK_AND_ASSIGN(TensorShape op_shape, XlaExpression::XlaOp(op_, DT_INT32).GetShape()); EXPECT_EQ(TensorShape({}), op_shape); TF_ASSERT_OK_AND_ASSIGN(TensorShape constant_shape, XlaExpression::Constant(constant_).GetShape()); EXPECT_EQ(TensorShape({}), constant_shape); } TEST_F(XlaExpressionTest, ResolveConstant) { EXPECT_FALSE(XlaExpression().ResolveConstant(client_).ok()); EXPECT_FALSE(XlaExpression::Invalid().ResolveConstant(client_).ok()); EXPECT_FALSE(XlaExpression::Resource(resource_.get()) .ResolveConstant(client_) ->has_value()); TF_ASSERT_OK_AND_ASSIGN( 
std::optional<Tensor> op_constant, XlaExpression::XlaOp(op_, DT_INT32).ResolveConstant(client_)); ASSERT_TRUE(op_constant.has_value()); test::ExpectTensorEqual<int32>(test::AsScalar<int32>(7), *op_constant); TF_ASSERT_OK_AND_ASSIGN(std::optional<Tensor> op_nonconstant, XlaExpression::XlaOp(non_constant_op_, DT_FLOAT) .ResolveConstant(client_)); EXPECT_FALSE(op_nonconstant.has_value()); TF_ASSERT_OK_AND_ASSIGN( std::optional<Tensor> constant_constant, XlaExpression::Constant(constant_).ResolveConstant(client_)); ASSERT_TRUE(constant_constant.has_value()); test::ExpectTensorEqual<int32>(constant_, *constant_constant); } TEST_F(XlaExpressionTest, ResolveConstantOnResource) { XlaExpression constant_resource = XlaExpression::ConstantResource(constant_, resource_.get()); EXPECT_TRUE(constant_resource.ResolveConstant(client_).ok()); EXPECT_TRUE(resource_->SetZeroValue(builder_.get()).ok()); LOG(ERROR) << "Resource is overwritten: " << resource_->IsOverwritten(); absl::StatusOr<std::optional<Tensor>> resolved_constant = constant_resource.ResolveConstant(client_); EXPECT_TRUE(resolved_constant.ok()); EXPECT_FALSE(resolved_constant->has_value()); } } }